diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index fcd2657a8f21f46889f602d70761a34b0146a064..c56dc1dd00268b3e9a8091971467518aecb833d1 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -4,7 +4,7 @@
 win_visual_studio_static_local_deps:
     tags:
         - Win
-
+ 
     before_script:
         - call VsDevCmd.bat
         - cd build_scripts\windows
@@ -61,7 +61,6 @@ win_visual_studio_static_local_deps:
 #        - call win_run_tests.bat
 #        - cd ..\..
 
-
 # Latest Ubuntu with Boost, Exprtk and Turtle
 # in system directories, Boost
 # installed from the official repository
@@ -69,7 +68,7 @@ win_visual_studio_static_local_deps:
 ubuntu_boost_system:
     tags:
         - centos7
-
+ 
     image: martinbeseda/lib4neuro-ubuntu-system-deps:latest
 
     before_script:
@@ -97,9 +96,6 @@ ubuntu_boost_local_static_deps:
     before_script:
         - export TERM=xterm
         - cd build_scripts/linux
-        - ./download_dependencies.sh
-        - cd ../..
-        - cd build_scripts/linux
         - export DEPENDENCIES_LINK_TYPE=static
         - export CLEAN_AFTER=yes
         - ./linux_gcc_build_x64_debug_local.sh
@@ -152,3 +148,4 @@ ubuntu_boost_local_static_deps:
 #        "registry.gitlab.com/gitlab-org/security-products/codequality:$SP_VERSION" /code
 #  artifacts:
 #    paths: [gl-code-quality-report.json]
+
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 82ab32dc57df8d3ce830de418a3b5343a8a3e51c..3ad955a65fcee29a52a73b32b02ac30dd81878f6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,10 +1,12 @@
-cmake_minimum_required(VERSION 3.0)
+cmake_minimum_required(VERSION 3.11)
 project(lib4neuro)
 
 message("Using CMake ${CMAKE_VERSION}")
 
 #TODO request newer version of CMake >=3.12
 
+#TODO use 'option' instead of 'set' for boolean variables!
+
 #TODO rewrite to use add_compile_definitions
 if(WIN32)
     add_compile_options("/D BOOST_ALL_NO_LIB")
@@ -58,7 +60,7 @@ endif()
 # Automatic settings #
 #--------------------#
 if(CMAKE_BUILD_TYPE MATCHES DEBUG)
-  set(CMAKE_VERBOSE_MAKEFILE ON)
+    option(CMAKE_VERBOSE_MAKEFILE "Enable verbose output from Makefile builds" ON)
 endif()
 
 #------------------------------------------------------------------------------------#
@@ -69,51 +71,92 @@ if (NOT ${MATCH} STREQUAL ${CMAKE_CURRENT_LIST_DIR})
     message(FATAL_ERROR "Illegal character(s) found in the path to the current directory!")
 endif()
 
+#------------------------------------------#
+# Detect maximum available number of cores #
+# and set corresponding build options      #
+#------------------------------------------#
+message("Detecting available cores count...")
+include(ProcessorCount)
+ProcessorCount(N_CORES)
+if(N_CORES GREATER 1)
+    math(EXPR N_CORES "${N_CORES}-1")
+    set(CTEST_BUILD_FLAGS -j${N_CORES})
+    set(ENV{N_CORES} ${N_CORES})
+    set(ctest_test_args ${ctest_test_args} PARALLEL_LEVEL ${N_CORES})
+endif()
+message("Build will be performed on ${N_CORES} cores.")
+
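# Sketch, not part of the patch: ProcessorCount() yields 0 when the core
# count cannot be detected, so a guard keeps the -j flag and the message
# above meaningful in that case.
if(N_CORES EQUAL 0)
    set(N_CORES 1)
endif()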
+#----------------------------------------#
+# Set prefixes and suffixes of libraries #
+#----------------------------------------#
+set(LIB_PREFIX "lib")
+set(LIB_SUFFIX "a")  # suffix for Linux static libraries
+if("${DEPENDENCIES_LINK_TYPE}" STREQUAL "shared" AND WIN32)
+    set(LIB_PREFIX "")
+    set(LIB_SUFFIX "dll")
+elseif("${DEPENDENCIES_LINK_TYPE}" STREQUAL "static" AND WIN32)
+    set(LIB_SUFFIX "lib")
+elseif("${DEPENDENCIES_LINK_TYPE}" STREQUAL "shared")
+    set(LIB_SUFFIX "so")
+endif()
+
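# Illustration (not part of the patch): with the settings above, a Boost
# component name assembled in FindBoost.cmake expands to, e.g.:
#   Linux,   static: libboost_system.a
#   Linux,   shared: libboost_system.so
#   Windows, static: libboost_system.lib
#   Windows, shared: boost_system.dll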
 #-------------------------#
 # Find external libraries #
 #-------------------------#
 
-#TODO download libraries using ExternalProject_Add() instead of external Bash script
-
 message("Looking for external libraries...")
 set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR})
 
-set(Boost_USE_MULTITHREADED ON)
-set(Boost_DEBUG ON)
+#TODO make downloading of dependencies optional
+option(ALLOW_DEPENDENCIES_DOWNLOAD "Allow external dependencies to be downloaded locally, if they're not found." ON)
+
+option(Boost_USE_MULTITHREADED "Use the multithreaded variants of the Boost libraries" ON)
+option(Boost_DEBUG "Enable debug output from FindBoost" ON)
 
 find_package(
     Boost
 
-    COMPONENTS 
-        system 
-        serialization 
+    COMPONENTS
+        system
+        serialization
         random
 )
-
-if(NOT Boost_FOUND)
-    message(FATAL_ERROR "Boost was NOT found! Specify variables BOOST_INCLUDEDIR and BOOST_LIBRARYDIR!")
+if((NOT Boost_FOUND) AND ALLOW_DEPENDENCIES_DOWNLOAD)
+    message("Boost will be downloaded and compiled locally in 'external_dependencies' folder.")
+    include(
+        DownloadBoost
+        RESULT_VARIABLE rv)
+    message("Boost download: " ${rv})
+elseif(NOT Boost_FOUND)
+    message(FATAL_ERROR "Boost was not found! Set variables BOOST_LIBRARYDIR and BOOST_INCLUDEDIR manually or set ALLOW_DEPENDENCIES_DOWNLOAD to ON to allow automatic download and compilation of Boost.")
 endif()
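# Assumed invocation sketch (placeholder paths): configuring against a
# preinstalled Boost instead of the automatic download:
#   cmake -DALLOW_DEPENDENCIES_DOWNLOAD=OFF \
#         -DBOOST_INCLUDEDIR=/opt/boost/include \
#         -DBOOST_LIBRARYDIR=/opt/boost/lib .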
 
-find_package(exprtk)
-message("EXPRTK_INCLUDE_DIRS: ${EXPRTK_INCLUDE_DIRS}")
+find_package(Exprtk)
+if(NOT EXPRTK_FOUND AND ALLOW_DEPENDENCIES_DOWNLOAD)
+    message("Exprt will be downloaded and compiled locally in 'external_dependencies' folder.")
+    include(DownloadExprtk)
+endif()
 
 find_package(Turtle)
-message("TURTLE_INCLUDE_DIR: ${TURTLE_INCLUDE_DIR}")
+if(NOT TURTLE_FOUND AND ALLOW_DEPENDENCIES_DOWNLOAD)
+    message("Turtle will be downloaded and compiled locally in 'external_dependencies' folder.")
+    include(DownloadTurtle)
+endif()
 
+message("Checking Armadillo dependencies")
+find_package(LAPACK)
+find_package(BLAS)
+find_package(OpenBLAS)
+if(NOT LAPACK_FOUND AND NOT BLAS_FOUND AND NOT OpenBLAS_FOUND AND ALLOW_DEPENDENCIES_DOWNLOAD)
+    # Download and build OpenBLAS locally
+    message("LAPACK nor BLAS were found - OpenBLAS will be downloaded and built.")
+    include(DownloadOpenBLAS)
+endif()
 
-#------------------------------------------#
-# Detect maximum available number of cores #
-# and set corresponding build options      #
-#------------------------------------------#
-message("Detecting available cores count...")
-include(ProcessorCount)
-ProcessorCount(n_cores)
-if(n_cores GREATER 1)
-    math(EXPR n_cores "${n_cores}-1")
-    message("Build will be performed on ${n_cores} cores.")
-    set(CTEST_BUILD_FLAGS -j${N})
-    set(ENV{N_CORES} ${N})
-    set(ctest_test_args ${ctest_test_args} PARALLEL_LEVEL ${N})
+find_package(Armadillo)
+if(NOT ARMADILLO_FOUND AND ALLOW_DEPENDENCIES_DOWNLOAD)
+    message("Armadillo will be downloaded and compiled locally in 'external_dependencies folder.")
+    include(DownloadArmadillo)
 endif()
 
 #---------------#
diff --git a/DownloadArmadillo.cmake b/DownloadArmadillo.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..89e94435299d303469fb23df937ed20ac44792f6
--- /dev/null
+++ b/DownloadArmadillo.cmake
@@ -0,0 +1,24 @@
+set(ARMADILLO_LOCAL_PATH ${CMAKE_CURRENT_LIST_DIR}/external_dependencies/armadillo)
+
+include(FetchContent)
+
+######################
+# Download Armadillo #
+######################
+FetchContent_Declare(
+    armadillo
+    SOURCE_DIR ${ARMADILLO_LOCAL_PATH}
+    GIT_REPOSITORY https://gitlab.com/conradsnicta/armadillo-code.git
+    GIT_TAG 9.300.x  #TODO do some general solution!
+)
+
+set(FETCHCONTENT_QUIET FALSE)
+
+FetchContent_Populate(armadillo)
+
+find_package(Armadillo)
+
+if(NOT ARMADILLO_FOUND)
+    message(FATAL_ERROR "Armadillo was not downloaded successfully!")
+endif()
+
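# Sketch, not part of the patch: FetchContent ships with CMake 3.11, which is
# what the cmake_minimum_required() bump above tracks. Re-running
# FetchContent_Populate() for already-populated content is an error, so
# repeated configure runs are commonly guarded like this:
FetchContent_GetProperties(armadillo)
if(NOT armadillo_POPULATED)
    FetchContent_Populate(armadillo)
endif()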
diff --git a/DownloadBoost.cmake b/DownloadBoost.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..18f2176026f617d2267029a59b8806d7d7c4d5b5
--- /dev/null
+++ b/DownloadBoost.cmake
@@ -0,0 +1,90 @@
+message("DownloadBoost starting...")
+
+set(BOOST_LOCAL_PATH ${CMAKE_CURRENT_LIST_DIR}/external_dependencies/boost)
+
+include(FetchContent)
+
+##################
+# Download Boost #
+##################
+set(WINAPI_BOOST_LIB "")
+if(WIN32)
+    set(WINAPI_BOOST_LIB libs/winapi)
+endif()
+
+FetchContent_Declare(
+    boost
+    SOURCE_DIR ${BOOST_LOCAL_PATH}
+    GIT_REPOSITORY https://github.com/boostorg/boost.git
+    GIT_SUBMODULES tools/build tools/boost_install
+                    libs/system libs/random libs/serialization
+                    libs/config libs/headers libs/assert libs/core
+                    libs/integer libs/type_traits libs/mpl
+                    libs/preprocessor libs/throw_exception
+                    libs/utility libs/static_assert libs/smart_ptr
+                    libs/predef libs/move libs/io libs/iterator
+                    libs/detail libs/spirit libs/optional
+                    libs/function libs/type_index libs/bind
+                    libs/container_hash libs/array libs/test
+                    libs/timer libs/exception libs/algorithm
+                    libs/range libs/numeric libs/format
+                    libs/lexical_cast libs/concept_check
+                    libs/container libs/math libs/function_types
+                    libs/typeof ${WINAPI_BOOST_LIB}
+)
+
+set(FETCHCONTENT_QUIET FALSE)
+
+FetchContent_Populate(boost)
+
+###############
+# Build Boost #
+###############
+
+set(BOOTSTRAP_CMD sh bootstrap.sh)
+set(B2_CMD ./b2 -j${N_CORES})
+if(WIN32)
+    set(BOOTSTRAP_CMD bootstrap.bat)
+    set(B2_CMD b2 -j${N_CORES})
+endif()
+
+execute_process(
+    COMMAND ${BOOTSTRAP_CMD}
+    WORKING_DIRECTORY ${BOOST_LOCAL_PATH}
+    RESULT_VARIABLE rv
+)
+if(NOT rv STREQUAL "0")
+    message("Boost build: bootstrap: ${rv}")
+endif()
+
+execute_process(
+    COMMAND ${B2_CMD} headers
+    WORKING_DIRECTORY ${BOOST_LOCAL_PATH}
+    RESULT_VARIABLE rv
+)
+if(NOT rv STREQUAL "0")
+    message("Boost build: b2 headers: ${rv}")
+endif()
+
+execute_process(
+    COMMAND ${B2_CMD} -q cxxflags=-fPIC --layout=system variant=debug link=${DEPENDENCIES_LINK_TYPE} address-model=64 --with-system --with-serialization --with-random
+    WORKING_DIRECTORY ${BOOST_LOCAL_PATH}
+    RESULT_VARIABLE rv
+)
+if(NOT rv STREQUAL "0")
+    message("Boost build: b2: ${rv}")
+endif()
+
+find_package(
+    Boost
+
+    COMPONENTS
+        system
+        serialization
+        random
+)
+
+if(NOT Boost_FOUND)
+    message(FATAL_ERROR "Boost was not downloaded successfully!")
+endif()
+
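# Stricter variant (sketch): the rv checks above only log failures; aborting
# the configure step on a failed build command would look like:
#   if(NOT rv STREQUAL "0")
#       message(FATAL_ERROR "Boost build: b2 failed with exit code ${rv}")
#   endif()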
diff --git a/DownloadExprtk.cmake b/DownloadExprtk.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..7523eac00cc0fa484110b7e58251623c9306a969
--- /dev/null
+++ b/DownloadExprtk.cmake
@@ -0,0 +1,23 @@
+set(EXPRTK_LOCAL_PATH ${CMAKE_CURRENT_LIST_DIR}/external_dependencies/exprtk)
+
+include(FetchContent)
+
+###################
+# Download exprtk #
+###################
+FetchContent_Declare(
+    exprtk
+    SOURCE_DIR ${EXPRTK_LOCAL_PATH}
+    GIT_REPOSITORY https://github.com/ArashPartow/exprtk.git
+)
+
+set(FETCHCONTENT_QUIET FALSE)
+
+FetchContent_Populate(exprtk)
+
+find_package(Exprtk)
+
+if(NOT EXPRTK_FOUND)
+    message(FATAL_ERROR "Exprtk was not downloaded successfully!")
+endif()
+
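# Note: exprtk is header-only, so unlike Boost no build step follows the
# download; the find module only has to locate the single header, e.g.
# (sketch): find_path(EXPRTK_INCLUDE_DIR exprtk.hpp HINTS ${EXPRTK_LOCAL_PATH})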
diff --git a/DownloadOpenBLAS.cmake b/DownloadOpenBLAS.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..edb794b9174f0d077eaf2bb4da41b68c9311d448
--- /dev/null
+++ b/DownloadOpenBLAS.cmake
@@ -0,0 +1,49 @@
+message("DownloadOpenBLAS starting...")
+
+set(OPENBLAS_LOCAL_PATH ${CMAKE_CURRENT_LIST_DIR}/external_dependencies/OpenBLAS)
+
+include(FetchContent)
+
+#####################
+# Download OpenBLAS #
+#####################
+FetchContent_Declare(
+    OpenBLAS
+    SOURCE_DIR ${OPENBLAS_LOCAL_PATH}
+    GIT_REPOSITORY https://github.com/xianyi/OpenBLAS.git
+)
+
+set(FETCHCONTENT_QUIET FALSE)
+
+FetchContent_Populate(OpenBLAS)
+
+##################
+# Build OpenBLAS #
+##################
+#execute_process(
+#    COMMAND cmake -j ${N_CORES} .
+#    WORKING_DIRECTORY ${OPENBLAS_LOCAL_PATH}
+#    RESULT_VARIABLE rv
+#)
+#if(NOT rv STREQUAL "0")
+#    message("OpenBLAS build: cmake .: ${rv}")
+#endif()
+#
+#execute_process(
+#    COMMAND cmake --build . -j ${N_CORES} --config Release
+#    WORKING_DIRECTORY ${OPENBLAS_LOCAL_PATH}
+#    RESULT_VARIABLE rv
+#)
+#if(NOT rv STREQUAL "0")
+#    message("OpenBLAS build: cmake --build . -j ${N_CORES}: ${rv}")
+#endif()
+
+set(OpenBLAS_DIR ${CMAKE_CURRENT_LIST_DIR}/external_dependencies/OpenBLAS)
+find_package(OpenBLAS)
+
+if(NOT OpenBLAS_FOUND)
+    message(FATAL_ERROR "OpenBLAS was not downloaded successfully!")
+else()
+    message("OpenBLAS libraries: " ${OpenBLAS_LIBRARIES})
+endif()
+
diff --git a/DownloadTurtle.cmake b/DownloadTurtle.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..c89d1252778cade6fcba7e4da84d916584695fbd
--- /dev/null
+++ b/DownloadTurtle.cmake
@@ -0,0 +1,23 @@
+set(TURTLE_LOCAL_PATH ${CMAKE_CURRENT_LIST_DIR}/external_dependencies/turtle)
+
+include(FetchContent)
+
+###################
+# Download Turtle #
+###################
+FetchContent_Declare(
+    turtle
+    SOURCE_DIR ${TURTLE_LOCAL_PATH}
+    GIT_REPOSITORY https://github.com/mat007/turtle.git
+)
+
+set(FETCHCONTENT_QUIET FALSE)
+
+FetchContent_Populate(turtle)
+
+find_package(Turtle)
+
+if(NOT TURTLE_FOUND)
+    message(FATAL_ERROR "Turtle was not downloaded successfully!")
+endif()
+
diff --git a/FindArmadillo.cmake b/FindArmadillo.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..972fa73d6233d51a1deb3d69ce111e288cf26499
--- /dev/null
+++ b/FindArmadillo.cmake
@@ -0,0 +1,69 @@
+message("FindArmadillo starting...")
+
+# Find headers
+FIND_PATH(
+    ARMADILLO_INCLUDE_DIR
+
+    NAMES
+        armadillo
+
+    HINTS
+        $ENV{ARMADILLO_INCLUDE_DIR}
+        ${ARMADILLO_INCLUDE_DIR}
+        external_dependencies/armadillo/
+
+    PATHS
+        /usr
+        /usr/local
+        /home
+        /opt/local
+
+    PATH_SUFFIXES
+        include
+        armadillo
+        include/armadillo
+)
+
+# Is Armadillo downloaded locally?
+set(LOCAL OFF)
+IF(${ARMADILLO_INCLUDE_DIR} MATCHES "^.*external_dependencies.*$")
+    message("Armadillo is downloaded locally - the library will be built when needed.")
+    set(LOCAL ON)
+    set(TMP "")
+    string(REGEX REPLACE "/include" "" TMP ${ARMADILLO_INCLUDE_DIR})
+    add_subdirectory(${TMP} ${TMP})
+endif()
+
+if(LOCAL)
+    # If Armadillo is downloaded locally, the library will be compiled at build time
+    INCLUDE(FindPackageHandleStandardArgs)
+    FIND_PACKAGE_HANDLE_STANDARD_ARGS(
+            armadillo
+            "Armadillo was NOT found!"
+            ARMADILLO_INCLUDE_DIR)
+
+else()
+    # Find library
+    set(LIBNAME ${LIB_PREFIX}armadillo.${LIB_SUFFIX})
+    FIND_LIBRARY(
+        ARMADILLO_LIBRARY_DIR
+
+        NAMES
+        ${LIBNAME}
+
+        HINTS
+            external_dependencies/armadillo
+
+        PATHS
+            /usr/lib
+            /usr/local/lib
+    )
+
+    # Set ARMADILLO_FOUND honoring the QUIET and REQUIRED arguments
+    INCLUDE(FindPackageHandleStandardArgs)
+    FIND_PACKAGE_HANDLE_STANDARD_ARGS(
+            armadillo
+            "Armadillo was NOT found!"
+            ARMADILLO_INCLUDE_DIR
+            ARMADILLO_LIBRARY_DIR)
+endif()
\ No newline at end of file
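# Consumer-side sketch ('your_target' is a placeholder): whether Armadillo was
# found installed or wired in via add_subdirectory() above, it is linked as an
# ordinary CMake target, as src/CMakeLists.txt below does for lib4neuro:
#   target_link_libraries(your_target armadillo)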
diff --git a/FindBoost.cmake b/FindBoost.cmake
index 2845b300024c7243d229d0d14b70ccc8510d734c..e23824b37707dde8fe12c0c516c7be94238990b5 100644
--- a/FindBoost.cmake
+++ b/FindBoost.cmake
@@ -46,6 +46,7 @@ find_path(
         ${BOOST_INCLUDEDIR}
         $ENV{BOOST_INCLUDEDIR}
         ${CMAKE_CURRENT_LIST_DIR}/external_dependencies/boost
+        ${CMAKE_CURRENT_LIST_DIR}/external_dependencies/boost/src/boost
 
     PATHS
         /usr/include
@@ -63,7 +64,7 @@ string(REPLACE "/boost/boost" "/boost" TMP ${Boost_INCLUDE_DIRS})
 list(APPEND Boost_INCLUDE_DIRS ${TMP})
 
 if(NOT Boost_INCLUDE_DIRS)
-    message(FATAL_ERROR "Boost include directory was not found! Please, set variable BOOST_INCLUDEDIR to the correct path.")
+    #    message(FATAL_ERROR "Boost include directory was not found! Please, set variable BOOST_INCLUDEDIR to the correct path.")
 else()
     message("Boost_INCLUDE_DIRS: ${Boost_INCLUDE_DIRS}")
 endif()
@@ -73,17 +74,6 @@ if(NOT DEPENDENCIES_LINK_TYPE)
     message(FATAL_ERROR "Variable DEPENDENCIES_LINK_TYPE is not set! Set it to 'static' or 'shared'.")
 endif()
 
-set(LIB_PREFIX "lib")
-set(LIB_SUFFIX "a")  # suffix for Linux static libraries
-if("${DEPENDENCIES_LINK_TYPE}" STREQUAL "shared" AND WIN32)
-    set(LIB_PREFIX "")
-    set(LIB_SUFFIX "dll")
-elseif("${DEPENDENCIES_LINK_TYPE}" STREQUAL "static" AND WIN32)
-    set(LIB_SUFFIX "lib")
-elseif("${DEPENDENCIES_LINK_TYPE}" STREQUAL "shared")
-    set(LIB_SUFFIX "so")
-endif()
-
 set(REQUESTED_BOOST_LIBS "")
 foreach(COMPONENT ${Boost_FIND_COMPONENTS})
     list(APPEND REQUESTED_BOOST_LIBS "${LIB_PREFIX}boost_${COMPONENT}.${LIB_SUFFIX}")
@@ -106,6 +96,7 @@ find_path(
         ${BOOST_LIBRARYDIR}
         $ENV{BOOST_LIBRARYDIR}
         ${CMAKE_CURRENT_LIST_DIR}/external_dependencies/boost
+        ${CMAKE_CURRENT_LIST_DIR}/external_dependencies/boost/src/boost
         ${CMAKE_CURRENT_LIST_DIR}/external_dependencies/boost/stage
         ${CMAKE_CURRENT_LIST_DIR}/external_dependencies/boost/stage/lib
 
@@ -119,53 +110,56 @@ find_path(
 )
 
 if(NOT Boost_LIBRARY_DIRS)
-    message(FATAL_ERROR "Boost library directory was not found! Please, set variable BOOST_LIBRARYDIR to the correct path.")
+    #message(FATAL_ERROR "Boost library directory was not found! Please, set variable BOOST_LIBRARYDIR to the correct path.")
 else()
     message("Boost_LIBRARY_DIRS: ${Boost_LIBRARY_DIRS}")
-endif()
-
+
     # Construct list of libraries' names and make them
-# targets, so they may be linked
-set(Boost_LIBRARIES "")
-foreach(LIBNAME ${REQUESTED_BOOST_LIBS})
-    message("Looking for ${LIBNAME}...")
-
-    set(${LIBNAME} "${LIBNAME}-NOTFOUND")
-    find_library(
-        ${LIBNAME}
-
-        NAMES
-        ${LIBNAME}
-
-        PATHS
-            ${Boost_LIBRARY_DIRS}
-
-        PATH_SUFFIXES
-            stage/lib
-            lib
-
-        NO_DEFAULT_PATH
-    )
-
-    message("${LIBNAME} ${${LIBNAME}}")
-
-    # Check, if the Boost component was found
-    if("${${LIBNAME}}" STREQUAL "${LIBNAME}-NOTFOUND")
-        message(FATAL_ERROR "Boost library ${LIBNAME} was NOT found!\
-                             Please, set variable BOOST_LIBRARYDIR to the correct path and check the library names\
-                             format in your Boost installation.")
-    else()
-        message("${LIBNAME} was found: ${${LIBNAME}}")
-    endif()
-
-    list(APPEND Boost_LIBRARIES ${${LIBNAME}})
-endforeach()
-
-
-if(NOT Boost_LIBRARY_DIRS)
-    message(FATAL_ERROR "Boost library directory was not found! Please, set variable BOOST_LIBRARYDIR to the correct path.")
+    # targets, so they may be linked
+    set(Boost_LIBRARIES "")
+    foreach(LIBNAME ${REQUESTED_BOOST_LIBS})
+        message("Looking for ${LIBNAME}...")
+
+        set(${LIBNAME} "${LIBNAME}-NOTFOUND")
+        find_library(
+            ${LIBNAME}
+
+            NAMES
+                ${LIBNAME}
+
+            PATHS
+                ${Boost_LIBRARY_DIRS}
+
+            PATH_SUFFIXES
+                stage/lib
+                lib
+
+            NO_DEFAULT_PATH
+        )
+
+        # Check if the Boost component was found
+        if("${${LIBNAME}}" STREQUAL "${LIBNAME}-NOTFOUND")
+            #message(FATAL_ERROR "Boost library ${LIBNAME} was NOT found!\
+            #                     Please, set variable BOOST_LIBRARYDIR to the correct path and check the library names\
+            #                     format in your Boost installation.")
+        else()
+            message("${LIBNAME} was found: ${${LIBNAME}}")
+
+            # Add every found library as an IMPORTED target
+            string(TOUPPER ${DEPENDENCIES_LINK_TYPE} TMP)
+            string(REGEX REPLACE "^lib" "" TARGET_NAME ${LIBNAME})
+            string(REGEX REPLACE "\\.[a-z]*$" "" TARGET_NAME ${TARGET_NAME})
+            add_library(${TARGET_NAME} ${TMP} IMPORTED)
+            set_target_properties(${TARGET_NAME} PROPERTIES IMPORTED_LOCATION ${${LIBNAME}})
+            message("Created IMPORTED library target: ${TARGET_NAME}")
+        endif()
+
+        list(APPEND Boost_LIBRARIES ${${LIBNAME}})
+    endforeach()
+
 endif()
 
+
 # Set Boost_FOUND
 INCLUDE(FindPackageHandleStandardArgs)
 FIND_PACKAGE_HANDLE_STANDARD_ARGS(
@@ -179,6 +173,8 @@ FIND_PACKAGE_HANDLE_STANDARD_ARGS(
         Boost_LIBRARY_DIRS
 )
 
-message("Boost_INCLUDE_DIRS: ${Boost_INCLUDE_DIRS}")
-message("Boost_LIBRARY_DIRS: ${Boost_LIBRARY_DIRS}")
-message("Boost_LIBRARIES: ${Boost_LIBRARIES}")
+if(Boost_FOUND)
+    message("Boost_INCLUDE_DIRS: ${Boost_INCLUDE_DIRS}")
+    message("Boost_LIBRARY_DIRS: ${Boost_LIBRARY_DIRS}")
+    message("Boost_LIBRARIES: ${Boost_LIBRARIES}")
+endif()
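# Linking sketch ('your_target' is a placeholder): for LIBNAME
# 'libboost_system.a' the loop above derives the IMPORTED target name
# 'boost_system' by stripping the prefix and extension, so consumers can link
# the components by name:
#   target_link_libraries(your_target boost_system boost_serialization)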
diff --git a/Findexprtk.cmake b/FindExprtk.cmake
similarity index 85%
rename from Findexprtk.cmake
rename to FindExprtk.cmake
index 256b61b1535bb44f2ec16111006b13abaa2a0448..b7d984c26b2522b7e9bd98cae900d1da76552846 100644
--- a/Findexprtk.cmake
+++ b/FindExprtk.cmake
@@ -9,6 +9,8 @@
 #
 ################################################################################
 
+message("FindExprtk starting...")
+
 # Find headers and libraries
 FIND_PATH(
     EXPRTK_INCLUDE_DIR
@@ -42,8 +44,9 @@ FIND_PACKAGE_HANDLE_STANDARD_ARGS(
 IF(EXPRTK_FOUND)
     # Include dirs
     SET(EXPRTK_INCLUDE_DIRS ${EXPRTK_INCLUDE_DIR})
+    message("Exprtk was successfully found.")
 ELSE()
-    MESSAGE(FATAL_ERROR "Set, please, the environmental variable EXPRTK_INCLUDE_DIR to the folder, where 'exprtk.hpp' is located...")
+    #MESSAGE(FATAL_ERROR "Set, please, the environmental variable EXPRTK_INCLUDE_DIR to the folder, where 'exprtk.hpp' is located...")
 ENDIF(EXPRTK_FOUND)
 
 # Advanced options for not cluttering the cmake UIs:
diff --git a/FindOpenBLAS.cmake b/FindOpenBLAS.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..506c015753c76584479d26d081bd04b23b31c490
--- /dev/null
+++ b/FindOpenBLAS.cmake
@@ -0,0 +1,49 @@
+message("FindOpenBLAS starting...")
+
+set(OpenBLAS_ROOT external_dependencies/OpenBLAS)
+
+FIND_PATH(
+    OpenBLAS_INCLUDE_DIR
+
+    NAMES
+    cblas.h
+
+    HINTS
+        ${OpenBLAS_INCLUDE_DIRECTORY}
+        $ENV{OpenBLAS_INCLUDE_DIRECTORY}
+        /usr/include/
+        /usr/include/x86_64-linux-gnu
+        external_dependencies/OpenBLAS
+)
+
+FIND_LIBRARY(OpenBLAS_LIBRARIES
+    NAMES
+    ${LIB_PREFIX}openblas.${LIB_SUFFIX}
+
+    HINTS
+    ${OpenBLAS_LIBRARY_DIRECTORY}
+    $ENV{OpenBLAS_LIBRARY_DIRECTORY}
+    /usr/lib
+    /usr/lib/x86_64-linux-gnu
+    external_dependencies/OpenBLAS/lib
+)
+
+# Set OpenBLAS_Found
+INCLUDE(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(
+        OpenBLAS
+
+        FAIL_MESSAGE
+            "OpenBLAS was NOT found!"
+
+        REQUIRED_VARS
+            OpenBLAS_INCLUDE_DIR
+            OpenBLAS_LIBRARIES
+)
+
+IF(OpenBLAS_FOUND)
+    MESSAGE(STATUS "Found OpenBLAS libraries: ${OpenBLAS_LIBRARIES}")
+    MESSAGE(STATUS "Found OpenBLAS include: ${OpenBLAS_INCLUDE_DIR}")
+ELSE()
+    MESSAGE("Could not find OpenBLAS")
+ENDIF()
diff --git a/FindTurtle.cmake b/FindTurtle.cmake
index 08896aff3cd2536ed6cc02c0fe457049defba00f..dad37d10d660bb680c8bdefe5f9ca66035e1dd5a 100644
--- a/FindTurtle.cmake
+++ b/FindTurtle.cmake
@@ -1,3 +1,5 @@
+message("FindTurtle starting...")
+
 # Find headers and libraries
 FIND_PATH(
     TURTLE_INCLUDE_DIR
@@ -33,8 +35,9 @@ FIND_PACKAGE_HANDLE_STANDARD_ARGS(
 IF(TURTLE_FOUND)
     # Include dirs
     SET(TURTLE_INCLUDE_DIRS ${TURTLE_INCLUDE_DIR})
+    message("Turtle was successfully found.")
 ELSE()
-    MESSAGE(FATAL_ERROR "Set, please, the environmental variable TURTLE_INCLUDE_DIR to the folder, where 'mock.hpp' is located...")
+    #    MESSAGE(FATAL_ERROR "Set, please, the environmental variable TURTLE_INCLUDE_DIR to the folder, where 'mock.hpp' is located...")
 ENDIF(TURTLE_FOUND)
 
 # Add path only to the 'include' folder
diff --git a/build.bat b/build.bat
index ef30bdbb252c74569412e11dc83fd85e68ecbb98..471a1e44d9fae7d03f46140fbad0d6c897bd470b 100644
--- a/build.bat
+++ b/build.bat
@@ -2,10 +2,10 @@
 
 set CLEAN_AFTER=no
 
-call clean.bat
+rem call clean.bat
 
 cd build_scripts\windows
-call win_download_dependencies
+rem call win_download_dependencies
 set DEPENDENCIES_LINK_TYPE=static
 call win_VS_build_x64_debug.bat
 cd ..\..
diff --git a/build.sh b/build.sh
index 62b2828c60a1dfe10a044a3c52e97002954fdf06..e25792039c945f8b6c7105d64357f846f7c9c23a 100755
--- a/build.sh
+++ b/build.sh
@@ -2,10 +2,9 @@
 
 export CLEAN_AFTER=no
 
-./clean.sh
+#./clean.sh
 
 cd build_scripts/linux
-./download_dependencies.sh
-export DEPENDENCIES_LINK_TYPE=shared
+export DEPENDENCIES_LINK_TYPE=static
 ./linux_gcc_build_x64_debug_local.sh
 cd ../..
diff --git a/build_scripts/linux/linux_gcc_build_x64_debug_local.sh b/build_scripts/linux/linux_gcc_build_x64_debug_local.sh
index 5fe7ee813209fbe3fd3068659407bf8319434608..66b7cc4ec5e454e8082ca665f763f29a3e69bd63 100755
--- a/build_scripts/linux/linux_gcc_build_x64_debug_local.sh
+++ b/build_scripts/linux/linux_gcc_build_x64_debug_local.sh
@@ -13,7 +13,7 @@ BUILD_EXAMPLES=yes
 BUILD_TESTS=yes
 
 # Should we rebuild BOOST? (yes/no)
-REBUILD_BOOST=yes
+#REBUILD_BOOST=yes
 # Should we build the lib4neuro library? (yes)
 BUILD_LIB=yes
 
@@ -75,20 +75,19 @@ then
     BUILD_SOMETHING_LIB=yes
 fi
 
-if [ ${REBUILD_BOOST} = "yes" ]
-then
-    echo "The required '${CYAN}BOOST${NC}' library will be recompiled in the directory '${YELLOW}external_dependencies/boost${NC}'"
-    rm -rf ../../external_dependencies/boost/stage
-    rm -rf ../../external_dependencies/boost/bin.v2
-    BUILD_SOMETHING=yes
-fi
-
+#if [ ${REBUILD_BOOST} = "yes" ]
+#then
+#    echo "The required '${CYAN}BOOST${NC}' library will be recompiled in the directory '${YELLOW}external_dependencies/boost${NC}'"
+#    rm -rf ../../external_dependencies/boost/stage
+#    rm -rf ../../external_dependencies/boost/bin.v2
+#    BUILD_SOMETHING=yes
+#fi
 
 # Boost rebuild
-if [ ${REBUILD_BOOST} = "yes" ]
-then
-    ./build_boost_gcc.sh || BUILD_ERROR_OCCURED=1
-fi
+#if [ ${REBUILD_BOOST} = "yes" ]
+#then
+#    ./build_boost_gcc.sh || BUILD_ERROR_OCCURED=1
+#fi
 
 # Should we build the lib4neuro library? (yes)
 if [ ${BUILD_SOMETHING_LIB} = "yes" -a $BUILD_ERROR_OCCURED = "0" ]
diff --git a/build_scripts/windows/win_VS_build_x64_debug.bat b/build_scripts/windows/win_VS_build_x64_debug.bat
index 67fa05939a2e54dc7bad651b78c6597c6688cf64..eddf080bb3a211eaa57cd511b1c75c00896153f5 100644
--- a/build_scripts/windows/win_VS_build_x64_debug.bat
+++ b/build_scripts/windows/win_VS_build_x64_debug.bat
@@ -71,8 +71,8 @@ IF "%BUILD_EXAMPLES%"=="yes" (
 )
 
 IF "%REBUILD_BOOST%"=="yes" (
-    echo The required BOOST library will be recompiled in the directory 'external_dependencies\boost'
-	set BUILD_SOMETHING=yes
+rem    echo The required BOOST library will be recompiled in the directory 'external_dependencies\boost'
+rem	set BUILD_SOMETHING=yes
 )
 
 IF "%BUILD_SOMETHING%"=="yes" (
@@ -82,17 +82,17 @@ IF "%BUILD_SOMETHING%"=="yes" (
 
 rem Boost rebuild
 IF "%REBUILD_BOOST%"=="yes" (
-	title Rebuilding 'BOOST' for Debug
-    cd ..\..
-	
-	rmdir /s /q external_dependencies\boost\stage 2>NUL
-	rmdir /s /q external_dependencies\boost\bin.v2 2>NUL
-
-	cd external_dependencies\boost
-
-   .\b2 -q --layout=system variant=debug link=%LINK_TYPE% address-model=64 --with-system --with-serialization --with-random || goto error_occured_boost
-
-	cd ..\..\build_scripts\windows
+rem    title Rebuilding 'BOOST' for Debug
+rem    cd ..\..
+rem
+rem    rmdir /s /q external_dependencies\boost\stage 2>NUL
+rem    rmdir /s /q external_dependencies\boost\bin.v2 2>NUL
+rem
+rem    cd external_dependencies\boost
+rem
+rem    .\b2 -q --layout=system variant=debug link=%LINK_TYPE% address-model=64 --with-system --with-serialization --with-random || goto error_occured_boost
+rem
+rem    cd ..\..\build_scripts\windows
 )
 
 IF "%BUILD_SOMETHING_LIB%"=="yes" (
diff --git a/clean.bat b/clean.bat
index 3a38f7ba7480439bfdf5219ce748f890c2aa612d..44aedeff01368a7b5cc987414eb49e6ad4325fc6 100644
--- a/clean.bat
+++ b/clean.bat
@@ -18,3 +18,5 @@ rmdir /s /q CMakeFiles 2>NUL
 rmdir /s /q src/CMakeFiles 2>NUL
 rmdir /s /q src/examples/CMakeFiles 2>NUL
 rmdir /s /q src/tests/CMakeFiles 2>NUL
+rmdir /s /q external_dependencies 2>NUL
+rmdir /s /q _deps 2>NUL
diff --git a/clean.sh b/clean.sh
index 3f6c2d69384a9368fd53f590b515d28a074206e5..3a0e8d6fe8123e85a8511410c6b100e402ed9ae9 100755
--- a/clean.sh
+++ b/clean.sh
@@ -9,3 +9,5 @@ rm -f src/funit.tmp src/*_fun.f90
 rm -f CMakeCache.txt
 rm -f cmake_install.cmake src/cmake_install.cmake
 rm -rf CMakeFiles src/CMakeFiles src/examples/CMakeFiles src/tests/CMakeFiles
+rm -rf external_dependencies/*
+rm -rf _deps
diff --git a/external_dependencies/boost b/external_dependencies/boost
index a86cf5b7c6742da6113b598a8d9c0c3a3cdf8175..d7d15be791bffc9f64e4c65b123a5f37c6a9b032 160000
--- a/external_dependencies/boost
+++ b/external_dependencies/boost
@@ -1 +1 @@
-Subproject commit a86cf5b7c6742da6113b598a8d9c0c3a3cdf8175
+Subproject commit d7d15be791bffc9f64e4c65b123a5f37c6a9b032
diff --git a/include/4neuro.h b/include/4neuro.h
index faa7552c762696822c5a1a718cdd2942aba9e29c..8012abb0ee1ad330931117e8a98e4b7163a8944e 100644
--- a/include/4neuro.h
+++ b/include/4neuro.h
@@ -17,10 +17,21 @@
 #include "../src/Neuron/NeuronLogistic.h"
 #include "../src/Solvers/DESolver.h"
 #include "../src/ErrorFunction/ErrorFunctions.h"
+#include "../src/LearningMethods/ParticleSwarm.h"
+#include "../src/LearningMethods/GradientDescent.h"
+#include "../src/LearningMethods/GradientDescentBB.h"
+#include "../src/LearningMethods/GradientDescentSingleItem.h"
+#include "../src/LearningMethods/LearningSequence.h"
+#include "../src/LearningMethods/LevenbergMarquardt.h"
+
+
 #include "../src/constants.h"
 #include "../src/CSVReader/CSVReader.h"
 #include "../src/CrossValidator/CrossValidator.h"
 
+#include "../src/message.h"
+#include "../src/exceptions.h"
+
 // Abbreviate lib4neuro namespace to l4n
 namespace l4n = lib4neuro;
 
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 326e1d42b3b9b92c72610f2866116bb774f28fa5..d9e1c77d14d018eee4a52214ff9e2b1022492006 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -51,8 +51,10 @@ if ("${BUILD_LIB}" STREQUAL "yes")
             Network/NeuralNetworkSum.cpp
             NetConnection/ConnectionFunctionGeneral.cpp
             NetConnection/ConnectionFunctionIdentity.cpp
+            LearningMethods/LearningMethods.cpp
             LearningMethods/ParticleSwarm.cpp
             LearningMethods/GradientDescent.cpp
+            LearningMethods/LevenbergMarquardt.cpp
             LearningMethods/GradientDescentBB.cpp
             DataSet/DataSet.cpp
             ErrorFunction/ErrorFunctions.cpp
@@ -60,7 +62,9 @@ if ("${BUILD_LIB}" STREQUAL "yes")
             CSVReader/CSVReader.cpp
             CrossValidator/CrossValidator.cpp
             NormalizationStrategy/NormalizationStrategy.cpp
-            LearningMethods/GradientDescentSingleItem.cpp LearningMethods/GradientDescentSingleItem.h LearningMethods/LearningSequence.cpp LearningMethods/LearningSequence.h)
+            LearningMethods/GradientDescentSingleItem.cpp
+            LearningMethods/LearningSequence.cpp
+    )
 
     # FileSystem C++ library - has to be linked manually in GCC-8
     set(CXX_FILESYSTEM_LIB "")
@@ -75,6 +79,7 @@ if ("${BUILD_LIB}" STREQUAL "yes")
             exprtk_wrap
             ${Boost_LIBRARIES}
             ${CXX_FILESYSTEM_LIB}
+            armadillo
     )
 
     target_include_directories(
@@ -87,6 +92,7 @@ if ("${BUILD_LIB}" STREQUAL "yes")
             ${EXPRTK_INCLUDE_DIR}
             ${SRC_DIR}
             ${Boost_INCLUDE_DIRS}
+            ${ARMADILLO_INCLUDE_DIRS}
     )
 
     set_target_properties(
diff --git a/src/CrossValidator/CrossValidator.cpp b/src/CrossValidator/CrossValidator.cpp
index 1a5beca7bafd023fc72bbaf91bfa39eef36fbe6e..b345f36cff44a1ae757899489a03377e20f25726 100644
--- a/src/CrossValidator/CrossValidator.cpp
+++ b/src/CrossValidator/CrossValidator.cpp
@@ -6,7 +6,7 @@
 #include "message.h"
 
 namespace lib4neuro {
-    LIB4NEURO_API CrossValidator::CrossValidator(ILearningMethods* optimizer, ErrorFunction* ef) {
+    LIB4NEURO_API CrossValidator::CrossValidator(LearningMethod* optimizer, ErrorFunction* ef) {
         this->optimizer = optimizer;
         this->ef = ef;
     }
diff --git a/src/CrossValidator/CrossValidator.h b/src/CrossValidator/CrossValidator.h
index 8ee16286715a225592fbc8d31a3e9a078dea92fd..269dcec5e0861286385acbfb7291990981d9b3bc 100644
--- a/src/CrossValidator/CrossValidator.h
+++ b/src/CrossValidator/CrossValidator.h
@@ -7,7 +7,7 @@
 
 #include "../settings.h"
 #include "../DataSet/DataSet.h"
-#include "../LearningMethods/ILearningMethods.h"
+#include "../LearningMethods/LearningMethod.h"
 
 namespace lib4neuro {
 
@@ -20,7 +20,7 @@ namespace lib4neuro {
         /**
          *
          */
-        ILearningMethods* optimizer;
+        LearningMethod* optimizer;
 
         /**
          *
@@ -34,7 +34,7 @@ namespace lib4neuro {
          * @param optimizer
          * @param data_set
          */
-        LIB4NEURO_API CrossValidator(ILearningMethods* optimizer, ErrorFunction* ef);
+        LIB4NEURO_API CrossValidator(LearningMethod* optimizer, ErrorFunction* ef);
 
         /**
          *
diff --git a/src/DataSet/DataSet.cpp b/src/DataSet/DataSet.cpp
index 2e9dfe5f3ff84586f4868a6beb71e451b7905b8a..1501e1ac44707e5f7a628607ce1b8bffd795b3be 100644
--- a/src/DataSet/DataSet.cpp
+++ b/src/DataSet/DataSet.cpp
@@ -407,6 +407,7 @@ namespace lib4neuro {
             srand(time(NULL));  //TODO use Mersen twister from Boost
 
             size_t n_chosen = rand() % std::min(max, this->data.size())+1;
+            n_chosen = max;
             std::vector<size_t> chosens;
             size_t chosen;
 
@@ -418,6 +419,7 @@ namespace lib4neuro {
                     i--;
                 } else {
                     newData.push_back(this->data.at(chosen));
+                    chosens.push_back(chosen);
                 }
             }
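// Note on the change above: recording each accepted index in 'chosens' makes
// the duplicate check effective; previously the vector stayed empty, so the
// same element could be drawn repeatedly into one batch.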
 
diff --git a/src/ErrorFunction/ErrorFunctions.cpp b/src/ErrorFunction/ErrorFunctions.cpp
index c8a67ec0d8aa5bfd50ba645701ce2405e74a670b..b57e7014c4771b188705a45a886b6978cd08ff72 100644
--- a/src/ErrorFunction/ErrorFunctions.cpp
+++ b/src/ErrorFunction/ErrorFunctions.cpp
@@ -99,10 +99,27 @@ namespace lib4neuro {
         this->dimension = net->get_n_weights() + net->get_n_biases();
     }
 
-    double MSE::eval_on_data_set(lib4neuro::DataSet* data_set, std::ofstream* results_file_path,
-                                 std::vector<double>* weights) {
-        //TODO do NOT duplicate code - rewrite the function in a better way
+    double MSE::eval_on_single_input(std::vector<double>* input,
+                                     std::vector<double>* output,
+                                     std::vector<double>* weights) {
+        std::vector<double> predicted_output(this->get_network_instance()->get_n_outputs());
+        this->net->eval_single(*input, predicted_output, weights);
+        double result = 0;
+        double val;
 
+        for(size_t i = 0; i < output->size(); i++) {
+            val = output->at(i) - predicted_output.at(i);
+            result += val*val;
+        }
+
+        return std::sqrt(result);
+    }
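    // Note: the value returned above is the L2 norm of the residual vector
    // for a single sample (square root of the summed squared errors); it is
    // the quantity consumed, with flipped sign, by calculate_single_residual()
    // further below.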
+
+    double MSE::eval_on_data_set(lib4neuro::DataSet* data_set,
+                                 std::ofstream* results_file_path,
+                                 std::vector<double>* weights,
+                                 bool denormalize_data,
+                                 bool verbose) {
         size_t dim_in = data_set->get_input_dim();
         size_t dim_out = data_set->get_output_dim();
         double error = 0.0, val, output_norm = 0;
@@ -114,22 +131,26 @@ namespace lib4neuro {
         std::vector<std::vector<double>> outputs(data->size());
         std::vector<double> output(dim_out);
 
-        COUT_DEBUG("Evaluation of the error function MSE on the given data-set" << std::endl);
-        COUT_DEBUG(R_ALIGN << "[Element index]" << " "
-                   << R_ALIGN << "[Input]" << " "
-                   << R_ALIGN << "[Real output]" << " "
-                   << R_ALIGN << "[Predicted output]" << " "
-                   << R_ALIGN << "[Absolute error]" << " "
-                   << R_ALIGN << "[Relative error %]"
-                   << std::endl);
-
-        *results_file_path << R_ALIGN << "[Element index]" << " "
-                           << R_ALIGN << "[Input]" << " "
-                           << R_ALIGN << "[Real output]" << " "
-                           << R_ALIGN << "[Predicted output]" << " "
-                           << R_ALIGN << "[Abs. error]" << " "
-                           << R_ALIGN << "[Rel. error %]"
-                           << std::endl;
+        if (verbose) {
+            COUT_DEBUG("Evaluation of the error function MSE on the given data-set" << std::endl);
+            COUT_DEBUG(R_ALIGN << "[Element index]" << " "
+                               << R_ALIGN << "[Input]" << " "
+                               << R_ALIGN << "[Real output]" << " "
+                               << R_ALIGN << "[Predicted output]" << " "
+                               << R_ALIGN << "[Absolute error]" << " "
+                               << R_ALIGN << "[Relative error %]"
+                               << std::endl);
+        }
+
+        if (results_file_path) {
+            *results_file_path << R_ALIGN << "[Element index]" << " "
+                               << R_ALIGN << "[Input]" << " "
+                               << R_ALIGN << "[Real output]" << " "
+                               << R_ALIGN << "[Predicted output]" << " "
+                               << R_ALIGN << "[Abs. error]" << " "
+                               << R_ALIGN << "[Rel. error %]"
+                               << std::endl;
+        }
 
         for (auto i = 0; i < data->size(); i++) {  // Iterate through every element in the test set
             /* Compute the net output and store it into 'output' variable */
@@ -140,41 +161,62 @@ namespace lib4neuro {
             outputs.at(i) = output;
         }
 
-        bool denormalize_output = false;
-        if(data_set->is_normalized()) {
-            data_set->de_normalize();
-            denormalize_output = true;
-        }
+        double denormalized_output;
+        double denormalized_real_input;
+        double denormalized_real_output;
+
+//        bool denormalize_output = false;
+//        if (denormalize_data) {
+//            if(data_set->is_normalized()) {
+//                data_set->de_normalize();
+//            }
+//            denormalize_output = true;
+//        }
 
         for (auto i = 0; i < data->size(); i++) {
 
             /* Compute difference for every element of the output vector */
 #ifdef L4N_DEBUG
             std::stringstream ss_input;
-            for(auto j = 0; j < dim_in-1; j++) {
-                ss_input << data->at(i).first.at(j) << ",";
+            std::string separator = "";
+            for (auto j = 0; j < dim_in; j++) {
+                if(denormalize_data) {
+                    denormalized_real_input = data_set->get_normalization_strategy()->de_normalize(data->at(i).first.at(j));
+                } else {
+                    denormalized_real_input = data->at(i).first.at(j);
+                }
+                ss_input << separator << denormalized_real_input;
+                separator = ",";
             }
-            ss_input << data->at(i).first.back();
 
             std::stringstream ss_real_output;
             std::stringstream ss_predicted_output;
 #endif
-            double denormalized_output;
+
             double loc_error = 0;
             output_norm = 0;
+#ifdef L4N_DEBUG
+            separator = "";
+#endif
             for (size_t j = 0; j < dim_out; ++j) {
-                if(denormalize_output) {
+                if (denormalize_data) {
+                    denormalized_real_output = data_set->get_normalization_strategy()->de_normalize(data->at(i).second.at(j));
                     denormalized_output = data_set->get_normalization_strategy()->de_normalize(outputs.at(i).at(j));
                 } else {
+                    denormalized_real_output = data->at(i).second.at(j);
                     denormalized_output = outputs.at(i).at(j);
                 }
 
 #ifdef L4N_DEBUG
-                ss_real_output << data->at(i).second.at(j);
-                ss_predicted_output << denormalized_output;
+                ss_real_output << separator << denormalized_real_output;
+                ss_predicted_output << separator << denormalized_output;
+                separator = ",";
 #endif
 
-                val = denormalized_output - data->at(i).second.at(j);
+                val = denormalized_output - denormalized_real_output;
                 loc_error += val * val;
                 error += loc_error;
 
@@ -185,251 +227,182 @@ namespace lib4neuro {
             std::stringstream ss_ind;
             ss_ind << "[" << i << "]";
 
-            COUT_DEBUG(R_ALIGN << ss_ind.str() << " "
-                       << R_ALIGN << ss_input.str() << " "
-                       << R_ALIGN << ss_real_output.str() << " "
-                       << R_ALIGN << ss_predicted_output.str() << " "
-                       << R_ALIGN << std::sqrt(loc_error) << " "
-                       << R_ALIGN << 200.0 * std::sqrt(loc_error) / (std::sqrt(loc_error) + std::sqrt(output_norm))
-                       << std::endl);
-
-            *results_file_path << R_ALIGN << ss_ind.str() << " "
-                               << R_ALIGN << ss_input.str() << " "
-                               << R_ALIGN << ss_real_output.str() << " "
-                               << R_ALIGN << ss_predicted_output.str() << " "
-                               << R_ALIGN << std::sqrt(loc_error) << " "
-                               << R_ALIGN << 200.0 * std::sqrt(loc_error) / (std::sqrt(loc_error) + std::sqrt(output_norm))
-                               << std::endl;
-#endif
-        }
-
-        double result = std::sqrt(error) / n_elements;
-
-        COUT_DEBUG("MSE = " << result << std::endl);
-        *results_file_path << "MSE = " << result << std::endl;
-
-        return result;
-    }
-
-    double MSE::eval_on_data_set(DataSet* data_set, std::string results_file_path, std::vector<double>* weights) {
-        //TODO do NOT duplicate code - rewrite the function in a better way
-
-        size_t dim_out = data_set->get_output_dim();
-        size_t n_elements = data_set->get_n_elements();
-        double error = 0.0, val;
-
-        std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = data_set->get_data();
-
-        //TODO instead use something smarter
-        std::vector<std::vector<double>> outputs(data->size());
-        std::vector<double> output(dim_out);
-
-        COUT_DEBUG("Evaluation of the error function MSE on the given data-set" << std::endl);
-        COUT_DEBUG(R_ALIGN << "[Input]" << " "
-                   << R_ALIGN << "[Real output]" << " "
-                   << R_ALIGN << "[Predicted output]" << " "
-                   << std::endl);
-
-        std::ofstream ofs(results_file_path);
-        if (!ofs.is_open()) {
-            THROW_RUNTIME_ERROR("File path: " + results_file_path + " was not successfully opened!");
-        }
-
-        ofs << R_ALIGN << "[Index]" << " "
-            << R_ALIGN << "[Input]" << " "
-            << R_ALIGN << "[Real output]" << " "
-            << R_ALIGN << "[Predicted output]"
-            << std::endl;
-
-        for (auto i = 0; i < data->size(); i++) {  // Iterate through every element in the test set
-
-            /* Compute the net output and store it into 'output' variable */
-            this->net->eval_single(data->at(i).first,
-                                   output,
-                                   weights);
-
-            outputs.at(i) = output;
-        }
-
-        bool denormalize_output = false;
-        if(data_set->is_normalized()) {
-            data_set->de_normalize();
-            denormalize_output = true;
-        }
-
-        for(auto i = 0; i < data->size(); i++) {
-            /* Compute difference for every element of the output vector */
-            double denormalized_output;
-            for (size_t j = 0; j < dim_out; ++j) {
-                if(denormalize_output) {
-                    denormalized_output = data_set->get_normalization_strategy()->de_normalize(outputs.at(i).at(j));
-                } else {
-                    denormalized_output = outputs.at(i).at(j);
-                }
-
-                std::stringstream ss_ind;
-                ss_ind << "[" << i << "]";
-
+            if (verbose) {
                 COUT_DEBUG(R_ALIGN << ss_ind.str() << " "
-                           << R_ALIGN << data->at(i).first.at(j) << " "
-                           << R_ALIGN << data->at(i).second.at(j) << " "
-                           << R_ALIGN << denormalized_output
-                           << std::endl);
-
-                ofs << R_ALIGN << ss_ind.str() << " "
-                    << R_ALIGN << data->at(i).first.at(j) << " "
-                    << R_ALIGN << data->at(i).second.at(j) << " "
-                    << R_ALIGN << denormalized_output
-                    << std::endl;
-
-                val = denormalized_output - data->at(i).second.at(j);
-                error += val * val;
+                                   << R_ALIGN << ss_input.str() << " "
+                                   << R_ALIGN << ss_real_output.str() << " "
+                                   << R_ALIGN << ss_predicted_output.str() << " "
+                                   << R_ALIGN << std::sqrt(loc_error) << " "
+                                   << R_ALIGN
+                                   << 200.0 * std::sqrt(loc_error) / (std::sqrt(loc_error) + std::sqrt(output_norm))
+                                   << std::endl);
             }
 
-            ofs << std::endl;
+            if (results_file_path) {
+                *results_file_path << R_ALIGN << ss_ind.str() << " "
+                                   << R_ALIGN << ss_input.str() << " "
+                                   << R_ALIGN << ss_real_output.str() << " "
+                                   << R_ALIGN << ss_predicted_output.str() << " "
+                                   << R_ALIGN << std::sqrt(loc_error) << " "
+                                   << R_ALIGN
+                                   << 200.0 * std::sqrt(loc_error) / (std::sqrt(loc_error) + std::sqrt(output_norm))
+                                   << std::endl;
+            }
+#endif
         }
 
-
         double result = std::sqrt(error) / n_elements;
 
-        ofs << "MSE = " << result << std::endl;
-        ofs.close();
-
-        COUT_DEBUG("MSE = " << result << std::endl);
-
-        return result;
-    }
-
-    double MSE::eval_on_data_set(DataSet* data_set, std::vector<double>* weights) {
-        size_t dim_out = data_set->get_output_dim();
-        size_t n_elements = data_set->get_n_elements();
-        double error = 0.0, val;
-
-        std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = data_set->get_data();
-
-        //TODO instead use something smarter
-        std::vector<std::vector<double>> outputs(data->size());
-        std::vector<double> output(dim_out);
-
-        COUT_DEBUG("Evaluation of the error function MSE on the given data-set" << std::endl);
-        COUT_DEBUG(R_ALIGN << "[Input]" << " "
-                   << R_ALIGN << "[Real output]" << " "
-                   << R_ALIGN << "[Predicted output]" << " "
-                   << std::endl);
-
-        /* Compute predicted outputs */
-        for (auto i = 0; i < data->size(); i++) {  // Iterate through every element in the test set
-
-            /* Compute the net output and store it into 'output' variable */
-            this->net->eval_single(data->at(i).first,
-                                   output,
-                                   weights);
-
-            outputs.at(i) = output;
+        if (verbose) {
+            COUT_DEBUG("MSE = " << result << std::endl);
         }
 
-        /* De-normalize data-set, if it's normalized */
-        bool denormalize_output = false;
-        if(data_set->is_normalized()) {
-            data_set->de_normalize();
-            denormalize_output = true;
+        if (results_file_path) {
+            *results_file_path << "MSE = " << result << std::endl;
         }
 
-        /* Evaluate the prediction error on de-normalized data */
-        for(auto i = 0; i < data->size(); i++) {
-
-            /* Compute difference for every element of the output vector */
-            double denormalized_output;
-            for (auto j = 0; j < dim_out; ++j) {
-                if(denormalize_output) {
-                    denormalized_output = data_set->get_normalization_strategy()->de_normalize(outputs.at(i).at(j));
-                } else {
-                    denormalized_output = outputs.at(i).at(j);
-                }
-
-                std::stringstream ss_ind;
-                ss_ind << "[" << i << "]";
-
-                COUT_DEBUG(R_ALIGN << ss_ind.str() << " "
-                           << R_ALIGN << data->at(i).first.at(j) << " "
-                           << R_ALIGN << data->at(i).second.at(j) << " "
-                           << R_ALIGN << denormalized_output
-                           << std::endl);
-
-                val = denormalized_output - data->at(i).second.at(j);
-                error += val * val;
-            }
-        }
-
-        double result = std::sqrt(error)/n_elements;
-        COUT_DEBUG("MSE = " << result << std::endl);
-
         return result;
     }
 
-    double MSE::eval(std::vector<double>* weights) {
+    double MSE::eval_on_data_set(DataSet* data_set,
+                                 std::string results_file_path,
+                                 std::vector<double>* weights,
+                                 bool verbose) {
+        std::ofstream ofs(results_file_path);
+        if (ofs.is_open()) {
+            double result = this->eval_on_data_set(data_set,
+                                                   &ofs,
+                                                   weights,
+                                                   true,
+                                                   verbose);
+            ofs.close();
+            return result;
+        } else {
+            THROW_RUNTIME_ERROR("File " + results_file_path + " couldn't be open!");
+        }
+    }
+
+    double MSE::eval_on_data_set(DataSet* data_set,
+                                 std::vector<double>* weights,
+                                 bool verbose) {
+        return this->eval_on_data_set(data_set,
+                                      nullptr,
+                                      weights,
+                                      true,
+                                      verbose);
+    }
+
+    double MSE::eval(std::vector<double>* weights,
+                     bool denormalize_data,
+                     bool verbose) {
+        return this->eval_on_data_set(this->ds,
+                                      nullptr,
+                                      weights,
+                                      denormalize_data,
+                                      verbose);
+    }
+
+    double MSE::eval_on_test_data(std::vector<double>* weights,
+                                  bool verbose) {
+        return this->eval_on_data_set(this->ds_test,
+                                      weights,
+                                      verbose);
+    }
+
+    double MSE::eval_on_test_data(std::string results_file_path,
+                                  std::vector<double>* weights,
+                                  bool verbose) {
+        return this->eval_on_data_set(this->ds_test,
+                                      results_file_path,
+                                      weights,
+                                      verbose);
+    }
+
+    double MSE::eval_on_test_data(std::ofstream* results_file_path,
+                                  std::vector<double>* weights,
+                                  bool verbose) {
+        return this->eval_on_data_set(this->ds_test,
+                                      results_file_path,
+                                      weights,
+                                      true,
+                                      verbose);
+    }
+
+    void
+    MSE::calculate_error_gradient(std::vector<double>& params,
+                                  std::vector<double>& grad,
+                                  double alpha,
+                                  size_t batch) {
+
         size_t dim_out = this->ds->get_output_dim();
-//    unsigned int dim_in = this->ds->get_input_dim();
         size_t n_elements = this->ds->get_n_elements();
-        double error = 0.0, val;
-
         std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = this->ds->get_data();
 
-//    //TODO instead use something smarter
-//    this->net->copy_weights(weights);
+        if (batch > 0) {
+            *data = this->ds->get_random_data_batch(batch);
+            n_elements = data->size();
+        }
+        std::vector<double> error_derivative(dim_out);
 
-        std::vector<double> output(dim_out);
 
         for (auto el: *data) {  // Iterate through every element in the test set
 
-            this->net->eval_single(el.first, output,
-                                   weights);  // Compute the net output and store it into 'output' variable
-
-            for (size_t j = 0; j < dim_out; ++j) {  // Compute difference for every element of the output vector
+            this->net->eval_single(el.first, error_derivative,
+                                   &params);  // Compute the net output and store it into 'output' variable
 
-                val = output.at(j) - el.second.at(j);
-                error += val * val;
+            for (size_t j = 0; j < dim_out; ++j) {
+                error_derivative[j] = 2.0 * (error_derivative[j] - el.second[j]); //real - expected result
             }
+
+            this->net->add_to_gradient_single(el.first,
+                                              error_derivative,
+                                              alpha / n_elements,
+                                              grad);
         }
-        return sqrt(error) / n_elements;
     }
 
-    double MSE::eval_on_test_data(std::vector<double>* weights) {
-        return this->eval_on_data_set(this->ds_test, weights);
-    }
+    double MSE::calculate_single_residual(std::vector<double>* input,
+                                          std::vector<double>* output,
+                                          std::vector<double>* parameters) {
 
-    double MSE::eval_on_test_data(std::string results_file_path, std::vector<double>* weights) {
-        return this->eval_on_data_set(this->ds_test, results_file_path, weights);
-    }
+        //TODO maybe move to the general ErrorFunction
+        //TODO check input vector sizes - they HAVE TO be allocated before calling this function
 
-    double MSE::eval_on_test_data(std::ofstream* results_file_path, std::vector<double>* weights) {
-        return this->eval_on_data_set(this->ds_test, results_file_path, weights);
+        return -this->eval_on_single_input(input, output, parameters);
     }
 
-    void
-    MSE::calculate_error_gradient(std::vector<double>& params, std::vector<double>& grad, double alpha, size_t batch) {
+    void MSE::calculate_residual_gradient(std::vector<double>* input,
+                                          std::vector<double>* output,
+                                          std::vector<double>* gradient,
+                                          double h) {
 
-        size_t dim_out = this->ds->get_output_dim();
-        size_t n_elements = this->ds->get_n_elements();
-        std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = this->ds->get_data();
+        //TODO check input vector sizes - they HAVE TO be allocated before calling this function
 
-        if (batch > 0) {
-            *data = this->ds->get_random_data_batch(batch);
-            n_elements = data->size();
-        }
-        std::vector<double> error_derivative(dim_out);
+        size_t n_parameters = this->get_dimension();
+        std::vector<double>* parameters = this->get_parameters();
 
+        double delta;  // Complete step size
+        double former_parameter_value;
+        double f_val1;  // f(x + delta)
+        double f_val2;  // f(x - delta)
 
-        for (auto el: *data) {  // Iterate through every element in the test set
+        for (size_t i = 0; i < n_parameters; i++) {
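+            /* Relative step: delta scales with the parameter magnitude, so the
+             * difference quotient stays well-conditioned even for small values */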
+            delta = h * (1 + std::abs(parameters->at(i)));
+            former_parameter_value = parameters->at(i);
 
-            this->net->eval_single(el.first, error_derivative,
-                                   &params);  // Compute the net output and store it into 'output' variable
+            if(delta != 0) {
+                /* Computation of f_val1 = f(x + delta) */
+                parameters->at(i) = former_parameter_value + delta;
+                f_val1 = this->calculate_single_residual(input, output, parameters);
 
-            for (size_t j = 0; j < dim_out; ++j) {
-                error_derivative[j] = 2.0 * (error_derivative[j] - el.second[j]); //real - expected result
+                /* Computation of f_val2 = f(x - delta) */
+                parameters->at(i) = former_parameter_value - delta;
+                f_val2 = this->calculate_single_residual(input, output, parameters);
+
+                gradient->at(i) = (f_val1 - f_val2) / (2*delta);
             }
 
-            this->net->add_to_gradient_single(el.first, error_derivative, alpha / n_elements, grad);
+            /* Restore parameter to the former value */
+            parameters->at(i) = former_parameter_value;
         }
     }
 
@@ -542,7 +515,8 @@ namespace lib4neuro {
         }
     }
 
-    double ErrorSum::eval_on_test_data(std::vector<double>* weights) {
+    double ErrorSum::eval_on_test_data(std::vector<double>* weights,
+                                       bool verbose) {
         //TODO take care of the case, when there are no test data
 
         double output = 0.0;
@@ -559,37 +533,50 @@ namespace lib4neuro {
         return output;
     }
 
-    double ErrorSum::eval_on_test_data(std::string results_file_path, std::vector<double>* weights) {
+    double ErrorSum::eval_on_test_data(std::string results_file_path,
+                                       std::vector<double>* weights,
+                                       bool verbose) {
         THROW_NOT_IMPLEMENTED_ERROR();
 
         return -1;
     }
 
-    double ErrorSum::eval_on_test_data(std::ofstream* results_file_path, std::vector<double>* weights) {
+    double ErrorSum::eval_on_test_data(std::ofstream* results_file_path,
+                                       std::vector<double>* weights,
+                                       bool verbose) {
         THROW_NOT_IMPLEMENTED_ERROR();
         return -1;
     }
 
-    double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set, std::vector<double>* weights) {
+    double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set,
+                                      std::vector<double>* weights,
+                                      bool verbose) {
         THROW_NOT_IMPLEMENTED_ERROR();
 
         return -1;
     }
 
-    double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set, std::string results_file_path,
-                                      std::vector<double>* weights) {
+    double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set,
+                                      std::string results_file_path,
+                                      std::vector<double>* weights,
+                                      bool verbose) {
         THROW_NOT_IMPLEMENTED_ERROR();
 
         return -1;
     }
 
-    double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set, std::ofstream* results_file_path,
-                                      std::vector<double>* weights) {
+    double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set,
+                                      std::ofstream* results_file_path,
+                                      std::vector<double>* weights,
+                                      bool denormalize_data,
+                                      bool verbose) {
         THROW_NOT_IMPLEMENTED_ERROR();
         return -1;
     }
 
-    double ErrorSum::eval(std::vector<double>* weights) {
+    double ErrorSum::eval(std::vector<double>* weights,
+                          bool denormalize_data,
+                          bool verbose) {
         double output = 0.0;
         ErrorFunction* ef = nullptr;
 
@@ -698,4 +685,19 @@ namespace lib4neuro {
     };
 
 
+    void ErrorSum::calculate_residual_gradient(std::vector<double>* input,
+                                               std::vector<double>* output,
+                                               std::vector<double>* gradient,
+                                               double h) {
+        THROW_NOT_IMPLEMENTED_ERROR();
+    }
+
+    double ErrorSum::calculate_single_residual(std::vector<double>* input,
+                                               std::vector<double>* output,
+                                               std::vector<double>* parameters) {
+        THROW_NOT_IMPLEMENTED_ERROR();
+
+        return 0;
+    }
+
 }
diff --git a/src/ErrorFunction/ErrorFunctions.h b/src/ErrorFunction/ErrorFunctions.h
index 6f2beda9804fc216886ca7eb7059a86afb000da4..87fcfc2d8313bba4d1b8b1ae083f9cdbcf12c2ed 100644
--- a/src/ErrorFunction/ErrorFunctions.h
+++ b/src/ErrorFunction/ErrorFunctions.h
@@ -14,11 +14,11 @@
 
 namespace lib4neuro {
 
+    //TODO write smarter using ErrorFunction abstract class?
     enum ErrorFunctionType {
         ErrorFuncMSE
     };
 
-
     class ErrorFunction {
     public:
 
@@ -27,7 +27,8 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        virtual double eval(std::vector<double>* weights = nullptr) = 0;
+        virtual double eval(std::vector<double>* weights = nullptr,
+                            bool denormalize_data = false,
+                            bool verbose = false) = 0;
 
         /**
          *
@@ -99,7 +100,7 @@ namespace lib4neuro {
         /**
          *
          */
-        virtual double eval_on_test_data(std::vector<double>* weights = nullptr) = 0;
+        virtual double eval_on_test_data(std::vector<double>* weights = nullptr, bool verbose = false) = 0;
 
         /**
          *
@@ -107,7 +108,8 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        virtual double eval_on_test_data(std::string results_file_path, std::vector<double>* weights = nullptr) = 0;
+        virtual double eval_on_test_data(std::string results_file_path, std::vector<double>* weights = nullptr,
+                bool verbose = false) = 0;
 
         /**
          *
@@ -115,7 +117,8 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        virtual double eval_on_test_data(std::ofstream* results_file_path, std::vector<double>* weights = nullptr) = 0;
+        virtual double eval_on_test_data(std::ofstream* results_file_path, std::vector<double>* weights = nullptr,
+                bool verbose = false) = 0;
 
         /**
          *
@@ -123,7 +126,8 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        virtual double eval_on_data_set(DataSet* data_set, std::vector<double>* weights = nullptr) = 0;
+        virtual double eval_on_data_set(DataSet* data_set, std::vector<double>* weights = nullptr,
+                bool verbose = false) = 0;
 
         /**
          *
@@ -133,7 +137,8 @@ namespace lib4neuro {
          * @return
          */
         virtual double
-        eval_on_data_set(DataSet* data_set, std::string results_file_path, std::vector<double>* weights = nullptr) = 0;
+        eval_on_data_set(DataSet* data_set, std::string results_file_path, std::vector<double>* weights = nullptr,
+                bool verbose = false) = 0;
 
         /**
          *
@@ -142,8 +147,11 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        virtual double eval_on_data_set(DataSet* data_set, std::ofstream* results_file_path,
-                                        std::vector<double>* weights = nullptr) = 0;
+        virtual double eval_on_data_set(DataSet* data_set,
+                                        std::ofstream* results_file_path = nullptr,
+                                        std::vector<double>* weights = nullptr,
+                                        bool denormalize_data = true,
+                                        bool verbose = false) = 0;
 
         /**
          *
@@ -161,6 +169,31 @@ namespace lib4neuro {
          */
         virtual void calculate_error_gradient_single(std::vector<double> &error_vector, std::vector<double> &gradient_vector) = 0;
 
+        /**
+         * Computes the gradient of the residual function for a single
+         * input-output pair
+         *
+         * @param input Single input vector
+         * @param output Expected output vector corresponding to 'input'
+         * @param gradient Output buffer for the resulting gradient
+         * @param h Step size used by finite-difference implementations
+         */
+        virtual void
+        calculate_residual_gradient(std::vector<double>* input,
+                                    std::vector<double>* output,
+                                    std::vector<double>* gradient,
+                                    double h = 1e-3) = 0;
+
+        /**
+         * Evaluates the residual for a single input-output pair
+         *
+         * @param input Single input vector
+         * @param output Expected output vector corresponding to 'input'
+         * @param parameters Network parameters (weights and biases)
+         * @return Value of the residual
+         */
+        virtual double
+        calculate_single_residual(std::vector<double>* input,
+                                  std::vector<double>* output,
+                                  std::vector<double>* parameters = nullptr) = 0;
+
     protected:
 
         /**
@@ -189,8 +222,7 @@ namespace lib4neuro {
         DataSet* ds_test = nullptr;
     };
 
-
-    class MSE : public ErrorFunction {
+    class MSE : public ErrorFunction {
 
     public:
         /**
@@ -205,7 +237,9 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        LIB4NEURO_API double eval(std::vector<double>* weights = nullptr) override;
+        LIB4NEURO_API double eval(std::vector<double>* weights = nullptr,
+                                  bool denormalize_data = false,
+                                  bool verbose = false) override;
 
         /**
          *
@@ -233,12 +267,47 @@ namespace lib4neuro {
                                  double alpha = 1.0,
                                  size_t batch = 0) override;
 
+        /**
+         * Evaluates the function f(x) = 0 - MSE(x) for a
+         * specified input x
+         *
+         * @param input Single input vector
+         * @param output Expected output vector corresponding to 'input'
+         * @param parameters Network parameters (weights and biases)
+         * @return Value of the residual f(x) = 0 - MSE(x)
+         */
+        LIB4NEURO_API
+        virtual double calculate_single_residual(std::vector<double>* input,
+                                                 std::vector<double>* output,
+                                                 std::vector<double>* parameters) override;
+
+        /**
+         * Compute gradient of the residual function f(x) = 0 - MSE(x) for a specific input x.
+         * The method uses the central difference method.
+         *
+         * @param[in] input Vector being a single input
+         * @param[in] output Vector of expected outputs corresponding to 'input'
+         * @param[out] gradient Resulting gradient
+         * @param[in] h Step used in the central difference
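+         *
+         * For every parameter p_i the method evaluates
+         *   gradient_i ~ (f(p + delta*e_i) - f(p - delta*e_i)) / (2*delta),
+         * with the relative step delta = h * (1 + |p_i|).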
+         */
+        LIB4NEURO_API void
+        calculate_residual_gradient(std::vector<double>* input,
+                                    std::vector<double>* output,
+                                    std::vector<double>* gradient,
+                                    double h = 1e-3) override;
+
+        /**
+         * Evaluates the error for a single input-output pair
+         *
+         * @param input Single input vector
+         * @param output Expected output vector corresponding to 'input'
+         * @param weights Network parameters (current ones are used when nullptr)
+         * @return Error value for the given pair
+         */
+        LIB4NEURO_API double eval_on_single_input(std::vector<double>* input,
+                                                  std::vector<double>* output,
+                                                  std::vector<double>* weights = nullptr);
+
         /**
          *
          * @param weights
          * @return
          */
-        LIB4NEURO_API double eval_on_test_data(std::vector<double>* weights = nullptr) override;
+        LIB4NEURO_API double eval_on_test_data(std::vector<double>* weights = nullptr, bool verbose = false) override;
 
         /**
          *
@@ -246,7 +315,9 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        LIB4NEURO_API double eval_on_test_data(std::string results_file_path, std::vector<double>* weights = nullptr);
+        LIB4NEURO_API double eval_on_test_data(std::string results_file_path,
+                                               std::vector<double>* weights = nullptr,
+                                               bool verbose = false) override;
 
         /**
          *
@@ -254,7 +325,9 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        LIB4NEURO_API double eval_on_test_data(std::ofstream* results_file_path, std::vector<double>* weights = nullptr) override;
+        LIB4NEURO_API double eval_on_test_data(std::ofstream* results_file_path,
+                                               std::vector<double>* weights = nullptr,
+                                               bool verbose = false) override;
 
         /**
          *
@@ -265,7 +338,9 @@ namespace lib4neuro {
          */
         LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
                                               std::ofstream* results_file_path,
-                                              std::vector<double>* weights = nullptr) override;
+                                              std::vector<double>* weights = nullptr,
+                                              bool denormalize_data = false,
+                                              bool verbose = false) override;
 
         /**
          *
@@ -273,7 +348,9 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        LIB4NEURO_API double eval_on_data_set(DataSet* data_set, std::vector<double>* weights = nullptr) override;
+        LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
+                                              std::vector<double>* weights = nullptr,
+                                              bool verbose = false) override;
 
         /**
          *
@@ -282,8 +359,10 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        LIB4NEURO_API double eval_on_data_set(DataSet* data_set, std::string results_file_path,
-                                              std::vector<double>* weights = nullptr) override;
+        LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
+                                              std::string results_file_path,
+                                              std::vector<double>* weights = nullptr,
+                                              bool verbose = false) override;
 
         /**
          *
@@ -319,14 +398,16 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        LIB4NEURO_API double eval(std::vector<double>* weights = nullptr) override;
+        LIB4NEURO_API double eval(std::vector<double>* weights = nullptr,
+                                  bool denormalize_data = false,
+                                  bool verbose = false) override;
 
         /**
          *
          * @param weights
          * @return
          */
-        LIB4NEURO_API double eval_on_test_data(std::vector<double>* weights = nullptr) override;
+        LIB4NEURO_API double eval_on_test_data(std::vector<double>* weights = nullptr, bool verbose = false) override;
 
         /**
          *
@@ -334,7 +415,9 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        LIB4NEURO_API double eval_on_test_data(std::string results_file_path, std::vector<double>* weights = nullptr) override;
+        LIB4NEURO_API double eval_on_test_data(std::string results_file_path,
+                                               std::vector<double>* weights = nullptr,
+                                               bool verbose = false) override;
 
         /**
          *
@@ -342,7 +425,9 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        LIB4NEURO_API double eval_on_test_data(std::ofstream* results_file_path, std::vector<double>* weights = nullptr) override;
+        LIB4NEURO_API double eval_on_test_data(std::ofstream* results_file_path,
+                                               std::vector<double>* weights = nullptr,
+                                               bool verbose = false) override;
 
         /**
          *
@@ -350,7 +435,9 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        LIB4NEURO_API double eval_on_data_set(DataSet* data_set, std::vector<double>* weights = nullptr) override;
+        LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
+                                              std::vector<double>* weights = nullptr,
+                                              bool verbose = false) override;
 
         /**
          *
@@ -359,8 +446,10 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        LIB4NEURO_API double eval_on_data_set(DataSet* data_set, std::string results_file_path,
-                                              std::vector<double>* weights = nullptr) override;
+        LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
+                                              std::string results_file_path,
+                                              std::vector<double>* weights = nullptr,
+                                              bool verbose = false) override;
 
         /**
          *
@@ -371,7 +460,9 @@ namespace lib4neuro {
          */
         LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
                                               std::ofstream* results_file_path,
-                                              std::vector<double>* weights = nullptr) override;
+                                              std::vector<double>* weights = nullptr,
+                                              bool denormalize_data = true,
+                                              bool verbose = false) override;
 
         /**
          *
@@ -425,6 +516,19 @@ namespace lib4neuro {
                                  std::vector<double>& grad,
                                  double alpha = 1.0,
                                  size_t batch = 0) override;
+
+        LIB4NEURO_API void
+        calculate_residual_gradient(std::vector<double>* input,
+                                    std::vector<double>* output,
+                                    std::vector<double>* gradient,
+                                    double h = 1e-3) override;
+
+        LIB4NEURO_API double
+        calculate_single_residual(std::vector<double>* input,
+                                  std::vector<double>* output,
+                                  std::vector<double>* parameters = nullptr) override;
+
         /**
          *
          * @return
@@ -437,11 +541,10 @@ namespace lib4neuro {
          */
         LIB4NEURO_API DataSet* get_dataset() override;
 
-    private:
+    protected:
         std::vector<ErrorFunction*>* summand;
         std::vector<double>* summand_coefficient;
     };
-
 }
 
 #endif //INC_4NEURO_ERRORFUNCTION_H
diff --git a/src/ErrorFunction/ErrorFunctionsMock.h b/src/ErrorFunction/ErrorFunctionsMock.h
new file mode 100644
index 0000000000000000000000000000000000000000..602681853579e67ba810d60bc540e5efe7a688be
--- /dev/null
+++ b/src/ErrorFunction/ErrorFunctionsMock.h
@@ -0,0 +1,36 @@
+//
+// Created by martin on 3.2.19.
+//
+
+#ifndef LIB4NEURO_ERRORFUNCTIONSMOCK_H
+#define LIB4NEURO_ERRORFUNCTIONSMOCK_H
+
+#include "../ErrorFunction/ErrorFunctions.h"
+#include "../DataSet/DataSet.h"
+
+#include <turtle/mock.hpp>
+
+using namespace lib4neuro;
+
+
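+/* Mock of the ErrorFunction interface for the unit tests; a usage sketch
+ * (mirroring ErrorFunctions_test.cpp):
+ *
+ *   mock_ErrorFunction f;
+ *   MOCK_EXPECT(f.get_dimension).returns(1);
+ *   MOCK_EXPECT(f.eval).returns(1.75);
+ */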
+MOCK_BASE_CLASS(mock_ErrorFunction, lib4neuro::ErrorFunction)
+{
+    MOCK_METHOD(eval, 3)
+    MOCK_METHOD(get_dimension, 0)
+    MOCK_METHOD(calculate_error_gradient, 4)
+    MOCK_METHOD(calculate_residual_gradient, 4)
+    MOCK_METHOD(calculate_single_residual, 3)
+    MOCK_METHOD(get_parameters, 0)
+    MOCK_METHOD(get_dataset, 0)
+    MOCK_METHOD(get_network_instance, 0)
+    MOCK_METHOD(divide_data_train_test, 1)
+    MOCK_METHOD(return_full_data_set_for_training, 0)
+    MOCK_METHOD(eval_on_test_data, 2, double(std::vector<double>*, bool), id1)
+    MOCK_METHOD(eval_on_test_data, 3, double(std::string, std::vector<double>*, bool), id2)
+    MOCK_METHOD(eval_on_test_data, 3, double(std::ofstream*, std::vector<double>*, bool), id3)
+    MOCK_METHOD(eval_on_data_set, 3, double(DataSet*, std::vector<double>*, bool), id4)
+    MOCK_METHOD(eval_on_data_set, 4, double(DataSet*, std::string, std::vector<double>*, bool), id5)
+    MOCK_METHOD(eval_on_data_set, 5, double(DataSet*, std::ofstream*, std::vector<double>*, bool, bool), id6)
+};
+
+#endif //LIB4NEURO_ERRORFUNCTIONSMOCK_H
diff --git a/src/LearningMethods/GradientDescent.h b/src/LearningMethods/GradientDescent.h
index bd75a317f59b47a4e96f7bb7b9e253ad4ec98f23..352def273f96e59c5098d0e977d0a6548493eaa6 100644
--- a/src/LearningMethods/GradientDescent.h
+++ b/src/LearningMethods/GradientDescent.h
@@ -10,14 +10,14 @@
 
 #include "../settings.h"
 #include "../constants.h"
-#include "ILearningMethods.h"
+#include "LearningMethod.h"
 #include "../ErrorFunction/ErrorFunctions.h"
 
 namespace lib4neuro {
     /**
      *
      */
-    class GradientDescent : public ILearningMethods {
+    class GradientDescent : public GradientLearningMethod {
 
     private:
 
@@ -26,11 +26,6 @@ namespace lib4neuro {
          */
         double tolerance;
 
-        /**
-         *
-         */
-        double max_error;
-
         /**
          * Number of iterations to reset step size to tolerance/10.0
          */
@@ -41,11 +36,6 @@ namespace lib4neuro {
          */
 		size_t batch;
 
-		/**
-		 *
-		 */
-		size_t iter_max;
-
         /**
          * Maximal number of iterations - optimization will stop after that, even if not converged
          */
@@ -128,7 +118,6 @@ namespace lib4neuro {
          */
         LIB4NEURO_API std::vector<double> *get_parameters() override;
     };
-
 }
 
 #endif //INC_4NEURO_GRADIENTDESCENT_H
diff --git a/src/LearningMethods/GradientDescentBB.h b/src/LearningMethods/GradientDescentBB.h
index b93b469fb28e75935eaff862a75ac0fecdb21679..0874fa074bbb19d42d916fae60715216bfae9dfa 100644
--- a/src/LearningMethods/GradientDescentBB.h
+++ b/src/LearningMethods/GradientDescentBB.h
@@ -11,14 +11,14 @@
 
 #include "../settings.h"
 #include "../constants.h"
-#include "ILearningMethods.h"
+#include "LearningMethod.h"
 #include "../ErrorFunction/ErrorFunctions.h"
 
 namespace lib4neuro {
     /**
      *
      */
-    class GradientDescentBB : public ILearningMethods {
+    class GradientDescentBB : public GradientLearningMethod {
 
     private:
 
diff --git a/src/LearningMethods/GradientDescentSingleItem.h b/src/LearningMethods/GradientDescentSingleItem.h
index 73ebf17b2ec8198a177dd25d3ba569f48659caba..b27d1e62ad884aa48001b1f3398a84a75e9b55a3 100644
--- a/src/LearningMethods/GradientDescentSingleItem.h
+++ b/src/LearningMethods/GradientDescentSingleItem.h
@@ -11,7 +11,7 @@
 
 #include "../settings.h"
 #include "../constants.h"
-#include "ILearningMethods.h"
+#include "LearningMethod.h"
 #include "../ErrorFunction/ErrorFunctions.h"
 #include "GradientDescentBB.h"
 
@@ -19,7 +19,7 @@ namespace lib4neuro {
     /**
      *
      */
-    class GradientDescentSingleItem : public ILearningMethods {
+    class GradientDescentSingleItem : public GradientLearningMethod {
 
     private:
 
diff --git a/src/LearningMethods/ILearningMethods.cpp b/src/LearningMethods/ILearningMethods.cpp
deleted file mode 100644
index 6aa47daf102bea9655ef0dc8f33c9dce54073092..0000000000000000000000000000000000000000
--- a/src/LearningMethods/ILearningMethods.cpp
+++ /dev/null
@@ -1,9 +0,0 @@
-/**
- * DESCRIPTION OF THE FILE
- *
- * @author Michal KravÄŤenko
- * @date 10.9.18 -
- */
-
-#include "ILearningMethods.h"
-
diff --git a/src/LearningMethods/ILearningMethods.h b/src/LearningMethods/ILearningMethods.h
deleted file mode 100644
index 80c1a939f8d9f173476f81c79d407ba0fe3d6cda..0000000000000000000000000000000000000000
--- a/src/LearningMethods/ILearningMethods.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * This file contains an interface for all learning methods in the library
- *
- * @author Michal KravÄŤenko
- * @date 12.8.18 -
- */
-
-#ifndef LIB4NEURO_ILEARNINGMETHODS_H
-#define LIB4NEURO_ILEARNINGMETHODS_H
-
-#include <vector>
-#include "../ErrorFunction/ErrorFunctions.h"
-
-
-namespace lib4neuro {
-    enum LearningMethodType {
-        MethodGradientDescent,
-        MethodParticleSwarm
-    };
-}
-
-class ILearningMethods {
-private:
-
-    /**
-     *
-     */
-    lib4neuro::ErrorFunction *ef = nullptr;
-
-
-
-public:
-    /**
-     * Runs the method specific learning algorithm minimizing the given error function
-     */
-    virtual void optimize( lib4neuro::ErrorFunction &ef, std::ofstream* ofs = nullptr ) = 0;
-
-     /**
-     * Updates the optimal weight&bias settings in the passed vector
-     */
-    virtual std::vector<double>* get_parameters( ) = 0;
-};
-
-
-#endif //LIB4NEURO_ILEARNINGMETHODS_H
diff --git a/src/LearningMethods/LearningMethod.h b/src/LearningMethods/LearningMethod.h
new file mode 100644
index 0000000000000000000000000000000000000000..1963d63f0a1dfd57e2c1e458f03ec4d3fa2f4351
--- /dev/null
+++ b/src/LearningMethods/LearningMethod.h
@@ -0,0 +1,41 @@
+/**
+ * This file contains an interface for all learning methods in the library
+ *
+ * @author Michal KravÄŤenko
+ * @date 12.8.18 -
+ */
+
+#ifndef LIB4NEURO_ILEARNINGMETHODS_H
+#define LIB4NEURO_ILEARNINGMETHODS_H
+
+#include <vector>
+#include "../ErrorFunction/ErrorFunctions.h"
+
+
+namespace lib4neuro {
+    class LearningMethod {
+    public:
+        /**
+         * Runs the method specific learning algorithm minimizing the given error function
+         */
+        virtual void optimize(lib4neuro::ErrorFunction& ef,
+                              std::ofstream* ofs = nullptr) = 0;
+
+        /**
+         * Updates the optimal weight&bias settings in the passed vector
+         */
+        virtual std::vector<double>* get_parameters() = 0;
+    };
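+
+    /* Usage sketch: optimizers are interchangeable through this interface;
+     * for any concrete method 'm' derived from LearningMethod:
+     *
+     *   LearningMethod* method = &m;
+     *   method->optimize(error_function);
+     *   std::vector<double>* best = method->get_parameters();
+     */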
+
+    class GradientLearningMethod : public LearningMethod {
+    public:
+        /**
+         * Runs the method specific learning algorithm minimizing the given error function
+         */
+        virtual void optimize(ErrorFunction& ef,
+                              std::ofstream* ofs = nullptr) override;
+
+    };
+}
+
+#endif //LIB4NEURO_ILEARNINGMETHODS_H
diff --git a/src/LearningMethods/LearningMethods.cpp b/src/LearningMethods/LearningMethods.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8c4b3b48a97a13d3cec656e2e9226aa847d9d667
--- /dev/null
+++ b/src/LearningMethods/LearningMethods.cpp
@@ -0,0 +1,15 @@
+/**
+ * DESCRIPTION OF THE FILE
+ *
+ * @author Michal KravÄŤenko
+ * @date 10.9.18 -
+ */
+
+#include "LearningMethod.h"
+
+namespace lib4neuro {
+    void GradientLearningMethod::optimize(ErrorFunction& ef,
+                                          std::ofstream* ofs) {
+        /* Every concrete gradient-based method overrides this routine;
+         * calling this->optimize(ef, ofs) from here would recurse
+         * indefinitely, so the common entry point is a deliberate no-op. */
+        (void) ef;
+        (void) ofs;
+    }
+}
\ No newline at end of file
diff --git a/src/LearningMethods/LearningSequence.cpp b/src/LearningMethods/LearningSequence.cpp
index c0b786ea4eac1c6b9639e39c89eedfcdf85197fa..fbfa7f8014599a09ce033a624292d20aa3171f39 100644
--- a/src/LearningMethods/LearningSequence.cpp
+++ b/src/LearningMethods/LearningSequence.cpp
@@ -27,7 +27,7 @@ namespace lib4neuro {
         return nullptr;
     }
 
-    void LearningSequence::add_learning_method(ILearningMethods *method) {
+    void LearningSequence::add_learning_method(LearningMethod *method) {
         this->learning_sequence.push_back( method );
     }
 
diff --git a/src/LearningMethods/LearningSequence.h b/src/LearningMethods/LearningSequence.h
index ef38950bc406b9faeecef831691fc7315a3dc490..bb97591093d0504099fb1bf078b4ab3629679297 100644
--- a/src/LearningMethods/LearningSequence.h
+++ b/src/LearningMethods/LearningSequence.h
@@ -11,20 +11,20 @@
 #include <4neuro.h>
 #include "../settings.h"
 #include "../constants.h"
-#include "ILearningMethods.h"
+#include "LearningMethod.h"
 
 namespace lib4neuro {
     /**
      *
      */
-    class LearningSequence : public ILearningMethods {
+    class LearningSequence : public LearningMethod {
 
     private:
 
         /**
          *
          */
-        std::vector<ILearningMethods*> learning_sequence;
+        std::vector<LearningMethod*> learning_sequence;
 
         /**
          *
@@ -71,7 +71,7 @@ namespace lib4neuro {
          *
          * @param method
          */
-        LIB4NEURO_API void add_learning_method( ILearningMethods * method );
+        LIB4NEURO_API void add_learning_method( LearningMethod * method );
     };
 
 }
diff --git a/src/LearningMethods/LevenbergMarquardt.cpp b/src/LearningMethods/LevenbergMarquardt.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..28e811bac7103c053893897c10470fb69b797da6
--- /dev/null
+++ b/src/LearningMethods/LevenbergMarquardt.cpp
@@ -0,0 +1,216 @@
+#include <armadillo>
+
+#include "LevenbergMarquardt.h"
+#include "../message.h"
+
+struct lib4neuro::LevenbergMarquardt::LevenbergMarquardtImpl {
+    /**
+     * Threshold for the successful ending of the optimization - deviation from minima
+     */
+    double tolerance;
+
+    double tolerance_gradient;
+
+    double tolerance_parameters;
+
+    double LM_step_acceptance_threshold;
+
+    double lambda_initial;
+
+    double lambda_increase;
+
+    double lambda_decrease;
+
+    /**
+     * Maximal number of iterations - optimization will stop after that, even if not converged
+     */
+    long long int maximum_niters;
+
+    /**
+     * Vector of minimum coordinates
+     */
+    std::vector<double> *optimal_parameters;
+
+    /**
+     * Fills the Jacobian matrix of the residual function, one row per data
+     * point: J(i, j) = d r_i / d w_j. The rows are obtained from the
+     * network's backpropagated gradients via add_to_gradient_single().
+     *
+     * @param ef Error function providing the network and the data set
+     * @param J Preallocated (n_data_points x n_parameters) matrix to fill
+     * @param h Step size (unused by the backpropagation-based implementation)
+     * @return Pointer to the filled Jacobian matrix J
+     */
+    arma::Mat<double>* get_Jacobian_matrix(lib4neuro::ErrorFunction& ef,
+                                           arma::Mat<double>* J,
+                                           double h=1e-3);
+};
+
+arma::Mat<double>* lib4neuro::LevenbergMarquardt::LevenbergMarquardtImpl::get_Jacobian_matrix(
+        lib4neuro::ErrorFunction& ef,
+        arma::Mat<double>* J,
+        double h) {
+
+    size_t n_parameters = ef.get_dimension();
+    size_t n_data_points = ef.get_dataset()->get_n_elements();
+    std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = ef.get_dataset()->get_data();
+    std::vector<double> grad(n_parameters);
+    std::vector<double> error_scaling(ef.get_network_instance()->get_n_outputs());
+    std::fill(error_scaling.begin(), error_scaling.end(), 1.0);
+
+    for(size_t i = 0; i < n_data_points; i++) {
+
+        std::fill(grad.begin(), grad.end(), 0.0);
+        ef.get_network_instance()->add_to_gradient_single(data->at(i).first, error_scaling, 1.0, grad);
+
+        for(size_t j = 0; j < n_parameters; j++) {
+            J->at(i, j) = grad.at(j);
+        }
+    }
+
+    return J;
+}
+
+namespace lib4neuro {
+    LevenbergMarquardt::LevenbergMarquardt(int max_iters,
+                                           double tolerance,
+                                           double tolerance_gradient,
+                                           double tolerance_parameters,
+                                           double LM_step_acceptance_threshold,
+                                           double lambda_initial,
+                                           double lambda_increase,
+                                           double lambda_decrease) : p_impl(new LevenbergMarquardtImpl()) {
+
+        this->p_impl->tolerance = tolerance;
+        this->p_impl->tolerance_gradient = tolerance_gradient;
+        this->p_impl->tolerance_parameters = tolerance_parameters;
+        this->p_impl->LM_step_acceptance_threshold = LM_step_acceptance_threshold;
+        this->p_impl->lambda_initial = lambda_initial;
+        this->p_impl->lambda_increase = lambda_increase;
+        this->p_impl->lambda_decrease = lambda_decrease;
+        this->p_impl->maximum_niters = max_iters;
+    }
+
+    void LevenbergMarquardt::optimize(lib4neuro::ErrorFunction& ef,
+                                      std::ofstream* ofs) {
+        optimize(ef, LM_UPDATE_TYPE::MARQUARDT, ofs);
+    }
+
+    void LevenbergMarquardt::optimize(lib4neuro::ErrorFunction& ef,
+                                      lib4neuro::LM_UPDATE_TYPE update_type,
+                                      std::ofstream* ofs) {
+
+        /* Copy data set max and min values, if it's normalized */
+        if(ef.get_dataset()->is_normalized()) {
+            ef.get_network_instance()->set_normalization_strategy_instance(
+                    ef.get_dataset()->get_normalization_strategy());
+        }
+
+        double current_err = ef.eval();
+
+        COUT_INFO("Finding a solution via a Levenberg-Marquardt method with adaptive step-length... Starting error: " << current_err << std::endl);
+        if(ofs && ofs->is_open()) {
+            *ofs << "Finding a solution via a Levenberg-Marquardt method with adaptive step-length... Starting error: " << current_err << std::endl;
+        }
+
+        size_t n_parameters = ef.get_dimension();
+        size_t n_data_points = ef.get_dataset()->get_n_elements();
+        std::vector<double> *params_current = ef.get_parameters();
+        std::vector<double> *params_tmp = new std::vector<double>(n_parameters);
+        arma::Mat<double> J(n_data_points, n_parameters);  // Jacobian matrix
+        arma::Mat<double> H(n_parameters, n_parameters);  // Approximate Hessian (J^T J)
+        arma::Mat<double> H_new(n_parameters, n_parameters);
+
+        std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = ef.get_dataset()->get_data();
+
+        double lambda = this->p_impl->lambda_initial;  // Damping parameter
+        double prev_err;
+
+        bool update_J = true;
+        arma::Col<double> update;
+        std::vector<double> d_prep(n_data_points);
+        arma::Col<double> d;  // Residual vector
+
+        //-------------------//
+        // Solver iterations //
+        //-------------------//
+        size_t iter_counter = 0;
+        do {
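+            /* One iteration: refresh J and H = J'J if the previous step was
+             * accepted, damp H, solve for the parameter update, then accept or
+             * reject the update based on the new error value */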
+            if(update_J) {
+                /* Get Jacobian matrix */
+                //TODO rewrite so that it does not depend on the particular error function used
+                this->p_impl->get_Jacobian_matrix(ef, &J);
+
+                /* Get approximation of Hessian (H ~ J'*J) */
+                H = J.t() * J;
+
+                /* Evaluate the error before updating parameters */
+                prev_err = ef.eval();
+            }
+
+            /* H_new = H + lambda*I */
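+            /* (large lambda -> gradient-descent-like step,
+             *  small lambda -> Gauss-Newton-like step) */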
+            H_new = H + lambda * arma::eye<arma::Mat<double>>(n_parameters, n_parameters);
+
+            /* Compute the residual vector */
+            for(size_t i = 0; i < n_data_points; i++) {
+                //TODO rewrite so that it does not depend on the particular error function used
+                d_prep.at(i) = ef.calculate_single_residual(&data->at(i).first, &data->at(i).second);
+            }
+            d = arma::Col<double>(d_prep);
+
+            /* Compute the update vector */
+            update = -H_new.i() * (J.t() * d);
+
+            /* Compute the error after update of parameters */
+            for(size_t i = 0; i < n_parameters; i++) {
+                params_tmp->at(i) = params_current->at(i) + update.at(i);  // x_new = x + update
+            }
+            current_err = ef.eval(params_tmp);
+
+            /* Check, if the parameter update improved the function */
+            if(current_err < prev_err) {
+
+                /* If the convergence threshold is achieved, finish the computation */
+                if(current_err < this->p_impl->tolerance) {
+                    break;
+                }
+
+                /* If the error is lowered after parameter update, accept the new parameters and lower the damping
+                 * factor lambda */
+
+                //TODO rewrite without division!
+                lambda /= this->p_impl->lambda_decrease;
+
+                for(size_t i = 0; i < n_parameters; i++) {
+                    params_current->at(i) = params_tmp->at(i);
+                }
+
+                prev_err = current_err;
+                update_J = true;
+
+//                COUT_DEBUG("Iteration: " << iter_counter << " Current error: " << current_err << std::endl);
+
+            } else {
+                /* If the error after parameters update is not lower, increase the damping factor lambda */
+                update_J = false;
+                lambda *= this->p_impl->lambda_increase;
+            }
+
+            COUT_DEBUG("Iteration: " << iter_counter << " Current error: " << current_err << std::endl);
+
+
+        } while(iter_counter++ < this->p_impl->maximum_niters);
+
+        /* Store the optimized parameters and release the temporary buffer */
+        delete params_tmp;
+        this->p_impl->optimal_parameters = params_current;
+    }
+
+    std::vector<double>* LevenbergMarquardt::get_parameters() {
+        return this->p_impl->optimal_parameters;
+    }
+
+    LevenbergMarquardt::~LevenbergMarquardt() = default;
+}
\ No newline at end of file
diff --git a/src/LearningMethods/LevenbergMarquardt.h b/src/LearningMethods/LevenbergMarquardt.h
new file mode 100644
index 0000000000000000000000000000000000000000..8473c9e6f4f0a7df212113d24cf810f15b413f99
--- /dev/null
+++ b/src/LearningMethods/LevenbergMarquardt.h
@@ -0,0 +1,56 @@
+//
+// Created by martin on 9.2.19.
+//
+
+#ifndef LIB4NEURO_LEVENBERGMARQUARDT_H
+#define LIB4NEURO_LEVENBERGMARQUARDT_H
+
+#include <memory>
+
+#include "LearningMethod.h"
+
+namespace lib4neuro {
+
+    enum LM_UPDATE_TYPE {
+        MARQUARDT,
+        QUADRATIC,
+        NIELSEN
+    };
+
+    /**
+     * Method implementing Levenberg-Marquardt optimization algorithm
+     *
+     * This version is described in the paper:
+     * Gavin, Henri. "The Levenberg-Marquardt method for nonlinear least squares curve-fitting problems."
+     * Department of Civil and Environmental Engineering, Duke University (2011): 1-15.
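+     *
+     * A minimal usage sketch (assuming an ErrorFunction 'ef' with an attached
+     * network and data set):
+     *
+     *   lib4neuro::LevenbergMarquardt lm(100);  // at most 100 iterations
+     *   lm.optimize(ef);                        // uses the MARQUARDT update type
+     *   std::vector<double>* params = lm.get_parameters();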
+     */
+    class LevenbergMarquardt : public GradientLearningMethod {
+
+    private:
+        struct LevenbergMarquardtImpl;
+        std::unique_ptr<LevenbergMarquardtImpl> p_impl;
+
+    public:
+        LevenbergMarquardt(int max_iters,
+                           double tolerance=1e-2,
+                           double tolerance_gradient=1e-3,
+                           double tolerance_parameters=1e-3,
+                           double LM_step_acceptance_threshold=1e-1,
+                           double lambda_initial=1e-2,
+                           double lambda_increase=11,
+                           double lambda_decrease=9);
+
+        void optimize(ErrorFunction &ef, std::ofstream* ofs = nullptr) override;
+
+        void optimize(ErrorFunction &ef,
+                      LM_UPDATE_TYPE update_type,
+                      std::ofstream* ofs = nullptr);
+
+        std::vector<double>* get_parameters() override;
+
+        ~LevenbergMarquardt();
+    };
+
+}
+
+#endif //LIB4NEURO_LEVENBERGMARQUARDT_H
diff --git a/src/LearningMethods/ParticleSwarm.cpp b/src/LearningMethods/ParticleSwarm.cpp
index e8c3154df1b1795a0784c736c2a44918bccc21e7..05c41dde55319cef9b1082381355f6b41732471d 100644
--- a/src/LearningMethods/ParticleSwarm.cpp
+++ b/src/LearningMethods/ParticleSwarm.cpp
@@ -209,30 +209,12 @@ namespace lib4neuro {
                     "Parameters 'gamma', 'epsilon' and 'delta' must be greater than or equal to zero!");
         }
 
-        this->c1 = c1;
-
-        this->c2 = c2;
-
-        this->c3 = (c1 + c2) / 2.0;
-
-        this->w = w;
-
         this->gamma = gamma;
-
         this->epsilon = epsilon;
-
         this->delta = delta;
+        this->pst = PARTICLE_SWARM_TYPE::GENERAL;
 
-        this->n_particles = n_particles;
-
-        this->iter_max = iter_max;
-
-        this->particle_swarm = new std::vector<Particle *>(this->n_particles);
-
-        this->domain_bounds = domain_bounds;
-
-        std::fill(this->particle_swarm->begin(), this->particle_swarm->end(), nullptr);
-
+        this->init_constructor(domain_bounds, c1, c2, w, n_particles, iter_max);
     }
 
     ParticleSwarm::~ParticleSwarm() {
@@ -260,6 +242,8 @@ namespace lib4neuro {
     void ParticleSwarm::optimize(lib4neuro::ErrorFunction &ef, std::ofstream* ofs) {
         //TODO add output to the 'ofs'
 
+        COUT_INFO("Finding optima via Globalized Particle Swarm method..." << std::endl);
+
         /* Copy data set max and min values, if it's normalized */
         if(ef.get_dataset()->is_normalized()) {
             ef.get_network_instance()->set_normalization_strategy_instance(
@@ -302,6 +286,7 @@ namespace lib4neuro {
         double max_vel_step = 0;
         double prev_max_vel_step;
         double euclidean_dist;
+        double current_err = -1;
 
         this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value);
 //    for(unsigned int i = 0; i < this->n_particles; ++i){
@@ -383,23 +368,41 @@ namespace lib4neuro {
 //            }
 //        }
 
+            current_err = ef.eval(this->p_min_glob);
+
             COUT_DEBUG(std::string("Iteration: ") << (unsigned int)(outer_it)
-                                                  << ". Total error: " << optimal_value
-                                                  << ".\r" );
+                                                  << ". Total error: " << current_err
+                                                  << ". Objective function value: " << optimal_value
+                                                  << ".\r");
+
+            if(this->err_thresh) {
+
+                /* If the error threshold is given, then check the current error */
+                if(current_err <= this->err_thresh) {
+                    break;
+                }
+            } else {
 
-            /* Check if the particles are near to each other AND the maximal velocity is less than 'gamma' */
-            if (cluster.size() >= this->delta * this->n_particles && prev_max_vel_step <= this->gamma * max_vel_step) {
-                break;
+                /* Check if the particles are near to each other AND the maximal velocity is less than 'gamma' */
+                if (cluster.size() >= this->delta * this->n_particles &&
+                    prev_max_vel_step <= this->gamma * max_vel_step) {
+                    break;
+                }
             }
 
             outer_it++;
+
+            //TODO parameter for inertia weight decrease?
 //        this->w *= 0.99;
+
         }
         COUT_DEBUG(std::string("Iteration: ") << (unsigned int)(outer_it)
-                                              << ". Total error: " << optimal_value
-                                              << ".\n" );
+                                              << ". Total error: " << current_err
+                                              << ". Objective function value: " << optimal_value
+                                              << "." << std::endl );
 
         this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value);
+        //TODO rewrite following output using COUT_INFO
         if (outer_it < this->iter_max) {
             /* Convergence reached */
             COUT_INFO( std::endl << "Found optimum in "  <<  outer_it << " iterations. Objective function value: " << optimal_value << std::endl);
@@ -407,18 +410,31 @@ namespace lib4neuro {
             /* Maximal number of iterations reached */
             COUT_INFO( std::endl << "Max number of iterations reached ("  <<  outer_it << ")!  Objective function value: " << optimal_value <<std:: endl);
         }
-//    for (size_t i = 0; i <= this->func_dim - 1; ++i) {
-//        printf("%10.8f \n", (*this->p_min_glob)[i]);
-//    }
-//
-
-        ef.eval( this->get_parameters() );
 
         ef.eval( this->get_parameters() );
 
         delete centroid;
     }
 
+    ParticleSwarm::ParticleSwarm(std::vector<double>* domain_bounds,
+                                 double err_thresh,
+                                 PARTICLE_SWARM_TYPE pst,
+                                 double c1,
+                                 double c2,
+                                 double w,
+                                 size_t n_particles,
+                                 size_t iter_max) {
+
+        if(err_thresh <= 0) {
+            THROW_INVALID_ARGUMENT_ERROR("Error threshold has to be greater than 0!");
+        }
+
+        this->err_thresh = err_thresh;
+        this->pst = pst;
+
+        this->init_constructor(domain_bounds, c1, c2, w, n_particles, iter_max);
+    }
+
     Particle *ParticleSwarm::determine_optimal_coordinate_and_value(std::vector<double> &coord, double &val) {
 
         Particle *p;
@@ -474,4 +490,21 @@ namespace lib4neuro {
         return this->p_min_glob;
     }
 
+    void ParticleSwarm::init_constructor(std::vector<double>* domain_bounds,
+                                         double c1,
+                                         double c2,
+                                         double w,
+                                         size_t n_particles,
+                                         size_t iter_max) {
+        this->c1 = c1;
+        this->c2 = c2;
+        this->c3 = (c1 + c2) / 2.0;
+        this->w = w;
+        this->n_particles = n_particles;
+        this->iter_max = iter_max;
+        this->particle_swarm = new std::vector<Particle *>(this->n_particles);
+        this->domain_bounds = domain_bounds;
+        std::fill(this->particle_swarm->begin(), this->particle_swarm->end(), nullptr);
+    }
+
 }
\ No newline at end of file
diff --git a/src/LearningMethods/ParticleSwarm.h b/src/LearningMethods/ParticleSwarm.h
index fd371b73918fb741b05fbb3a6c52ab7728445bec..b6b8d9318debfc1a84d93f671836f728b53c5898 100644
--- a/src/LearningMethods/ParticleSwarm.h
+++ b/src/LearningMethods/ParticleSwarm.h
@@ -10,7 +10,9 @@
 
 #include "../settings.h"
 #include "../ErrorFunction/ErrorFunctions.h"
-#include "ILearningMethods.h"
+#include "LearningMethod.h"
 
 /**
  *
@@ -92,10 +94,20 @@ public:
 };
 
 namespace lib4neuro {
+
+    /**
+     * Particle Swarm method type: distinguishes the general version from the
+     * version that expects the cost function minimum to be 0
+     */
+    enum PARTICLE_SWARM_TYPE {
+        GENERAL,
+        MIN_ZERO
+    };
+
     /**
      * Class implementing the Global Particle Swarm Optimization method
      */
-    class ParticleSwarm : public ILearningMethods {
+    class ParticleSwarm : public LearningMethod {
 
     private:
 
@@ -154,6 +166,18 @@ namespace lib4neuro {
          */
         double delta;
 
+        /**
+         * Error threshold - determines a successful convergence
+         *
+         * Must be greater than 0!
+         */
+        double err_thresh = 0;
+
+        /**
+         * Type of particle swarm optimizer
+         */
+        PARTICLE_SWARM_TYPE pst;
+
         /**
          * Bounds for every optimized parameter (p1_lower, p1_upper, p2_lower, p2_upper...)
          */
@@ -188,6 +212,16 @@ namespace lib4neuro {
          */
         LIB4NEURO_API double get_euclidean_distance(std::vector<double> *a, std::vector<double> *b);
 
+        /**
+         * Common initialization code shared by all constructors
+         */
+        void init_constructor(std::vector<double> *domain_bounds,
+                              double c1,
+                              double c2,
+                              double w,
+                              size_t n_particles,
+                              size_t iter_max);
+
     public:
 
         /**
@@ -215,6 +249,35 @@ namespace lib4neuro {
                 size_t iter_max = 1000
         );
 
+        /**
+         * Creates an instance of the Global Particle Swarm Optimizer
+         *
+         * WARNING: This constructor expects the cost function minimum to be 0!
+         *
+         * @TODO rewrite WITHOUT PARTICLE_SWARM_TYPE parameter!
+         *
+     * @param domain_bounds Bounds for every optimized parameter (p1_lower, p1_upper, p2_lower, p2_upper...)
+     * @param err_thresh Error threshold for the method to converge successfully - depends on the used
+     *                   ErrorFunction
+     * @param pst Method type
+     * @param c1 Cognitive parameter
+     * @param c2 Social parameter
+     * @param w Inertia weight
+     * @param n_particles Number of particles in the swarm
+     * @param iter_max Maximal number of iterations - optimization will stop after that, even if not converged
+     */
+        LIB4NEURO_API explicit ParticleSwarm(
+                std::vector<double> *domain_bounds,
+                double err_thresh,
+                PARTICLE_SWARM_TYPE pst,
+                double c1 = 1.711897,
+                double c2 = 1.711897,
+                double w = 0.711897,
+                size_t n_particles = 50,
+                size_t iter_max = 1000
+        );
+
         /**
          *
          */
diff --git a/src/Network/NeuralNetwork.cpp b/src/Network/NeuralNetwork.cpp
index e566efad1f8451af0552f9fe3d053738da456e05..e18b7180dfe5a8aa4b54c05fce106530e36c4844 100644
--- a/src/Network/NeuralNetwork.cpp
+++ b/src/Network/NeuralNetwork.cpp
@@ -474,8 +474,10 @@ namespace lib4neuro {
         this->delete_weights = false;
     }
 
-    void NeuralNetwork::eval_single(::std::vector<double> &input, ::std::vector<double> &output,
-                                    ::std::vector<double> *custom_weights_and_biases) {
+    void NeuralNetwork::eval_single(::std::vector<double>& input,
+                                    ::std::vector<double>& output,
+                                    ::std::vector<double>* custom_weights_and_biases) {
+
         if ((this->input_neuron_indices->size() * this->output_neuron_indices->size()) <= 0) {
             THROW_INVALID_ARGUMENT_ERROR("Input and output neurons have not been specified!");
         }
diff --git a/src/Solvers/DESolver.cpp b/src/Solvers/DESolver.cpp
index cccdefdd60115600f08a18e6a9346465e6d20f4a..e201c565c3ef2186a3ce87da5741e778c8c2e317 100644
--- a/src/Solvers/DESolver.cpp
+++ b/src/Solvers/DESolver.cpp
@@ -361,7 +361,7 @@ namespace lib4neuro {
     }
 
 //TODO instead use general method with Optimizer as its argument (create hierarchy of optimizers)
-    void DESolver::solve(ILearningMethods &learning_method) {
+    void DESolver::solve(LearningMethod &learning_method) {
 
         NeuralNetwork *nn;
         DataSet *ds;
diff --git a/src/Solvers/DESolver.h b/src/Solvers/DESolver.h
index 294eef706b2709d01636f68bf704eeb8230419e3..cab736bcc4a8c9d2ba9d8087d04c200f47ff5e4c 100644
--- a/src/Solvers/DESolver.h
+++ b/src/Solvers/DESolver.h
@@ -149,7 +149,7 @@ namespace lib4neuro {
          *
          * @param learning_method
          */
-        LIB4NEURO_API void solve(ILearningMethods &learning_method);
+        LIB4NEURO_API void solve(LearningMethod &learning_method);
 
         /**
          *
diff --git a/src/examples/net_test_3.cpp b/src/examples/net_test_3.cpp
index df83dbddb0f8b5aff2c2f4c37e7849823f6d820b..da241cf77e4ae2871fc1cc2e4aa062e89a153682 100644
--- a/src/examples/net_test_3.cpp
+++ b/src/examples/net_test_3.cpp
@@ -23,7 +23,7 @@ double get_difference(std::vector<double> &a, std::vector<double> &b){
 
     for( size_t i = 0; i < a.size(); ++i ){
 
-        std::cout << a[i] << " - " << b[i] << std::endl;
+//        std::cout << a[i] << " - " << b[i] << std::endl;
 
         m = a[i]-b[i];
         out += m * m;
@@ -51,8 +51,8 @@ void calculate_gradient_analytical(std::vector<double> &input, std::vector<doubl
 
 int main(int argc, char** argv) {
 
-    int n_tests = 1;
-    int n_hidden_neurons = 2;
+    int n_tests = 10000;
+    int n_hidden_neurons = 20;
     try {
         /* Numbers of neurons in layers (including input and output layers) */
         std::vector<unsigned int> neuron_numbers_in_layers(3);
@@ -77,8 +77,8 @@ int main(int argc, char** argv) {
 
         size_t n_good = 0, n_bad = 0;
 
-        nn1.write_weights();
-        nn1.write_biases();
+//        nn1.write_weights();
+//        nn1.write_biases();
         for(int i = 0; i < n_tests; ++i){
 
             std::vector<double> input(1);
@@ -91,14 +91,14 @@ int main(int argc, char** argv) {
             std::fill(gradient_backprogation.begin(), gradient_backprogation.end(), 0);
             std::fill(gradient_analytical.begin(), gradient_analytical.end(), 0);
 
-            nn1.eval_single_debug(input, output);
+            nn1.eval_single(input, output);
 
             calculate_gradient_analytical(input, *parameter_biases, *parameter_weights, n_hidden_neurons, gradient_analytical );
-            nn1.add_to_gradient_single_debug(input, error_derivative, 1, gradient_backprogation);
+            nn1.add_to_gradient_single(input, error_derivative, 1, gradient_backprogation);
 
             double diff = get_difference(gradient_backprogation, gradient_analytical);
 
-            if ( diff < 1e-32 ){
+            if ( diff < 1e-6 ){
                 n_good++;
             }
             else{
diff --git a/src/examples/simulator.cpp b/src/examples/simulator.cpp
index 053c78dcdbd367b4cab1fa81d9372596e1abb4c6..1126529bb547fadaa6f05ec20a5d4fc24118d79d 100644
--- a/src/examples/simulator.cpp
+++ b/src/examples/simulator.cpp
@@ -81,11 +81,14 @@ int main(int argc, char** argv) {
         // 3) Maximal number of iterations - optimization will stop after that, even if not converged
         l4n::GradientDescentBB gs(prec, restart_interval, max_n_iters_gradient, batch_size);
         l4n::GradientDescentSingleItem gs_si(prec, 0, 5000);
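+        /* Levenberg-Marquardt optimizer; replaces the gradient descent stages in the sequence below */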
+        l4n::LevenbergMarquardt leven(max_n_iters_gradient);
         l4n::LearningSequence learning_sequence( 1e-6, max_number_of_cycles );
+
         learning_sequence.add_learning_method( &ps );
-        learning_sequence.add_learning_method( &gs );
-        learning_sequence.add_learning_method( &gs_si );
-        learning_sequence.add_learning_method( &gs );
+//        learning_sequence.add_learning_method( &gs );
+        learning_sequence.add_learning_method( &leven );
+//        learning_sequence.add_learning_method( &gs_si );
+//        learning_sequence.add_learning_method( &gs );
 
         /* Weight and bias randomization in the network accordingly to the uniform distribution */
         nn1.randomize_parameters();
diff --git a/src/tests/ErrorFunctions_test.cpp b/src/tests/ErrorFunctions_test.cpp
index d0849efa79cc36ca0ddc6e544a9e56f46d0bf45f..02d5b4bb18c0ce45f22f5ff021e1c63abd9320dc 100644
--- a/src/tests/ErrorFunctions_test.cpp
+++ b/src/tests/ErrorFunctions_test.cpp
@@ -8,8 +8,7 @@
 
 #include "boost_unit_tests_preamble.h"
 
-#include "../ErrorFunction/ErrorFunctions.h"
-#include <turtle/mock.hpp>
+#include "../ErrorFunction/ErrorFunctionsMock.h"
 
 using namespace lib4neuro;
 
@@ -48,24 +47,6 @@ MOCK_BASE_CLASS(mock_network, lib4neuro::NeuralNetwork)
 	MOCK_METHOD(write_stats, 1, void(std::ofstream*), id9)
 };
 
-MOCK_BASE_CLASS(mock_error_fun, ErrorFunction)
-{
-	MOCK_METHOD(eval, 1)
-	MOCK_METHOD(get_dimension, 0)
-	MOCK_METHOD(calculate_error_gradient, 4)
-	MOCK_METHOD(get_parameters, 0)
-	MOCK_METHOD(get_dataset, 0)
-	MOCK_METHOD(get_network_instance, 0)
-	MOCK_METHOD(divide_data_train_test, 1)
-	MOCK_METHOD(return_full_data_set_for_training, 0)
-	MOCK_METHOD(eval_on_test_data, 1, double(std::vector<double>*), id1)
-	MOCK_METHOD(eval_on_test_data, 2, double(std::string, std::vector<double>*), id2)
-	MOCK_METHOD(eval_on_test_data, 2, double(std::ofstream*, std::vector<double>*), id3)
-	MOCK_METHOD(eval_on_data_set, 2, double(DataSet*, std::vector<double>*), id4)
-	MOCK_METHOD(eval_on_data_set, 3, double(DataSet*, std::string, std::vector<double>*), id5)
-	MOCK_METHOD(eval_on_data_set, 3, double(DataSet*, std::ofstream*, std::vector<double>*), id6)
-};
-
 MOCK_BASE_CLASS(mock_dataSet, lib4neuro::DataSet)
 {
 	mock_dataSet(std::vector<std::pair<std::vector<double>, std::vector<double>>> *i)
@@ -157,14 +138,14 @@ BOOST_AUTO_TEST_SUITE(ErrorFunctions_test);
 	
     BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_SUM_Add_Error_Function_Test) {
         
-		mock_error_fun f;
+		mock_ErrorFunction f;
 		MOCK_EXPECT(f.get_dimension).returns(1);
         ErrorSum mse_sum;
 		BOOST_CHECK_NO_THROW(mse_sum.add_error_function(&f, 1));
     }
 	
     BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_SUM_Eval_Test) {
-		mock_error_fun f;
+		mock_ErrorFunction f;
 		MOCK_EXPECT(f.get_dimension).returns(1);
 		MOCK_EXPECT(f.eval).returns(1.75);
 		ErrorSum mse_sum;
@@ -179,7 +160,7 @@ BOOST_AUTO_TEST_SUITE(ErrorFunctions_test);
     BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_SUM_Get_Dimension_test) {
         ErrorSum mse_sum;
         BOOST_CHECK_EQUAL(0, mse_sum.get_dimension());
-		mock_error_fun f;
+		mock_ErrorFunction f;
 		MOCK_EXPECT(f.get_dimension).returns(2);
 		MOCK_EXPECT(f.eval).returns(1.75);
 		
diff --git a/src/tests/ParticleSwarm_test.cpp b/src/tests/ParticleSwarm_test.cpp
index 18116cf8c4d2832ea34507cdcdb031ec8affeae8..8d0aaf1bbae1a280f1794ec42ee3583c35c89582 100644
--- a/src/tests/ParticleSwarm_test.cpp
+++ b/src/tests/ParticleSwarm_test.cpp
@@ -7,46 +7,12 @@
 
 #define BOOST_TEST_MODULE ParticleSwarm_test
 
-#ifdef _WINDOWS
-#include <boost/test/included/unit_test.hpp>
-//#include <turtle/mock.hpp>
-#endif
-
-#ifndef BOOST_TEST_DYN_LINK
-#define BOOST_TEST_DYN_LINK
-#endif
-
-#ifndef BOOST_TEST_NO_MAIN
-#define BOOST_TEST_NO_MAIN
-#endif
-
-#include <turtle/mock.hpp>
-#include <boost/test/unit_test.hpp>
-#include <boost/test/output_test_stream.hpp>
 #include "boost_unit_tests_preamble.h"
-#include "../ErrorFunction/ErrorFunctions.h"
+#include "../ErrorFunction/ErrorFunctionsMock.h"
 #include "../LearningMethods/ParticleSwarm.h"
 
 using namespace lib4neuro;
 
-MOCK_BASE_CLASS(mock_Error, ErrorFunction)
-{
-	MOCK_METHOD(eval, 1)
-	MOCK_METHOD(get_dimension, 0)
-	MOCK_METHOD(calculate_error_gradient, 4)
-	MOCK_METHOD(get_parameters, 0)
-	MOCK_METHOD(get_dataset, 0)
-	MOCK_METHOD(get_network_instance, 0)
-	MOCK_METHOD(divide_data_train_test, 1)
-	MOCK_METHOD(return_full_data_set_for_training, 0)
-	MOCK_METHOD(eval_on_test_data, 1, double(std::vector<double>*), id1)
-	MOCK_METHOD(eval_on_test_data, 2, double(std::string, std::vector<double>*), id2)
-	MOCK_METHOD(eval_on_test_data, 2, double(std::ofstream*, std::vector<double>*), id3)
-	MOCK_METHOD(eval_on_data_set, 2, double(DataSet*, std::vector<double>*), id4)
-	MOCK_METHOD(eval_on_data_set, 3, double(DataSet*, std::string, std::vector<double>*), id5)
-	MOCK_METHOD(eval_on_data_set, 3, double(DataSet*, std::ofstream*, std::vector<double>*), id6)
-};
-
 /**
  * Boost testing suite for testing ParticleSwarm.h
  */
@@ -79,7 +45,7 @@ BOOST_AUTO_TEST_SUITE(ParticleSwarm_test)
 		domain_bound.push_back(5);
 
 
-		mock_Error error;
+		mock_ErrorFunction error;
 
 		MOCK_EXPECT(error.get_dimension).returns(5);
 		MOCK_EXPECT(error.eval).returns(0.8);
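
The same consolidation pattern applies here: the platform-specific Boost.Test boilerplate removed above is exactly what a shared boost_unit_tests_preamble.h can centralize. A plausible sketch, based only on the guards deleted in this and the following hunk (the include-guard names are hypothetical):

```cpp
#ifndef LIB4NEURO_BOOST_UNIT_TESTS_PREAMBLE_H
#define LIB4NEURO_BOOST_UNIT_TESTS_PREAMBLE_H

// Presumed contents, mirroring the guards each test used to repeat.
#ifdef _WINDOWS
#include <boost/test/included/unit_test.hpp>
#endif

#ifndef BOOST_TEST_DYN_LINK
#define BOOST_TEST_DYN_LINK
#endif

#ifndef BOOST_TEST_NO_MAIN
#define BOOST_TEST_NO_MAIN
#endif

#include <boost/test/unit_test.hpp>
#include <boost/test/output_test_stream.hpp>

#endif // LIB4NEURO_BOOST_UNIT_TESTS_PREAMBLE_H
```

Each test now only needs to define BOOST_TEST_MODULE before including the preamble, as the remaining hunks show.
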
diff --git a/src/tests/Particle_test.cpp b/src/tests/Particle_test.cpp
index 0405bec0a34337c6c43e3ee5376dccc10dbf2051..211397c5d937d457e097173ba9bcc618fe970209 100644
--- a/src/tests/Particle_test.cpp
+++ b/src/tests/Particle_test.cpp
@@ -7,45 +7,12 @@
 
 #define BOOST_TEST_MODULE Particle_test
 
-#ifdef _WINDOWS
-#include <boost/test/included/unit_test.hpp>
-//#include <turtle/mock.hpp>
-#endif
-
-#ifndef BOOST_TEST_DYN_LINK
-#define BOOST_TEST_DYN_LINK
-#endif
-
-#ifndef BOOST_TEST_NO_MAIN
-#define BOOST_TEST_NO_MAIN
-#endif
-
-#include <turtle/mock.hpp>
-#include <boost/test/unit_test.hpp>
-#include <boost/test/output_test_stream.hpp>
-
+#include "boost_unit_tests_preamble.h"
+#include "../ErrorFunction/ErrorFunctionsMock.h"
 #include "../LearningMethods/ParticleSwarm.h"
 
 using namespace lib4neuro;
 
-MOCK_BASE_CLASS(mock_Error, ErrorFunction)
-{
-    MOCK_METHOD(eval, 1)
-    MOCK_METHOD(get_dimension, 0)
-    MOCK_METHOD(calculate_error_gradient, 4)
-    MOCK_METHOD(get_parameters, 0)
-    MOCK_METHOD(get_dataset, 0)
-    MOCK_METHOD(get_network_instance, 0)
-    MOCK_METHOD(divide_data_train_test, 1)
-    MOCK_METHOD(return_full_data_set_for_training, 0)
-    MOCK_METHOD(eval_on_test_data, 1, double(std::vector<double>*), id1)
-    MOCK_METHOD(eval_on_test_data, 2, double(std::string, std::vector<double>*), id2)
-    MOCK_METHOD(eval_on_test_data, 2, double(std::ofstream*, std::vector<double>*), id3)
-    MOCK_METHOD(eval_on_data_set, 2, double(DataSet*, std::vector<double>*), id4)
-    MOCK_METHOD(eval_on_data_set, 3, double(DataSet*, std::string, std::vector<double>*), id5)
-    MOCK_METHOD(eval_on_data_set, 3, double(DataSet*, std::ofstream*, std::vector<double>*), id6)
-};
-
 /**
  * Boost testing suite for testing ParticleSwarm.h
  * TODO
@@ -54,7 +21,7 @@ BOOST_AUTO_TEST_SUITE(Particle_test)
 
 BOOST_AUTO_TEST_CASE(Particle_construction_test) {
 	std::vector<double> domain_bound{ 1, 2, 3, 4, 5 };
-	mock_Error error;
+	mock_ErrorFunction error;
 	MOCK_EXPECT(error.get_dimension).once().returns(5);
 	MOCK_EXPECT(error.eval).once().returns(0.8);
 	BOOST_CHECK_NO_THROW(Particle(&error, &domain_bound));
@@ -62,7 +29,7 @@ BOOST_AUTO_TEST_CASE(Particle_construction_test) {
 
 BOOST_AUTO_TEST_CASE(Particle_get_coordinate_test) {
 	std::vector<double> domain_bound{ 1, 2, 3, 4, 5 };
-	mock_Error error;
+	mock_ErrorFunction error;
 
 	MOCK_EXPECT(error.get_dimension).returns(5);
 	MOCK_EXPECT(error.eval).returns(0.8);
@@ -75,7 +42,7 @@ BOOST_AUTO_TEST_CASE(Particle_get_coordinate_test) {
 
 BOOST_AUTO_TEST_CASE(Particle_get_optimal_value_test) {
 	std::vector<double> domain_bound{ 1, 2, 3, 4, 5 };
-	mock_Error error;
+	mock_ErrorFunction error;
 
 	MOCK_EXPECT(error.get_dimension).returns(5);
 	MOCK_EXPECT(error.eval).returns(0.8);