diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 8ad8068f230d7d463f57971d4a9f7a06d84bd886..83afd697f9b04ea39794d2b305877132d2381759 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -62,46 +62,47 @@ win_visual_studio_static_deps:
 #    script:
 #        - './linux_run_tests.sh'
 #
-## Latest Ubuntu with Boost and Exprtk
-## compiled locally as submodules and
-## linked statically
-#ubuntu_boost_local_static_deps:
-#    tags:
-#        - centos7
-#
-#    image: martinbeseda/ubuntu-ci:latest
-#
-#    before_script:
-#        - cd build_scripts/linux
-#        - ./download_dependencies.sh
-#        - cd ../..
-#        - cd build_scripts/linux
-#        - export DEPENDENCIES_LINK_TYPE=static
-#        - ./linux_gcc_build_x64_debug_local.sh
-#
-#    script:
-#        - './linux_run_tests.sh'
-#
-## Latest Ubuntu with Boost and Exprtk
-## compiled locally as submodules and
-## linked dynamically
-#ubuntu_boost_local_dynamic_deps:
-#    tags:
-#        - centos7
-#
-#    image: martinbeseda/ubuntu-ci:latest
-#
-#    before_script:
-#        - cd build_scripts/linux
-#        - ./download_dependencies.sh
-#        - cd ../..
-#        - cd build_scripts/linux
-#        - export DEPENDENCIES_LINK_TYPE=shared
-#        - ./linux_gcc_build_x64_debug_local.sh
-#
-#    script:
-#        - './linux_run_tests.sh'
+# Latest Ubuntu with Boost and Exprtk
+# compiled locally as submodules and
+# linked statically
+ubuntu_boost_local_static_deps:
+    tags:
+        - centos7
+
+    image: martinbeseda/ubuntu-ci:latest
+
+    before_script:
+        - cd build_scripts/linux
+        - ./download_dependencies.sh
+        - export DEPENDENCIES_LINK_TYPE=static
+        - ./linux_gcc_build_x64_debug_local.sh
+        - cd ../..
 
+    script:
+        - cd build_scripts/linux
+        - './linux_run_tests.sh'
+        - cd ../..
+
+# Latest Ubuntu with Boost and Exprtk
+# compiled locally as submodules and
+# linked dynamically
+ubuntu_boost_local_dynamic_deps:
+    tags:
+        - centos7
+
+    image: martinbeseda/ubuntu-ci:latest
+
+    before_script:
+        - cd build_scripts/linux
+        - ./download_dependencies.sh
+        - export DEPENDENCIES_LINK_TYPE=shared
+        - ./linux_gcc_build_x64_debug_local.sh
+        - cd ../..
+
+    script:
+        - cd build_scripts/linux
+        - './linux_run_tests.sh'
+        - cd ../..
 
 #code_quality:
 #  image: docker:stable
diff --git a/CMakeLists.txt b/CMakeLists.txt
index bb654e3a38ddbf377a729b3f36854ba6096b1b34..a2a03cd7399ad45e2d4fb2c96bda1c574ac9c21a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -121,6 +121,7 @@ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/bin)
 message("Running CMake in: ${SRC_DIR} ${PROJECT_BINARY_DIR}")
 
 if("${BUILD_LIB}" STREQUAL "no")
+    #TODO this can be removed once we have restructured the compilation of our library
 	link_directories(${LIB4NEURO_DIR})
 	include_directories(${Boost_INCLUDE_DIRS} ${ROOT_DIR}/include ${EXPRTK_INCLUDE_DIR})
 endif()
diff --git a/build.sh b/build.sh
index cf78459333c4084d32605ca6f30adf5760fa44ac..fe05cad1cf9fb33d3894ba5b9bc3ffdbe44c6606 100755
--- a/build.sh
+++ b/build.sh
@@ -7,10 +7,10 @@
 #
 # Modular build parameters
 #
-BUILD_TESTS=yes
+BUILD_TESTS=no
 BUILD_EXAMPLES=yes
-BUILD_LIB=yes
-DEPENDENCIES_LINK_TYPE=shared # shared/static
+BUILD_LIB=no
+DEPENDENCIES_LINK_TYPE=static # shared/static
 
 # Build type (Release/Debug)
 BUILD_TYPE=Debug
diff --git a/build_scripts/linux/build_boost_gcc.sh b/build_scripts/linux/build_boost_gcc.sh
index 54953b558130b0d130f960cc20b6e737c99e3502..082905dc3510f01c421970ba2e723f46f35410fd 100755
--- a/build_scripts/linux/build_boost_gcc.sh
+++ b/build_scripts/linux/build_boost_gcc.sh
@@ -1,8 +1,5 @@
 #!/bin/bash
 
-CYAN='\033[0;36m'
-NC='\033[0m' # No Color
-
 echo "Building '${CYAN}BOOST${NC}' for ${WHITE}Debug${NC}"
 cd ../..
  
diff --git a/build_scripts/linux/download_dependencies.sh b/build_scripts/linux/download_dependencies.sh
index a6d938d1641b74bca747507ed357b5c17548a3f5..1723af15d7c6e210e2e108100759c2d5dbd7663e 100755
--- a/build_scripts/linux/download_dependencies.sh
+++ b/build_scripts/linux/download_dependencies.sh
@@ -31,5 +31,5 @@ then
 
     cd ../../build_scripts/linux
 
-    echo "${GREEN}External dependencies download \& bootstrapping finished${NC}"
+    echo "${GREEN}External dependencies download & bootstrapping finished${NC}"
 fi
diff --git a/build_scripts/linux/linux_clean_after_examples.sh b/build_scripts/linux/linux_clean_after_examples.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9f08f779979dd4e3917eb06588a0aee72605bf66
--- /dev/null
+++ b/build_scripts/linux/linux_clean_after_examples.sh
@@ -0,0 +1,8 @@
+cd ../../build
+
+rm -rf CMakeFiles
+rm -r *.*
+rm -r Makefile
+rm -rf examples
+
+cd ../build_scripts/linux
\ No newline at end of file
diff --git a/build_scripts/linux/linux_clean_after_lib.sh b/build_scripts/linux/linux_clean_after_lib.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1edaaa0fc878e4abd4ddc1a36128922fb17bb164
--- /dev/null
+++ b/build_scripts/linux/linux_clean_after_lib.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+cd ../../build
+
+rm -rf CMakeFiles
+rm -r *.*
+rm -r Makefile
+
+cd ../build_scripts/linux
\ No newline at end of file
diff --git a/build_scripts/linux/linux_clean_after_tests.sh b/build_scripts/linux/linux_clean_after_tests.sh
new file mode 100755
index 0000000000000000000000000000000000000000..38e99ccfcb8964de8562f36a3a675c9f9a3d7747
--- /dev/null
+++ b/build_scripts/linux/linux_clean_after_tests.sh
@@ -0,0 +1,7 @@
+cd ../../build
+
+rm -rf CMakeFiles
+rm -r *.*
+rm -r Makefile
+
+cd ../build_scripts/linux
\ No newline at end of file
diff --git a/build_scripts/linux/linux_clean_garbage.sh b/build_scripts/linux/linux_clean_garbage.sh
index aaf60d5345630d0d85928cf7703c2b21ac119af9..5081bb42e60f61403eefc72f2d83562009202368 100755
--- a/build_scripts/linux/linux_clean_garbage.sh
+++ b/build_scripts/linux/linux_clean_garbage.sh
@@ -2,6 +2,9 @@
 
 cd ../..
 
+rm -rf *.cbp
+rm -rf *.log
+
 rm -rf Makefile
 rm -rf docs/*
 rm -f src/*TestRunner*
@@ -9,12 +12,13 @@ rm -f src/*.o src/*.mod
 rm -f src/funit.tmp src/*_fun.f90
 rm -f CMakeCache.txt
 rm -f cmake_install.cmake src/cmake_install.cmake
-rm -rf CMakeFiles src/CMakeFiles src/examples/CMakeFiles src/tests/CMakeFiles build/CMakeFiles build/examples/CMakeFiles build/unit-tests/CMakeFiles
-rm -f build/Makefile build/examples/Makefile build/unit-tests/Makefile build/cmake_install.cmake build/examples/cmake_install.cmake build/unit-tests/cmake_install.cmake
 
+rm -rf CMakeFiles src/CMakeFiles src/examples/CMakeFiles src/tests/CMakeFiles
+
+#build/CMakeFiles  build/examples/CMakeFiles build/unit-tests/CMakeFiles
+#rm -f build/Makefile build/examples/Makefile build/unit-tests/Makefile build/cmake_install.cmake build/examples/cmake_install.cmake build/unit-tests/cmake_install.cmake
 #mv build/examples/bin/* build/examples
 #mv build/unit-tests/bin/* build/unit-tests
-
-rm -rf build/examples/bin build/unit-tests/bin
+#rm -rf build/examples/bin build/unit-tests/bin
 
 cd build_scripts/linux
diff --git a/build_scripts/linux/linux_gcc_build_x64_debug_local.sh b/build_scripts/linux/linux_gcc_build_x64_debug_local.sh
index 7af32a96e1023cf369809daad0ea1d36e2a213b9..cb2d95509a4358ea9a1129c0c8013081f238c1e1 100755
--- a/build_scripts/linux/linux_gcc_build_x64_debug_local.sh
+++ b/build_scripts/linux/linux_gcc_build_x64_debug_local.sh
@@ -3,22 +3,23 @@
 clear
 
 # Should we rebuild BOOST? (yes/no)
-REBUILD_BOOST=yes
+REBUILD_BOOST=no
 
 # Should we build the examples? (yes/no)
-BUILD_EXAMPLES=yes
+BUILD_EXAMPLES=no
 
 # Should we build the unit-tests? (yes/no)
 BUILD_TESTS=yes
 
 # Should we build the lib4neuro library? (yes)
-BUILD_LIB=yes
+BUILD_LIB=no
 
 # C++ compiler
 CXX_COMPILER="g++"
 C_COMPILER="gcc"
 
 #**********************DO NOT CHANGE BEYOND THIS LINE****************************************
+BUILD_ERROR_OCCURED=0
 
 RED='\033[0;31m'
 CYAN='\033[0;36m'
@@ -60,17 +61,17 @@ then
     echo "The required '${CYAN}BOOST${NC}' library will be recompiled in the directory '${YELLOW}external_dependencies/boost${NC}'"
     rm -rf ../../external_dependencies/boost/stage/lib/*
     BUILD_SOMETHING=yes
-    BUILD_SOMETHING_LIB=yes
 fi
 
 
 # Boost rebuild
 if [ $REBUILD_BOOST = "yes" ]
 then
-    ./build_boost_gcc.sh
+    ./build_boost_gcc.sh || BUILD_ERROR_OCCURED=1
 fi
 
-if [ $BUILD_SOMETHING_LIB = "yes" ]
+# Should we build the lib4neuro library? (yes)
+if [ $BUILD_SOMETHING_LIB = "yes" -a $BUILD_ERROR_OCCURED = "0" ]
 then
 
     if [ $BUILD_LIB = "yes" ]
@@ -93,13 +94,41 @@ then
 
     cd ../..
 
-    cmake -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER=${CXX_COMPILER} -DCMAKE_C_COMPILER=${C_COMPILER} -DBOOST_LIBRARYDIR=${BOOST_LIBRARYDIR} -DBOOST_INCLUDEDIR=${BOOST_INCLUDEDIR} -DBUILD_TESTS=${BUILD_TESTS} -DBUILD_EXAMPLES=${BUILD_EXAMPLES} -DBUILD_LIB=${BUILD_LIB} -DLIB4NEURO_DIR=${PWD}/build/lib -DDEPENDENCIES_LINK_TYPE=${DEPENDENCIES_LINK_TYPE} . 
-	
-    echo "Building the '${CYAN}lib4neuro${NC}' project for ${WHITE}Debug${NC} (building)"
-    ( cmake --build . --config Debug -- -j${N_CORES}  ) && ( echo "${GREEN}Build complete${NC}." ) || ( echo "${RED}Build finished with errors${NC}!"; exit 1; )
+    cmake -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER=${CXX_COMPILER} -DCMAKE_C_COMPILER=${C_COMPILER} -DBOOST_LIBRARYDIR=${BOOST_LIBRARYDIR} -DBOOST_INCLUDEDIR=${BOOST_INCLUDEDIR} -DBUILD_TESTS=${BUILD_TESTS} -DBUILD_EXAMPLES=${BUILD_EXAMPLES} -DBUILD_LIB=${BUILD_LIB} -DLIB4NEURO_DIR=${PWD}/build/lib -DDEPENDENCIES_LINK_TYPE=${DEPENDENCIES_LINK_TYPE} . || ( echo "${RED}Makefile preparation finished with errors${NC}!"; BUILD_ERROR_OCCURED=1; )
+
+	if [ $BUILD_ERROR_OCCURED = "0" ]
+	then
+        echo "Building the '${CYAN}lib4neuro${NC}' project for ${WHITE}Debug${NC} (building)"
+        ( cmake --build . --config Debug -- -j${N_CORES}  ) && ( echo "${GREEN}Build complete${NC}." ) || ( echo "${RED}Build finished with errors${NC}!"; BUILD_ERROR_OCCURED=1; )
+	fi
 
     cd build_scripts/linux
 
+fi
+
+if [ $BUILD_LIB = "yes" ]
+then
+    ./linux_clean_after_lib.sh
+fi
+
+if [ $BUILD_EXAMPLES = "yes" ]
+then
+    ./linux_clean_after_examples.sh
+fi
+
+if [ $BUILD_TESTS = "yes" ]
+then
+    ./linux_clean_after_tests.sh
+fi
+
+if [ $BUILD_SOMETHING_LIB = "yes" ]
+then
     ./linux_clean_garbage.sh
 fi
 
+
+if [ $BUILD_ERROR_OCCURED = "1" ]
+then
+    echo "${RED}Build encountered some errors!${NC}"
+    exit 1
+fi
\ No newline at end of file
diff --git a/build_scripts/windows/win_clean_after_examples.bat b/build_scripts/windows/win_clean_after_examples.bat
index 7fd3c151e2656d7d2a1741c0c7386049a4f02fd1..e508c1110540ee3f9a75fc46dce6f78d80af1cca 100644
--- a/build_scripts/windows/win_clean_after_examples.bat
+++ b/build_scripts/windows/win_clean_after_examples.bat
@@ -9,7 +9,7 @@ cd ..\..
 	xcopy /y build\bin\examples\Debug\*.exe build\tmp 2>NUL
 	rmdir /s /q "build\examples" 2> NUL
 	rmdir /s /q "build\lib\Debug" 2> NUL
-	move build\tmp build\examples
+	move build\tmp build\bin\examples
 		
 	xcopy /y build\lib\*.dll build\examples 2>NUL
 		
diff --git a/external_dependencies/boost b/external_dependencies/boost
index 507ad00a0ad1ce6e8833f2f529fa1e9150c446c0..b587bd3bbfb42520838ccd516dabc8684fbdd8a8 160000
--- a/external_dependencies/boost
+++ b/external_dependencies/boost
@@ -1 +1 @@
-Subproject commit 507ad00a0ad1ce6e8833f2f529fa1e9150c446c0
+Subproject commit b587bd3bbfb42520838ccd516dabc8684fbdd8a8
diff --git a/external_dependencies/exprtk b/external_dependencies/exprtk
index 9a8474e7a259fa5348658a651cd19af216749674..9836f21d07b1bf799e6877324268708f61c01f73 160000
--- a/external_dependencies/exprtk
+++ b/external_dependencies/exprtk
@@ -1 +1 @@
-Subproject commit 9a8474e7a259fa5348658a651cd19af216749674
+Subproject commit 9836f21d07b1bf799e6877324268708f61c01f73
diff --git a/include/4neuro.h b/include/4neuro.h
deleted file mode 100644
index 12e1c482b983fe66dcad286b0d63330190573da9..0000000000000000000000000000000000000000
--- a/include/4neuro.h
+++ /dev/null
@@ -1,27 +0,0 @@
-//
-// Created by martin on 7/16/18.
-//
-
-#ifndef INC_4NEURO_4NEURO_H
-#define INC_4NEURO_4NEURO_H
-
-//TODO make only public interface visible
-
-#include "../src/DataSet/DataSet.h"
-#include "../src/ErrorFunction/ErrorFunctions.h"
-#include "../src/LearningMethods/ParticleSwarm.h"
-#include "../src/NetConnection/ConnectionFunctionGeneral.h"
-#include "../src/NetConnection/ConnectionFunctionIdentity.h"
-#include "../src/Network/NeuralNetwork.h"
-#include "../src/Network/NeuralNetworkSum.h"
-#include "../src/Neuron/Neuron.h"
-#include "../src/Neuron/NeuronBinary.h"
-#include "../src/Neuron/NeuronLinear.h"
-#include "../src/Neuron/NeuronLogistic.h"
-#include "../src/Solvers/DESolver.h"
-#include "../src/constants.h"
-#include "../src/settings.h"
-#include "../src/message.h"
-
-
-#endif //INC_4NEURO_4NEURO_H
diff --git a/include/lib4neuro.h b/include/lib4neuro.h
new file mode 100644
index 0000000000000000000000000000000000000000..5bf4710e93dc0c5ca00cfb6d1c4417a9003cbe57
--- /dev/null
+++ b/include/lib4neuro.h
@@ -0,0 +1,61 @@
+//
+// Created by martin on 7/16/18.
+// Edited by michal on 9/15/18
+//
+
+#ifndef INC_4NEURO_4NEURO_H
+#define INC_4NEURO_4NEURO_H
+
+/* PREREQUISITIES */
+//#include <string>
+
+/**
+ * If defined, the NN feed-forward will print out whats happening
+ */
+//#define VERBOSE_NN_EVAL
+
+#ifdef _WINDOWS
+#define LIB4NEURO_API __declspec(dllexport)
+#else
+#define LIB4NEURO_API
+#endif
+
+/* CLASSES */
+//class DataSet;
+
+/* ACCESSIBLE CLASS METHODS */
+//static DataSet::DataSet(std::string file_path);
+
+#include "../src/DataSet/DataSet.h"
+
+#include <iostream>
+#include <fstream>
+#include <utility>
+#include <vector>
+#include <exception>
+#include <string>
+#include <functional>
+//#include <boost/serialization/base_object.hpp>
+//#include <boost/range/size_type.hpp>
+//#include <boost/serialization/vector.hpp>
+//#include <boost/serialization/utility.hpp>
+//#include <boost/archive/text_oarchive.hpp>
+//#include <boost/archive/text_iarchive.hpp>
+
+
+#include "../src/ErrorFunction/ErrorFunctions.h"
+#include "../src/LearningMethods/ParticleSwarm.h"
+#include "../src/LearningMethods/GradientDescent.h"
+#include "../src/NetConnection/ConnectionFunctionGeneral.h"
+#include "../src/NetConnection/ConnectionFunctionIdentity.h"
+#include "../src/Network/NeuralNetwork.h"
+#include "../src/Network/NeuralNetworkSum.h"
+#include "../src/Solvers/DESolver.h"
+
+/* OK */
+#include "../src/Neuron/Neuron.h"
+#include "../src/Neuron/NeuronBinary.h"
+#include "../src/Neuron/NeuronLinear.h"
+#include "../src/Neuron/NeuronLogistic.h"
+
+#endif //INC_4NEURO_4NEURO_H
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 4fd5ccc16c4c98fdf196b6ec0b4e0eb1e7abc04b..7ff3ef006dd8291a0e2b95a01442de8dccebe805 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -8,7 +8,7 @@ endif ()
 
 if ("${BUILD_LIB}" STREQUAL "yes")
 	add_library(lib4neuro SHARED
-		Neuron/Neuron.cpp
+			Neuron/Neuron.cpp
 		Neuron/NeuronBinary.cpp
 		Neuron/NeuronConstant.cpp
 		Neuron/NeuronLinear.cpp
@@ -17,7 +17,8 @@ if ("${BUILD_LIB}" STREQUAL "yes")
 		Network/NeuralNetworkSum.cpp
 		NetConnection/ConnectionFunctionGeneral.cpp
 		NetConnection/ConnectionFunctionIdentity.cpp
-		LearningMethods/ParticleSwarm.cpp
+        LearningMethods/ParticleSwarm.cpp
+        LearningMethods/GradientDescent.cpp
 		DataSet/DataSet.cpp
 		ErrorFunction/ErrorFunctions.cpp
 		Solvers/DESolver.cpp
diff --git a/src/DataSet/DataSet.h b/src/DataSet/DataSet.h
index 92418b6ec6d5297b76cf66c7230ba9c9061472ce..0c037f1c04c2b6a8c30f2effc46b40cc570b0ca7 100644
--- a/src/DataSet/DataSet.h
+++ b/src/DataSet/DataSet.h
@@ -5,8 +5,6 @@
 #ifndef INC_4NEURO_DATASET_H
 #define INC_4NEURO_DATASET_H
 
-#include "../settings.h"
-
 #include <iostream>
 #include <fstream>
 #include <utility>
@@ -32,13 +30,13 @@ public:
     /**
      * Constructor with the general error message
      */
-    LIB4NEURO_API InvalidDimension();
+     InvalidDimension();
 
     /**
      * Constructor with specific error message
      * @param msg Specific error message
      */
-    LIB4NEURO_API explicit InvalidDimension(std::string msg);
+     explicit InvalidDimension(std::string msg);
 };
 
 /**
@@ -94,13 +92,13 @@ public:
      * Constructor reading data from the file
      * @param file_path Path to the file with stored data set
      */
-    LIB4NEURO_API DataSet(std::string file_path);
+     DataSet(std::string file_path);
 
     /**
      * Constructor accepting data vector
      * @param data_ptr Pointer to the vector containing data
      */
-    LIB4NEURO_API DataSet(std::vector<std::pair<std::vector<double>, std::vector<double>>>* data_ptr);
+     DataSet(std::vector<std::pair<std::vector<double>, std::vector<double>>>* data_ptr);
 
     /**
      * Creates a new data set with input values equidistantly positioned
@@ -116,7 +114,7 @@ public:
      * @param size Number of input-output pairs generated
      * @param output Constant output value
      */
-    LIB4NEURO_API DataSet(double lower_bound, double upper_bound, unsigned int size, double output);
+     DataSet(double lower_bound, double upper_bound, unsigned int size, double output);
 
     /**
      *
@@ -125,39 +123,39 @@ public:
      * @param output_func
      * @param output_dim
      */
-    LIB4NEURO_API DataSet(std::vector<double> &bounds, unsigned int no_elems_in_one_dim, std::vector<double> (*output_func)(std::vector<double>&), unsigned int output_dim);
+     DataSet(std::vector<double> &bounds, unsigned int no_elems_in_one_dim, std::vector<double> (*output_func)(std::vector<double>&), unsigned int output_dim);
 
     /**
      * Getter for number of elements
      * @return Number of elements in the data set
      */
-    LIB4NEURO_API size_t get_n_elements();
+     size_t get_n_elements();
 
     /**
      * Returns the input dimension
      * @return Input dimension
      */
-    LIB4NEURO_API size_t get_input_dim();
+     size_t get_input_dim();
 
 
     /**
      * Return the output dimension
      * @return Output dimension
      */
-    LIB4NEURO_API size_t get_output_dim();
+     size_t get_output_dim();
 
     /**
      * Getter for the data structure
      * @return Vector of data
      */
-    LIB4NEURO_API std::vector<std::pair<std::vector<double>, std::vector<double>>>* get_data();
+     std::vector<std::pair<std::vector<double>, std::vector<double>>>* get_data();
 
     /**
      * Adds a new pair of data to the data set
      * @param inputs Vector of input data
      * @param outputs Vector of output data corresponding to the input data
      */
-    LIB4NEURO_API void add_data_pair(std::vector<double> &inputs, std::vector<double> &outputs);
+     void add_data_pair(std::vector<double> &inputs, std::vector<double> &outputs);
 
     //TODO expand method to generate multiple data types - chebyshev etc.
     /**
@@ -172,7 +170,7 @@ public:
      * @param size Number of input-output pairs generated
      * @param output Constant output value
      */
-    LIB4NEURO_API void add_isotropic_data(double lower_bound, double upper_bound, unsigned int size, double output);
+     void add_isotropic_data(double lower_bound, double upper_bound, unsigned int size, double output);
 
     /**
      * Adds a new data with input values equidistantly positioned
@@ -186,19 +184,19 @@ public:
      * @param size Number of input-output pairs generated
      * @param output_func Function determining output value
      */
-    LIB4NEURO_API void add_isotropic_data(std::vector<double> &bounds, unsigned int no_elems_in_one_dim, std::vector<double> (*output_func)(std::vector<double>&));
+     void add_isotropic_data(std::vector<double> &bounds, unsigned int no_elems_in_one_dim, std::vector<double> (*output_func)(std::vector<double>&));
 
     //TODO Chebyshev - ch. interpolation points, i-th point = cos(i*alpha) from 0 to pi
 
     /**
      * Prints the data set
      */
-    LIB4NEURO_API void print_data();
+     void print_data();
 
     /**
      * Stores the DataSet object to the binary file
      */
-    LIB4NEURO_API void store_text(std::string &file_path);
+     void store_text(std::string &file_path);
 };
 
 #endif //INC_4NEURO_DATASET_H
diff --git a/src/ErrorFunction/ErrorFunctions.cpp b/src/ErrorFunction/ErrorFunctions.cpp
index 54d3f04610d91b5688a6a76a59e633f0145a7764..de4db5122c9d9f7a76669a518d7858ad311da6a9 100644
--- a/src/ErrorFunction/ErrorFunctions.cpp
+++ b/src/ErrorFunction/ErrorFunctions.cpp
@@ -18,7 +18,7 @@ MSE::MSE(NeuralNetwork *net, DataSet *ds) {
 }
 
 double MSE::eval(std::vector<double> *weights) {
-    unsigned int dim_out = this->ds->get_output_dim();
+    size_t dim_out = this->ds->get_output_dim();
 //    unsigned int dim_in = this->ds->get_input_dim();
     size_t n_elements = this->ds->get_n_elements();
     double error = 0.0, val;
@@ -30,15 +30,15 @@ double MSE::eval(std::vector<double> *weights) {
 
     std::vector<double> output( dim_out );
 
-    for(unsigned int i = 0; i < n_elements; ++i){  // Iterate through every element in the test set
+    for( auto el: *data ){  // Iterate through every element in the test set
 
-        this->net->eval_single(data->at(i).first, output, weights);  // Compute the net output and store it into 'output' variable
+        this->net->eval_single(el.first, output, weights);  // Compute the net output and store it into 'output' variable
 
 
 //        printf("errors: ");
-        for(unsigned int j = 0; j < dim_out; ++j) {  // Compute difference for every element of the output vector
+        for(size_t j = 0; j < dim_out; ++j) {  // Compute difference for every element of the output vector
 
-            val = output[j] - data->at(i).second[j];
+            val = output[j] - el.second[j];
             error += val * val;
 
 //            printf("%f, ", val * val);
@@ -51,6 +51,46 @@ double MSE::eval(std::vector<double> *weights) {
     return error/n_elements;
 }
 
+void MSE::calculate_error_gradient( std::vector<double> &params, std::vector<double> &grad, double alpha ) {
+
+    size_t dim_out = this->ds->get_output_dim( );
+    size_t n_elements = this->ds->get_n_elements();
+
+    std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = this->ds->get_data();
+
+    std::vector<double> error_derivative( dim_out );
+
+
+    for( auto el: *data ){  // Iterate through every element in the test set
+
+        this->net->eval_single(el.first, error_derivative, &params);  // Compute the net output and store it into 'output' variable
+
+        for( size_t j = 0; j < dim_out; ++j){
+            error_derivative[ j ] = 2.0 * (error_derivative[ j ] - el.second[ j ]); //real - expected result
+        }
+
+        this->net->add_to_gradient_single( el.first, error_derivative, alpha / n_elements, grad );
+    }
+}
+
+std::vector<double>* MSE::get_parameters() {
+    std::vector<double> *output = new std::vector<double>( this->net->get_n_weights( ) + this->net->get_n_biases( ) );
+
+    size_t i = 0;
+
+    for(auto el: *this->net->get_parameter_ptr_weights()){
+        output->at( i ) = el;
+        ++i;
+    }
+
+    for(auto el: *this->net->get_parameter_ptr_biases()){
+        output->at( i ) = el;
+        ++i;
+    }
+
+    return output;
+}
+
 ErrorSum::ErrorSum() {
     this->summand = nullptr;
     this->summand_coefficient = nullptr;
@@ -81,6 +121,18 @@ double ErrorSum::eval(std::vector<double> *weights) {
     return output;
 }
 
+void ErrorSum::calculate_error_gradient( std::vector<double> &params, std::vector<double> &grad, double alpha ) {
+
+    ErrorFunction *ef = nullptr;
+    for( size_t i = 0; i < this->summand->size( ); ++i ){
+        ef = this->summand->at( i );
+
+        if( ef ){
+            ef->calculate_error_gradient( params, grad, this->summand_coefficient->at( i ) * alpha );
+        }
+    }
+}
+
 void ErrorSum::add_error_function( ErrorFunction *F, double alpha ) {
     if(!this->summand){
         this->summand = new std::vector<ErrorFunction*>(0);
@@ -111,4 +163,8 @@ size_t ErrorSum::get_dimension() {
 //        this->dimension = max;
 //    }
     return this->dimension;
+}
+
+std::vector<double>* ErrorSum::get_parameters() {
+    return this->summand->at( 0 )->get_parameters( );
 }
\ No newline at end of file
diff --git a/src/ErrorFunction/ErrorFunctions.h b/src/ErrorFunction/ErrorFunctions.h
index 25719e6f03e72866da91376932fd0138f2b3df10..3bef0bd82d8924366e727f68eee1e2e3574b5454 100644
--- a/src/ErrorFunction/ErrorFunctions.h
+++ b/src/ErrorFunction/ErrorFunctions.h
@@ -5,8 +5,6 @@
 #ifndef INC_4NEURO_ERRORFUNCTION_H
 #define INC_4NEURO_ERRORFUNCTION_H
 
-#include "../settings.h"
-
 #include "../Network/NeuralNetwork.h"
 #include "../DataSet/DataSet.h"
 #include "exprtk.hpp"
@@ -31,7 +29,27 @@ public:
      * 
      * @return 
      */
-    LIB4NEURO_API virtual size_t get_dimension();
+     virtual size_t get_dimension();
+
+     /**
+      *
+      * @param params
+      * @param grad
+      */
+     virtual void calculate_error_gradient(std::vector<double> &params, std::vector<double> &grad, double alpha = 1.0 ) = 0;
+
+
+     /**
+      *
+      * @return
+      */
+     virtual std::vector<double>* get_parameters( ) = 0;
+
+     /**
+      * //TODO delete after gradient learning is debugged
+      * @return
+      */
+     virtual DataSet* get_dataset() = 0;
 
 protected:
 
@@ -39,6 +57,11 @@ protected:
      *
      */
     size_t dimension = 0;
+
+    /**
+     *
+     */
+    NeuralNetwork* net = nullptr;
 };
 
 class MSE : public ErrorFunction {
@@ -49,18 +72,34 @@ public:
      * @param net
      * @param ds
      */
-    LIB4NEURO_API MSE(NeuralNetwork* net, DataSet* ds);
+     MSE(NeuralNetwork* net, DataSet* ds);
 
     /**
      *
      * @param weights
      * @return
      */
-    LIB4NEURO_API virtual double eval(std::vector<double>* weights = nullptr);
+     double eval(std::vector<double>* weights = nullptr) override;
+
+     /**
+      *
+      * @param params
+      * @param grad
+      */
+    void calculate_error_gradient(std::vector<double> &params, std::vector<double> &grad, double alpha = 1.0 ) override;
+
+    /**
+     *
+     * @return
+     */
+    std::vector<double>* get_parameters( ) override;
+
+    DataSet* get_dataset() override {
+        return this->ds;
+    };
 
 private:
 
-    NeuralNetwork* net;
     DataSet* ds;
 };
 
@@ -69,31 +108,48 @@ public:
     /**
      *
      */
-    LIB4NEURO_API ErrorSum();
+     ErrorSum();
 
     /**
      *
      */
-    LIB4NEURO_API ~ErrorSum();
+     ~ErrorSum();
 
     /**
      *
      * @param weights
      * @return
      */
-    LIB4NEURO_API virtual double eval(std::vector<double>* weights = nullptr);
+     double eval(std::vector<double>* weights = nullptr) override;
 
     /**
      *
      * @param F
      */
-    LIB4NEURO_API void add_error_function( ErrorFunction *F, double alpha = 1.0 );
+     void add_error_function( ErrorFunction *F, double alpha = 1.0 );
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API size_t get_dimension() override;
+     size_t get_dimension( ) override;
+
+     /**
+      *
+      * @param params
+      * @param grad
+      */
+    void calculate_error_gradient( std::vector<double> &params, std::vector<double> &grad, double alpha = 1.0 ) override;
+
+    /**
+     *
+     * @return
+     */
+    std::vector<double>* get_parameters( ) override;
+
+    DataSet* get_dataset() override {
+        return this->summand->at( 0 )->get_dataset();
+    };
 
 private:
     std::vector<ErrorFunction*>* summand;
diff --git a/src/General/ExprtkWrapper.h b/src/General/ExprtkWrapper.h
index df58142e2386bb566cf55061b653b6408ba5198c..b3e9ca5515ee0758d82a929ac6d7ed378a932118 100644
--- a/src/General/ExprtkWrapper.h
+++ b/src/General/ExprtkWrapper.h
@@ -8,7 +8,7 @@
 #ifndef LIB4NEURO_EXPRTKWRAPPER_H
 #define LIB4NEURO_EXPRTKWRAPPER_H
 
-#include "../settings.h"
+
 
 #include "exprtk.hpp"
 
@@ -25,17 +25,17 @@ public:
      * @param expression_string
      * @param var_dim
      */
-    LIB4NEURO_API ExprtkWrapper( std::string expression_string );
+     ExprtkWrapper( std::string expression_string );
 
     /**
      *
      */
-    LIB4NEURO_API ExprtkWrapper( );
+     ExprtkWrapper( );
 
     /**
      *
      */
-    LIB4NEURO_API ~ExprtkWrapper();
+     ~ExprtkWrapper();
 
     /**
      *
@@ -45,14 +45,14 @@ public:
      * @param x4
      * @return
      */
-    LIB4NEURO_API double eval(double x1 = 0.0, double x2 = 0.0, double x3 = 0.0, double x4 = 0.0);
+     double eval(double x1 = 0.0, double x2 = 0.0, double x3 = 0.0, double x4 = 0.0);
 
     /**
      *
      * @param p
      * @return
      */
-    LIB4NEURO_API double eval(std::vector<double> &p);
+     double eval(std::vector<double> &p);
 
 private:
 
diff --git a/src/LearningMethods/GradientDescent.cpp b/src/LearningMethods/GradientDescent.cpp
index ef5c12fc4164437dfdca429faac18fcfe1a13874..0a013b01b315a2bfc5001d41e23e29a0a0ebfd3c 100644
--- a/src/LearningMethods/GradientDescent.cpp
+++ b/src/LearningMethods/GradientDescent.cpp
@@ -6,3 +6,135 @@
  */
 
 #include "GradientDescent.h"
+
+GradientDescent::GradientDescent(double epsilon, size_t n_to_restart){
+    this->tolerance = epsilon;
+    this->restart_frequency = n_to_restart;
+    this->optimal_parameters = new std::vector<double>(0);
+}
+
+GradientDescent::~GradientDescent() {
+    if( this->optimal_parameters ){
+        delete this->optimal_parameters;
+        this->optimal_parameters = nullptr;
+    }
+}
+
+void GradientDescent::eval_step_size_mk(double &gamma, double beta, double &c, double grad_norm_prev,
+                                          double grad_norm, double fi, double fim ) {
+
+    if( fi > fim )
+    {
+        c /= 1.0000005;
+    }
+    else if( fi < fim )
+    {
+        c *= 1.0000005;
+    }
+
+    gamma *= std::pow( c, 1.0 - 2.0 * beta) * std::pow( grad_norm_prev / grad_norm, 1.0 / c );
+
+}
+
+void GradientDescent::optimize( ErrorFunction &ef ) {
+
+    std::cout << "Finding a solution via a Gradient Descent method with adaptive step-length" << std::endl;
+    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    double grad_norm = this->tolerance * 10.0, gamma, sx, beta;
+    double grad_norm_prev;
+    size_t i, iter_idx = 0;
+
+    gamma = 1.0;
+    double prev_val, val = 0.0, c = 1.25;
+
+    size_t n_parameters = ef.get_dimension();
+
+
+    std::vector<double> *gradient_current = new std::vector<double>( n_parameters );
+    std::vector<double> *gradient_prev = new std::vector<double>( n_parameters );
+    std::vector<double> *params_current = ef.get_parameters( );
+    std::vector<double> *params_prev = new std::vector<double>( n_parameters );
+    std::vector<double> *ptr_mem;
+
+//    std::vector<double> gradient_mem( n_parameters );
+//    std::vector<double> parameters_analytical( n_parameters );
+
+
+    std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
+    std::fill(gradient_prev->begin(), gradient_prev->end(), 0.0);
+
+    while( grad_norm > this->tolerance ) {
+        iter_idx++;
+        prev_val = val;
+        grad_norm_prev = grad_norm;
+
+        /* reset of the current gradient */
+        std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
+//        std::fill(gradient_mem.begin(), gradient_mem.end(), 0.0);
+        ef.calculate_error_gradient( *params_current, *gradient_current );
+//        double error_analytical = this->calculate_gradient( ef.get_dataset()->get_data(), (size_t)2, params_current, gradient_current );
+
+//        for(size_t k = 0; k < gradient_mem.size(); ++k){
+//            printf("%f : %f\n", gradient_mem[ k ], gradient_current->at( k ));
+//        }
+//        printf("---------------------\n");
+
+        grad_norm = 0.0;
+        for(auto v: *gradient_current){
+            grad_norm += v * v;
+        }
+        grad_norm = std::sqrt(grad_norm);
+
+        /* Update of the parameters */
+        /* step length calculation */
+        if(iter_idx < 10 || iter_idx % this->restart_frequency == 0 ){
+            /* fixed step length */
+            gamma = 0.1 * this->tolerance;
+        }
+
+        else{
+            /* angle between two consecutive gradients */
+            sx = 0.0;
+            for(i = 0; i < gradient_current->size(); ++i){
+                sx += (gradient_current->at( i ) * gradient_prev->at( i ));
+            }
+            sx /= grad_norm * grad_norm_prev;
+            beta = std::sqrt(std::acos( sx ) / PI);
+
+            eval_step_size_mk( gamma, beta, c, grad_norm_prev, grad_norm, val, prev_val );
+        }
+
+        for(i = 0; i < gradient_current->size(); ++i){
+            (*params_prev)[i] = (*params_current)[i] - gamma * (*gradient_current)[i];
+        }
+
+        /* switcheroo */
+        ptr_mem = gradient_prev;
+        gradient_prev = gradient_current;
+        gradient_current = ptr_mem;
+
+        ptr_mem = params_prev;
+        params_prev = params_current;
+        params_current = ptr_mem;
+
+        val = ef.eval( params_current );
+        if(iter_idx % 1 == 0){
+            printf("Iteration %12d. Step size: %15.8f, C: %15.8f, Gradient norm: %15.8f. Total error: %10.8f\r", (int)iter_idx, gamma, c, grad_norm, val);
+            std::cout.flush();
+        }
+    }
+    printf("Iteration %12d. Step size: %15.8f, C: %15.8f, Gradient norm: %15.8f. Total error: %10.8f\n", (int)iter_idx, gamma, c, grad_norm, val);
+    std::cout.flush();
+
+
+    *this->optimal_parameters = *params_current;
+
+    delete gradient_current;
+    delete gradient_prev;
+    delete params_current;
+    delete params_prev;
+}
+
+std::vector<double>* GradientDescent::get_parameters() {
+    return this->optimal_parameters;
+}
\ No newline at end of file
diff --git a/src/LearningMethods/GradientDescent.h b/src/LearningMethods/GradientDescent.h
index 91f72c53c242b375b39de4c8f9f038b98cfdb50f..4d5db3a51f0f07f6bc63327624cb8f655d450e53 100644
--- a/src/LearningMethods/GradientDescent.h
+++ b/src/LearningMethods/GradientDescent.h
@@ -8,8 +8,348 @@
 #ifndef INC_4NEURO_GRADIENTDESCENT_H
 #define INC_4NEURO_GRADIENTDESCENT_H
 
+#include "ILearningMethods.h"
+#include "../ErrorFunction/ErrorFunctions.h"
+
+class GradientDescent: public ILearningMethods {
+
+private:
+
+    /**
+     *
+     */
+    double tolerance;
+
+    /**
+     *
+     */
+    size_t restart_frequency;
+
+    std::vector<double> *optimal_parameters;
+
+    /**
+     *
+     * @param gamma
+     * @param beta
+     * @param c
+     * @param grad_norm_prev
+     * @param grad_norm
+     * @param fi
+     * @param fim
+     */
+     virtual void eval_step_size_mk( double &gamma, double beta, double &c, double grad_norm_prev, double grad_norm, double fi, double fim );
+    double eval_f(double x){
+        return std::pow(E, -2.0 * x) * (3.0 * x + 1.0);
+    }
+
+    double eval_df(double x){
+        return std::pow(E, -2.0 * x) * (1.0 - 6.0 * x);
+    }
+
+    double eval_ddf(double x){
+        return 4.0 * std::pow(E, -2.0 * x) * (3.0 * x - 2.0);
+    }
+
+    double eval_approx_f(double x, size_t n_inner_neurons, std::vector<double> &parameters){
+        double value= 0.0, wi, ai, bi, ei, ei1;
+        for(size_t i = 0; i < n_inner_neurons; ++i){
+
+            wi = parameters[3 * i];
+            ai = parameters[3 * i + 1];
+            bi = parameters[3 * i + 2];
+
+            ei = std::pow(E, bi - wi * x);
+            ei1 = ei + 1.0;
+
+            value += ai / (ei1);
+        }
+        return value;
+    }
+
+    double eval_approx_df(double x, size_t n_inner_neurons, std::vector<double> &parameters){
+        double value= 0.0, wi, ai, bi, ei, ei1;
+        for(size_t i = 0; i < n_inner_neurons; ++i){
+
+            wi = parameters[3 * i];
+            ai = parameters[3 * i + 1];
+            bi = parameters[3 * i + 2];
+
+            ei = std::pow(E, bi - wi * x);
+            ei1 = ei + 1.0;
+
+            value += ai * wi * ei / (ei1 * ei1);
+        }
+
+        return value;
+    }
+
+    double eval_approx_ddf(double x, size_t n_inner_neurons, std::vector<double> &parameters){
+        double value= 0.0, wi, ai, bi, ewx, eb;
+
+        for(size_t i = 0; i < n_inner_neurons; ++i){
+
+            wi = parameters[3 * i];
+            ai = parameters[3 * i + 1];
+            bi = parameters[3 * i + 2];
+
+
+            eb = std::pow(E, bi);
+            ewx = std::pow(E, wi * x);
+
+            value += -(ai*wi*wi*eb*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
+        }
+
+        return value;
+    }
+
+//NN partial derivative (wi): (ai * x * e^(bi - wi * x)) * (e^(bi - wi * x) + 1)^(-2)
+    double eval_approx_dw_f(double x, size_t neuron_idx, std::vector<double> &parameters){
+        double wi, ai, bi, ei, ei1;
+        wi = parameters[3 * neuron_idx];
+        ai = parameters[3 * neuron_idx + 1];
+        bi = parameters[3 * neuron_idx + 2];
+
+        ei = std::pow(E, bi - wi * x);
+        ei1 = ei + 1.0;
+
+        return (ai * x * ei) / (ei1 * ei1);
+    }
+
+//dNN partial derivative (wi): -(a w x e^(b - w x))/(e^(b - w x) + 1)^2 + (2 a w x e^(2 b - 2 w x))/(e^(b - w x) + 1)^3 + (a e^(b - w x))/(e^(b - w x) + 1)^2
+    double eval_approx_dw_df(double x, size_t neuron_idx, std::vector<double> &parameters){
+
+        double wi, ai, bi, ei, ei1;
+
+        wi = parameters[3 * neuron_idx];
+        ai = parameters[3 * neuron_idx + 1];
+        bi = parameters[3 * neuron_idx + 2];
+
+        ei = std::pow(E, bi - wi * x);
+        ei1 = ei + 1.0;
+
+        return -(ai * wi * x * ei)/(ei1 * ei1) + (2.0*ai*wi*x*ei*ei)/(ei1 * ei1 * ei1) + (ai* ei)/(ei1 * ei1);
+    }
+
+//ddNN partial derivative (wi): -(a w^2 x e^(b + 2 w x))/(e^b + e^(w x))^3 - (a w^2 x e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3 + (3 a w^2 x e^(b + 2 w x) (e^(w x) - e^b))/(e^b + e^(w x))^4 - (2 a w e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
+    double eval_approx_dw_ddf(double x, size_t neuron_idx, std::vector<double> &parameters){
+        double wi, ai, bi, eb, ewx;
+
+        wi = parameters[3 * neuron_idx];
+        ai = parameters[3 * neuron_idx + 1];
+        bi = parameters[3 * neuron_idx + 2];
+
+        eb = std::pow(E, bi);
+        ewx = std::pow(E, wi * x);
+
+        return  -(ai*wi*wi* x * eb*ewx*ewx)/((eb + ewx)*(eb + ewx)*(eb + ewx)) - (ai*wi*wi*x*eb*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx)) + (3*ai*wi*wi*x*eb*ewx*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx)*(eb + ewx)) - (2*ai*wi*eb*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
+    }
+
+//NN partial derivative (ai): (1 + e^(-x * wi + bi))^(-1)
+    double eval_approx_da_f(double x, size_t neuron_idx, std::vector<double> &parameters){
+        double wi, bi, ei, ei1;
+
+        wi = parameters[3 * neuron_idx];
+        bi = parameters[3 * neuron_idx + 2];
+
+        ei = std::pow(E, bi - wi * x);
+        ei1 = ei + 1.0;
+
+        return 1.0 / ei1;
+    }
+
+//dNN partial derivative (ai): (w e^(b - w x))/(e^(b - w x) + 1)^2
+    double eval_approx_da_df(double x, size_t neuron_idx, std::vector<double> &parameters){
+        double wi, bi, ei, ei1;
+
+        wi = parameters[3 * neuron_idx];
+        bi = parameters[3 * neuron_idx + 2];
+
+        ei = std::pow(E, bi - wi * x);
+        ei1 = ei + 1.0;
+
+        return (wi*ei)/(ei1 * ei1);
+    }
+
+//ddNN partial derivative (ai):  -(w^2 e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
+    double eval_approx_da_ddf(double x, size_t neuron_idx, std::vector<double> &parameters){
+        double wi, bi, eip, ewx, eb;
+
+        wi = parameters[3 * neuron_idx];
+        bi = parameters[3 * neuron_idx + 2];
+
+        eip = std::pow(E, bi + wi * x);
+        eb = std::pow(E, bi);
+        ewx = std::pow(E, wi * x);
+
+        return -(wi*wi*eip*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
+    }
+
+//NN partial derivative (bi): -(ai * e^(bi - wi * x)) * (e^(bi - wi * x) + 1)^(-2)
+    double eval_approx_db_f(double x, size_t neuron_idx, std::vector<double> &parameters){
+        double wi, bi, ei, ai, ei1;
+        wi = parameters[3 * neuron_idx];
+        ai = parameters[3 * neuron_idx + 1];
+        bi = parameters[3 * neuron_idx + 2];
+
+        ei = std::pow(E, bi - wi * x);
+        ei1 = ei + 1.0;
+
+        return -(ai * ei)/(ei1 * ei1);
+    }
+
+//dNN partial derivative (bi): (a w e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
+    double eval_approx_db_df(double x, size_t neuron_idx, std::vector<double> &parameters){
+        double wi, bi, ai, ewx, eb;
+
+        wi = parameters[3 * neuron_idx];
+        ai = parameters[3 * neuron_idx + 1];
+        bi = parameters[3 * neuron_idx + 2];
+
+        eb = std::pow(E, bi);
+        ewx = std::pow(E, wi*x);
+
+        return (ai* wi* eb*ewx* (ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
+    }
+
+//ddNN partial derivative (bi): -(a w^2 e^(b + w x) (-4 e^(b + w x) + e^(2 b) + e^(2 w x)))/(e^b + e^(w x))^4
+    double eval_approx_db_ddf(double x, size_t neuron_idx, std::vector<double> &parameters){
+        double wi, bi, ai, ewx, eb;
+
+        wi = parameters[3 * neuron_idx];
+        ai = parameters[3 * neuron_idx + 1];
+        bi = parameters[3 * neuron_idx + 2];
+
+        eb = std::pow(E, bi);
+        ewx = std::pow(E, wi*x);
+
+        return -(ai* wi*wi* eb*ewx* (-4.0* eb*ewx + eb*eb + ewx*ewx))/((eb +ewx)*(eb +ewx)*(eb +ewx)*(eb +ewx));
+    }
+
+    double eval_error_function(std::vector<double> &parameters, size_t n_inner_neurons, std::vector<double> test_points){
+
+        double output = 0.0, approx, frac = 1.0 / (test_points.size());
+
+        for(auto x: test_points){
+            /* governing equation */
+            approx = 4.0 * eval_approx_f(x, n_inner_neurons, parameters) + 4.0 * eval_approx_df(x, n_inner_neurons, parameters) + eval_approx_ddf(x, n_inner_neurons, parameters);
+
+            output += (0.0 - approx) * (0.0 - approx) * frac;
+
+        }
+
+        /* BC */
+        approx = eval_approx_f(0.0, n_inner_neurons, parameters);
+        output += (1.0 - approx) * (1.0 - approx);
+
+        approx = eval_approx_df(0.0, n_inner_neurons, parameters);
+        output += (1.0 - approx) * (1.0 - approx);
+
+
+        return output;
+    }
+
+    void eval_step_size_simple(double &gamma, double val, double prev_val, double sk, double grad_norm, double grad_norm_prev){
+
+        if(val > prev_val){
+            gamma *= 0.99999;
+        }
+
+        if(sk <= 1e-3 || grad_norm < grad_norm_prev){
+            /* movement on a line */
+            /* new slope is less steep, speed up */
+            gamma *= 1.0005;
+        }
+        else if(grad_norm > grad_norm_prev){
+            /* new slope is more steep, slow down*/
+            gamma /= 1.0005;
+        }
+        else{
+            gamma /= 1.005;
+        }
+//        gamma *= 0.999999;
+    }
+
+
+    double calculate_gradient( std::vector<std::pair<std::vector<double>, std::vector<double>>> *dataset, size_t n_inner_neurons, std::vector<double> *parameters, std::vector<double> *gradient ){
+        size_t i, j;
+        double x,  mem, derror, total_error, approx;
+
+        std::vector<double> data_points(dataset->size());
+        for( i = 0; i < dataset->size(); ++i){
+            data_points[ i ] = dataset->at( i ).first[0];
+        }
+
+        std::vector<double> parameters_analytical(parameters->size());
+        for(i = 0; i < n_inner_neurons; ++i){
+            parameters_analytical[3 * i + 0] = parameters->at(0 * n_inner_neurons + i);
+            parameters_analytical[3 * i + 1] = parameters->at(1 * n_inner_neurons + i);
+            parameters_analytical[3 * i + 2] = parameters->at(2 * n_inner_neurons + i);
+        }
+        size_t train_size = data_points.size();
+
+        /* error boundary condition: y(0) = 1 => e1 = (1 - y(0))^2 */
+        x = 0.0;
+        mem = (1.0 - eval_approx_f(x, n_inner_neurons, *parameters));
+        derror = 2.0 * mem;
+        total_error = mem * mem;
+        for(i = 0; i < n_inner_neurons; ++i){
+            (*gradient)[i] -= derror * eval_approx_dw_f(x, i, *parameters);
+            (*gradient)[i + n_inner_neurons] -= derror * eval_approx_da_f(x, i, *parameters);
+            (*gradient)[i + 2*n_inner_neurons] -= derror * eval_approx_db_f(x, i, *parameters);
+        }
+
+        /* error boundary condition: y'(0) = 1 => e2 = (1 - y'(0))^2 */
+        mem = (1.0 - eval_approx_df(x, n_inner_neurons, *parameters));
+        derror = 2.0 * mem;
+        total_error += mem * mem;
+        for(i = 0; i < n_inner_neurons; ++i){
+            (*gradient)[i] -= derror * eval_approx_dw_df(x, i, *parameters);
+            (*gradient)[i + n_inner_neurons] -= derror * eval_approx_da_df(x, i, *parameters);
+            (*gradient)[i + 2*n_inner_neurons] -= derror * eval_approx_db_df(x, i, *parameters);
+        }
+
+        for(j = 0; j < data_points.size(); ++j){
+            x = data_points[j];
+            /* error of the governing equation: y''(x) + 4y'(x) + 4y(x) = 0 => e3 = 1/n * (0 - y''(x) - 4y'(x) - 4y(x))^2 */
+            approx= eval_approx_ddf(x, n_inner_neurons, *parameters) + 4.0 * eval_approx_df(x, n_inner_neurons, *parameters) + 4.0 * eval_approx_f(x, n_inner_neurons, *parameters);
+            mem = 0.0 - approx;
+            derror = 2.0 * mem / train_size;
+            for(i = 0; i < n_inner_neurons; ++i){
+                (*gradient)[i] -= derror * (eval_approx_dw_ddf(x, i, *parameters) + 4.0 * eval_approx_dw_df(x, i, *parameters) + 4.0 * eval_approx_dw_f(x, i, *parameters));
+                (*gradient)[i + n_inner_neurons] -= derror * (eval_approx_da_ddf(x, i, *parameters) + 4.0 * eval_approx_da_df(x, i, *parameters) + 4.0 * eval_approx_da_f(x, i, *parameters));
+                (*gradient)[i + 2*n_inner_neurons] -= derror * (eval_approx_db_ddf(x, i, *parameters) + 4.0 * eval_approx_db_df(x, i, *parameters) + 4.0 * eval_approx_db_f(x, i, *parameters));
+            }
+            total_error += mem * mem / train_size;
+        }
+
+        return total_error;
+    }
+
+public:
+
+    /**
+     *
+     * @param epsilon
+     */
+    GradientDescent( double epsilon = 1e-3, size_t n_to_restart = 100 );
+
+    /**
+     *
+     */
+    ~GradientDescent();
+
+    /**
+     *
+     * @param ef
+     */
+    virtual void optimize( ErrorFunction &ef );
+
+    /**
+     *
+     * @return
+     */
+    virtual std::vector<double>* get_parameters( );
 
-class GradientDescent {
 
 };
 
diff --git a/src/LearningMethods/ParticleSwarm.h b/src/LearningMethods/ParticleSwarm.h
index bf2bb2db363dd60766d5d824ea1bd2e4698c645b..007ed5c3cc731f0ea01d26360c287580bf2b408c 100644
--- a/src/LearningMethods/ParticleSwarm.h
+++ b/src/LearningMethods/ParticleSwarm.h
@@ -8,8 +8,6 @@
 #ifndef INC_4NEURO_PARTICLESWARM_H
 #define INC_4NEURO_PARTICLESWARM_H
 
-#include "../settings.h"
-
 #include <cstdlib>
 #include <ctime>
 #include <cmath>
@@ -57,38 +55,38 @@ public:
     /**
      *
      */
-    LIB4NEURO_API void print_coordinate();
+     void print_coordinate();
 
     /**
      *
      * @param f_dim
      */
-    LIB4NEURO_API Particle( ErrorFunction *ef, std::vector<double> *domain_bounds );
-    LIB4NEURO_API ~Particle( );
+     Particle( ErrorFunction *ef, std::vector<double> *domain_bounds );
+     ~Particle( );
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API std::vector<double>* get_coordinate();
+     std::vector<double>* get_coordinate();
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API double get_current_value();
+     double get_current_value();
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API double get_optimal_value();
+     double get_optimal_value();
 
     /**
      *
      * @param ref_coordinate
      */
-    LIB4NEURO_API void get_optimal_coordinate(std::vector<double> &ref_coordinate);
+     void get_optimal_coordinate(std::vector<double> &ref_coordinate);
 
     /**
      *
@@ -98,7 +96,7 @@ public:
      * @param glob_min_coord
      * @param penalty_coef
      */
-    LIB4NEURO_API double change_coordinate(double w, double c1, double c2, std::vector<double> &glob_min_coord, std::vector<std::vector<double>> &global_min_vec, double penalty_coef=0.25);
+     double change_coordinate(double w, double c1, double c2, std::vector<double> &glob_min_coord, std::vector<std::vector<double>> &global_min_vec, double penalty_coef=0.25);
 };
 
 
@@ -149,13 +147,13 @@ protected:
      * @param val
      * @return
      */
-    LIB4NEURO_API Particle* determine_optimal_coordinate_and_value(std::vector<double> &coord, double &val);
+     Particle* determine_optimal_coordinate_and_value(std::vector<double> &coord, double &val);
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API std::vector<double>* get_centroid_coordinates();
+     std::vector<double>* get_centroid_coordinates();
 
     /**
      *
@@ -164,7 +162,7 @@ protected:
      * @param n
      * @return
      */
-    LIB4NEURO_API double get_euclidean_distance(std::vector<double>* a, std::vector<double>* b);
+     double get_euclidean_distance(std::vector<double>* a, std::vector<double>* b);
 
 public:
 
@@ -180,7 +178,7 @@ public:
      * @param iter_max
      */
      //TODO make domain_bounds constant
-    LIB4NEURO_API ParticleSwarm(
+     ParticleSwarm(
             std::vector<double> *domain_bounds,
             double c1 = 1.711897,
             double c2 = 1.711897,
@@ -195,7 +193,7 @@ public:
     /**
      *
      */
-    LIB4NEURO_API ~ParticleSwarm( );
+     ~ParticleSwarm( );
 
 
     /**
@@ -204,13 +202,13 @@ public:
      * @param epsilon
      * @param delta
      */
-    LIB4NEURO_API void optimize( ErrorFunction &ef ) override;
+     void optimize( ErrorFunction &ef ) override;
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API std::vector<double>* get_parameters( ) override;
+     std::vector<double>* get_parameters( ) override;
 
 
 };
diff --git a/src/NetConnection/ConnectionFunctionGeneral.cpp b/src/NetConnection/ConnectionFunctionGeneral.cpp
index 9b1b3e645541284985ea7011158d0033ca59a5aa..a3f410327cfe5d00dfbd0a73e7ddd6b5b491b88c 100644
--- a/src/NetConnection/ConnectionFunctionGeneral.cpp
+++ b/src/NetConnection/ConnectionFunctionGeneral.cpp
@@ -25,4 +25,5 @@ double ConnectionFunctionGeneral::eval( std::vector<double> &parameter_space ) {
 
 void ConnectionFunctionGeneral::eval_partial_derivative(std::vector<double> &parameter_space, std::vector<double> &weight_gradient, double alpha) {
     //TODO
+    throw std::invalid_argument("Warning: ConnectionFunctionGeneral::eval_partial_derivative is not yet implemented\n");
 }
diff --git a/src/NetConnection/ConnectionFunctionGeneral.h b/src/NetConnection/ConnectionFunctionGeneral.h
index d19bb30cb20b9719fc12b648249d3291ea19a3ea..4612c980da79301950e39dc63562a95996f3adf4 100644
--- a/src/NetConnection/ConnectionFunctionGeneral.h
+++ b/src/NetConnection/ConnectionFunctionGeneral.h
@@ -8,8 +8,6 @@
 #ifndef INC_4NEURO_CONNECTIONWEIGHT_H
 #define INC_4NEURO_CONNECTIONWEIGHT_H
 
-#include "../settings.h"
-
 #include <boost/archive/text_oarchive.hpp>
 #include <boost/archive/text_iarchive.hpp>
 #include <boost/serialization/export.hpp>
@@ -38,32 +36,32 @@ public:
     /**
      *
      */
-    LIB4NEURO_API ConnectionFunctionGeneral();
+     ConnectionFunctionGeneral();
 
     /**
      *
      * @param param_count
      * @param f
      */
-    LIB4NEURO_API ConnectionFunctionGeneral(std::vector<size_t> &param_indices, std::string &function_string);
+     ConnectionFunctionGeneral(std::vector<size_t> &param_indices, std::string &function_string);
 
     /**
      *
      */
-    LIB4NEURO_API virtual ~ConnectionFunctionGeneral( );
+     virtual ~ConnectionFunctionGeneral( );
 
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API virtual double eval( std::vector<double> &parameter_space );
+     virtual double eval( std::vector<double> &parameter_space );
 
     /**
      * Performs partial derivative of this transfer function according to all parameters. Adds the values multiplied
      * by alpha to the corresponding gradient vector
      */
-    LIB4NEURO_API virtual void eval_partial_derivative( std::vector<double> &parameter_space, std::vector<double> &weight_gradient, double alpha );
+     virtual void eval_partial_derivative( std::vector<double> &parameter_space, std::vector<double> &weight_gradient, double alpha );
 
 };
 
diff --git a/src/NetConnection/ConnectionFunctionIdentity.h b/src/NetConnection/ConnectionFunctionIdentity.h
index 3478d9a248bbd9660876b09700feb23906c136bb..68150e207ab351cd0c8b2765c7de1659c764f6a2 100644
--- a/src/NetConnection/ConnectionFunctionIdentity.h
+++ b/src/NetConnection/ConnectionFunctionIdentity.h
@@ -8,8 +8,6 @@
 #ifndef INC_4NEURO_CONNECTIONWEIGHTIDENTITY_H
 #define INC_4NEURO_CONNECTIONWEIGHTIDENTITY_H
 
-#include "../settings.h"
-
 #include "ConnectionFunctionGeneral.h"
 
 class ConnectionFunctionGeneral;
@@ -40,25 +38,25 @@ public:
     /**
      *
      */
-    LIB4NEURO_API ConnectionFunctionIdentity( );
+     ConnectionFunctionIdentity( );
 
     /**
      *
      */
-    LIB4NEURO_API ConnectionFunctionIdentity( size_t pidx );
+     ConnectionFunctionIdentity( size_t pidx );
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API double eval( std::vector<double> &parameter_space ) override;
+     double eval( std::vector<double> &parameter_space ) override;
 
     /**
      *
      * @param weight_gradient
      * @param alpha
      */
-    LIB4NEURO_API void eval_partial_derivative(std::vector<double> &parameter_space, std::vector<double> &weight_gradient, double alpha) override;
+     void eval_partial_derivative(std::vector<double> &parameter_space, std::vector<double> &weight_gradient, double alpha) override;
 };
 
 
diff --git a/src/Network/NeuralNetwork.cpp b/src/Network/NeuralNetwork.cpp
index e33c5b424a785756a283e48c5ff0c4d5127ab4fb..027b4380a69b0c51b323461b18b4f2ff02b2bb80 100644
--- a/src/Network/NeuralNetwork.cpp
+++ b/src/Network/NeuralNetwork.cpp
@@ -520,6 +520,66 @@ void NeuralNetwork::eval_single(std::vector<double> &input, std::vector<double>
     }
 }
 
+void NeuralNetwork::add_to_gradient_single( std::vector<double> &input, std::vector<double> &error_derivative, double error_scaling, std::vector<double> &gradient) {
+
+    std::vector<double> scaling_backprog( this->get_n_neurons( ) );
+    std::fill( scaling_backprog.begin(), scaling_backprog.end(), 0.0);
+
+    size_t bias_shift = this->get_n_weights();
+    size_t neuron_idx;
+    int bias_idx;
+    double neuron_potential, neuron_potential_t, neuron_bias, connection_weight;
+
+    NeuronDifferentiable *active_neuron;
+
+    /* initial error propagation */
+    std::vector<size_t>* current_layer = this->neuron_layers_feedforward->at(this->neuron_layers_feedforward->size() - 1);
+    //TODO might not work in the future as the output neurons could be permuted
+    for(size_t i = 0; i < current_layer->size(); ++i){
+        neuron_idx = current_layer->at( i );
+        scaling_backprog[neuron_idx] = error_derivative[ i ] * error_scaling;
+    }
+
+    /* we iterate through all the layers in reverse order and calculate partial derivatives scaled correspondingly */
+    for(size_t j = this->neuron_layers_feedforward->size(); j > 0; --j){
+
+        current_layer = this->neuron_layers_feedforward->at(j - 1);
+
+        for(size_t i = 0; i < current_layer->size(); ++i){
+
+            neuron_idx = current_layer->at( i );
+            active_neuron = dynamic_cast<NeuronDifferentiable*> (this->neurons->at( neuron_idx ));
+
+            if( active_neuron ){
+                bias_idx = this->neuron_bias_indices->at( neuron_idx );
+                neuron_potential = this->neuron_potentials->at( neuron_idx );
+
+                if( bias_idx >= 0 ){
+                    neuron_bias = this->neuron_biases->at( bias_idx );
+                    gradient[ bias_shift + bias_idx ] += scaling_backprog[neuron_idx] * active_neuron->activation_function_eval_derivative_bias( neuron_potential, neuron_bias );
+                    scaling_backprog[neuron_idx] *= active_neuron->activation_function_eval_derivative(neuron_potential, neuron_bias);
+                }
+
+                /* connections to lower level neurons */
+                for(auto c: *this->inward_adjacency->at( neuron_idx )){
+                    size_t ti = c.first;
+                    size_t ci = c.second;
+
+                    neuron_potential_t = this->neuron_potentials->at( ti );
+                    connection_weight = this->connection_list->at( ci )->eval( *this->connection_weights );
+
+                    this->connection_list->at( ci )->eval_partial_derivative( *this->get_parameter_ptr_weights(), gradient, neuron_potential_t * scaling_backprog[neuron_idx] );
+
+                    scaling_backprog[ti] += scaling_backprog[neuron_idx] * connection_weight;
+                }
+            }
+            else{
+                throw std::invalid_argument("Neuron used in backpropagation does not contain differentiable activation function!\n");
+            }
+        }
+    }
+}
+
 void NeuralNetwork::randomize_weights( ) {
 
     boost::random::mt19937 gen;
@@ -545,6 +605,11 @@ void NeuralNetwork::randomize_biases( ) {
     }
 }
 
+void NeuralNetwork::randomize_parameters() {
+    this->randomize_biases();
+    this->randomize_weights();
+}
+
 size_t NeuralNetwork::get_n_inputs() {
     return this->input_neuron_indices->size();
 }
@@ -655,17 +720,17 @@ void NeuralNetwork::analyze_layer_structure() {
         this->neuron_layers_feedforward = nullptr;
     }
 
-    if(this->neuron_layers_feedbackward){
-        for(auto e: *this->neuron_layers_feedbackward){
-            delete e;
-            e = nullptr;
-        }
-        delete this->neuron_layers_feedbackward;
-        this->neuron_layers_feedbackward = nullptr;
-    }
+//    if(this->neuron_layers_feedbackward){
+//        for(auto e: *this->neuron_layers_feedbackward){
+//            delete e;
+//            e = nullptr;
+//        }
+//        delete this->neuron_layers_feedbackward;
+//        this->neuron_layers_feedbackward = nullptr;
+//    }
 
     this->neuron_layers_feedforward = new std::vector<std::vector<size_t>*>(0);
-    this->neuron_layers_feedbackward = new std::vector<std::vector<size_t>*>(0);
+//    this->neuron_layers_feedbackward = new std::vector<std::vector<size_t>*>(0);
 
 
     auto n = this->neurons->size();
@@ -734,48 +799,48 @@ void NeuralNetwork::analyze_layer_structure() {
     }
 
 
-    /* feed backward analysis */
-    active_set_size[0] = 0;
-    active_set_size[1] = 0;
-
-    idx1 = 0;
-    idx2 = 1;
-
-    active_set_size[0] = this->get_n_outputs();
-    for(i = 0; i < this->get_n_outputs(); ++i){
-        active_eval_set[i] = this->output_neuron_indices->at(i);
-    }
-
-    while(active_set_size[idx1] > 0){
-
-        /* we add the current active set as the new outward layer */
-        std::vector<size_t> *new_feedbackward_layer = new std::vector<size_t>(active_set_size[idx1]);
-        this->neuron_layers_feedbackward->push_back( new_feedbackward_layer );
-
-        //we iterate through the active neurons and propagate the signal backward
-        for(i = 0; i < active_set_size[idx1]; ++i){
-            active_ni = active_eval_set[i + n * idx1];
-            new_feedbackward_layer->at( i ) = active_ni;
-
-            if(!this->inward_adjacency->at(active_ni)){
-                continue;
-            }
-
-            for(auto ni: *(this->inward_adjacency->at(active_ni))){
-                outward_saturation[ni.first]--;
-
-                if(outward_saturation[ni.first] == 0){
-                    active_eval_set[active_set_size[idx2] + n * idx2] = ni.first;
-                    active_set_size[idx2]++;
-                }
-            }
-        }
-
-        idx1 = idx2;
-        idx2 = (idx1 + 1) % 2;
-
-        active_set_size[idx2] = 0;
-    }
+//    /* feed backward analysis */
+//    active_set_size[0] = 0;
+//    active_set_size[1] = 0;
+//
+//    idx1 = 0;
+//    idx2 = 1;
+//
+//    active_set_size[0] = this->get_n_outputs();
+//    for(i = 0; i < this->get_n_outputs(); ++i){
+//        active_eval_set[i] = this->output_neuron_indices->at(i);
+//    }
+//
+//    while(active_set_size[idx1] > 0){
+//
+//        /* we add the current active set as the new outward layer */
+//        std::vector<size_t> *new_feedbackward_layer = new std::vector<size_t>(active_set_size[idx1]);
+//        this->neuron_layers_feedbackward->push_back( new_feedbackward_layer );
+//
+//        //we iterate through the active neurons and propagate the signal backward
+//        for(i = 0; i < active_set_size[idx1]; ++i){
+//            active_ni = active_eval_set[i + n * idx1];
+//            new_feedbackward_layer->at( i ) = active_ni;
+//
+//            if(!this->inward_adjacency->at(active_ni)){
+//                continue;
+//            }
+//
+//            for(auto ni: *(this->inward_adjacency->at(active_ni))){
+//                outward_saturation[ni.first]--;
+//
+//                if(outward_saturation[ni.first] == 0){
+//                    active_eval_set[active_set_size[idx2] + n * idx2] = ni.first;
+//                    active_set_size[idx2]++;
+//                }
+//            }
+//        }
+//
+//        idx1 = idx2;
+//        idx2 = (idx1 + 1) % 2;
+//
+//        active_set_size[idx2] = 0;
+//    }
 
     this->layers_analyzed = true;
 }
diff --git a/src/Network/NeuralNetwork.h b/src/Network/NeuralNetwork.h
index 7381ccd9dc2119d90d01fc23434e9873ec04c4f3..09aefbb3aada4de96bc3b31dec7a91523fba04a5 100644
--- a/src/Network/NeuralNetwork.h
+++ b/src/Network/NeuralNetwork.h
@@ -11,7 +11,7 @@
 #ifndef INC_4NEURO_NEURALNETWORK_H
 #define INC_4NEURO_NEURALNETWORK_H
 
-#include "../settings.h"
+
 
 #include <iostream>
 #include <vector>
@@ -182,17 +182,17 @@ public:
     /**
      *
      */
-    LIB4NEURO_API explicit NeuralNetwork();
+     explicit NeuralNetwork();
 
     /**
      *
      */
-    LIB4NEURO_API explicit NeuralNetwork(std::string filepath);
+     explicit NeuralNetwork(std::string filepath);
 
     /**
      *
      */
-    LIB4NEURO_API virtual ~NeuralNetwork();
+     virtual ~NeuralNetwork();
 
     /**
      * If possible, returns a neural net with 'input_neuron_indices' neurons as inputs and 'output_neuron_indices' as
@@ -202,20 +202,20 @@ public:
      * @param output_neuron_indices
      * @return
      */
-    LIB4NEURO_API NeuralNetwork* get_subnet(std::vector<size_t> &input_neuron_indices, std::vector<size_t> &output_neuron_indices);
+     NeuralNetwork* get_subnet(std::vector<size_t> &input_neuron_indices, std::vector<size_t> &output_neuron_indices);
 
     /**
      * Replaces the values in @{this->connection_weights} and @{this->neuron_biases} by the provided values
      * @param parameters
      */
-    LIB4NEURO_API virtual void copy_parameter_space(std::vector<double> *parameters);
+     virtual void copy_parameter_space(std::vector<double> *parameters);
 
     /**
      * Copies the pointers @{this->connection_weights} and @{this->neuron_biases} from the parental network, sets
      * flags to not delete the vectors in this object
      * @param parent_network
      */
-    LIB4NEURO_API virtual void set_parameter_space_pointers( NeuralNetwork &parent_network );
+     virtual void set_parameter_space_pointers( NeuralNetwork &parent_network );
 
     /**
      *
@@ -223,14 +223,23 @@ public:
      * @param output
      * @param custom_weights_and_biases
      */
-    LIB4NEURO_API virtual void eval_single(std::vector<double> &input, std::vector<double> &output, std::vector<double> *custom_weights_and_biases = nullptr);
+     virtual void eval_single(std::vector<double> &input, std::vector<double> &output, std::vector<double> *custom_weights_and_biases = nullptr);
+
+
+
+     /**
+      *
+      * @param error_derivative
+      * @param gradient
+      */
+     virtual void add_to_gradient_single( std::vector<double> &input, std::vector<double> &error_derivative, double error_scaling, std::vector<double> &gradient);
 
     /**
      * Adds a new neuron to the list of neurons. Also assigns a valid bias value to its activation function
      * @param[in] n
      * @return
      */
-    LIB4NEURO_API size_t add_neuron(Neuron* n, BIAS_TYPE bt = BIAS_TYPE::NEXT_BIAS, size_t bias_idx = 0);
+     size_t add_neuron(Neuron* n, BIAS_TYPE bt = BIAS_TYPE::NEXT_BIAS, size_t bias_idx = 0);
 
     /**
      *
@@ -238,7 +247,7 @@ public:
      * @param n2_idx
      * @return
      */
-    LIB4NEURO_API size_t add_connection_simple(size_t n1_idx, size_t n2_idx, SIMPLE_CONNECTION_TYPE sct = SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT, size_t weight_idx = 0 );
+     size_t add_connection_simple(size_t n1_idx, size_t n2_idx, SIMPLE_CONNECTION_TYPE sct = SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT, size_t weight_idx = 0 );
 
     /**
      * Take the existing connection with index 'connection_idx' in 'parent_network' and adds it to the structure of this
@@ -248,94 +257,99 @@ public:
      * @param connection_idx
      * @param parent_network
      */
-    LIB4NEURO_API void add_existing_connection(size_t n1_idx, size_t n2_idx, size_t connection_idx, NeuralNetwork &parent_network );
+     void add_existing_connection(size_t n1_idx, size_t n2_idx, size_t connection_idx, NeuralNetwork &parent_network );
 
 
     /**
      *
      */
-    LIB4NEURO_API void randomize_weights();
+     void randomize_weights();
 
     /**
      *
      */
-    LIB4NEURO_API void randomize_biases();
+     void randomize_biases();
+
+     /**
+      *
+      */
+     void randomize_parameters();
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API virtual size_t get_n_inputs();
+     virtual size_t get_n_inputs();
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API virtual size_t get_n_outputs();
+     virtual size_t get_n_outputs();
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API virtual size_t get_n_weights();
+     virtual size_t get_n_weights();
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API virtual size_t get_n_biases();
+     virtual size_t get_n_biases();
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API virtual int get_neuron_bias_index( size_t neuron_idx );
+     virtual int get_neuron_bias_index( size_t neuron_idx );
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API virtual size_t get_n_neurons();
+     virtual size_t get_n_neurons();
 
     /**
      *
      * @param input_neurons_indices
      */
-    LIB4NEURO_API void specify_input_neurons(std::vector<size_t> &input_neurons_indices);
+     void specify_input_neurons(std::vector<size_t> &input_neurons_indices);
 
     /**
      *
      * @param output_neurons_indices
      */
-    LIB4NEURO_API void specify_output_neurons(std::vector<size_t> &output_neurons_indices);
+     void specify_output_neurons(std::vector<size_t> &output_neurons_indices);
 
     /**
      *
      */
-    LIB4NEURO_API void print_weights();
+     void print_weights();
 
     /**
      *
      */
-    LIB4NEURO_API void print_stats();
+     void print_stats();
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API std::vector<double>* get_parameter_ptr_weights();
+     virtual std::vector<double>* get_parameter_ptr_weights();
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API std::vector<double>* get_parameter_ptr_biases();
+     virtual std::vector<double>* get_parameter_ptr_biases();
 
     /**
      *
      * @param filepath
      */
-    LIB4NEURO_API void save_text(std::string filepath);
+     void save_text(std::string filepath);
 
 };
 
diff --git a/src/Network/NeuralNetworkSum.cpp b/src/Network/NeuralNetworkSum.cpp
index 2b665fe77060b926d534c6e7149c653f8959be12..b0cfc1ccec1eae545230da5ed0ced4873cba381f 100644
--- a/src/Network/NeuralNetworkSum.cpp
+++ b/src/Network/NeuralNetworkSum.cpp
@@ -70,6 +70,20 @@ void NeuralNetworkSum::eval_single(std::vector<double> &input, std::vector<doubl
 
 }
 
+void NeuralNetworkSum::add_to_gradient_single( std::vector<double> &input, std::vector<double> &error_derivative, double error_scaling, std::vector<double> &gradient) {
+
+    NeuralNetwork *SUM;
+
+    for(size_t ni = 0; ni < this->summand->size(); ++ni){
+        SUM = this->summand->at(ni);
+
+        if( SUM ){
+            double alpha = this->summand_coefficient->at(ni)->eval(input);
+            SUM->add_to_gradient_single( input, error_derivative, alpha * error_scaling, gradient );
+        }
+    }
+}
+
 size_t NeuralNetworkSum::get_n_weights(){
     //TODO insufficient solution, assumes the networks share weights
     if(this->summand){
@@ -113,4 +127,21 @@ size_t NeuralNetworkSum::get_n_outputs() {
     }
 
     return 0;
-}
\ No newline at end of file
+}
+
+std::vector<double>* NeuralNetworkSum::get_parameter_ptr_weights() {
+    if(this->summand){
+        return this->summand->at(0)->get_parameter_ptr_weights();
+    }
+
+    return nullptr;
+}
+
+std::vector<double>* NeuralNetworkSum::get_parameter_ptr_biases() {
+    if(this->summand){
+        return this->summand->at(0)->get_parameter_ptr_biases();
+    }
+
+    return nullptr;
+}
+
diff --git a/src/Network/NeuralNetworkSum.h b/src/Network/NeuralNetworkSum.h
index 3ce90b748235c33e1893076752d341745e923fcb..8ce7dcd6ecab44d62517c4cb09040e333a106405 100644
--- a/src/Network/NeuralNetworkSum.h
+++ b/src/Network/NeuralNetworkSum.h
@@ -8,7 +8,7 @@
 #ifndef INC_4NEURO_NEURALNETWORKSUM_H
 #define INC_4NEURO_NEURALNETWORKSUM_H
 
-#include "../settings.h"
+
 
 #include "NeuralNetwork.h"
 
@@ -29,42 +29,63 @@ private:
     };
 
 public:
-    LIB4NEURO_API NeuralNetworkSum( );
-    LIB4NEURO_API virtual ~NeuralNetworkSum( );
+     NeuralNetworkSum( );
+     virtual ~NeuralNetworkSum( );
+
+     void add_network( NeuralNetwork *net, std::string expression_string );
 
-    LIB4NEURO_API void add_network( NeuralNetwork *net, std::string expression_string );
+     void eval_single(std::vector<double> &input, std::vector<double> &output, std::vector<double> *custom_weights_and_biases) override;
+
+    /**
+     *
+     * @param error_derivative
+     * @param gradient
+     */
+    void add_to_gradient_single( std::vector<double> &input, std::vector<double> &error_derivative, double error_scaling, std::vector<double> &gradient) override;
 
-    LIB4NEURO_API virtual void eval_single(std::vector<double> &input, std::vector<double> &output, std::vector<double> *custom_weights_and_biases = nullptr);
+    /**
+     *
+     * @return
+     */
+     size_t get_n_inputs() override;
+
+    /**
+     *
+     * @return
+     */
+     size_t get_n_outputs() override;
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API virtual size_t get_n_inputs() override;
+     size_t get_n_weights() override;
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API size_t get_n_outputs() override;
+     size_t get_n_biases() override;
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API virtual size_t get_n_weights() override;
+     size_t get_n_neurons() override;
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API virtual size_t get_n_biases() override;
+     //TODO only works if all the networks share the same parameters
+    std::vector<double>* get_parameter_ptr_weights() override;
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API virtual size_t get_n_neurons() override;
+    //TODO only works if all the networks share the same parameters
+    std::vector<double>* get_parameter_ptr_biases() override;
 };
 
 
diff --git a/src/Neuron/Neuron.h b/src/Neuron/Neuron.h
index 275f1ca7d96def5fc6dce0d569bcccf778a74f73..378d013d40b1a5cb4c21da0b6f9fee7afab4de43 100644
--- a/src/Neuron/Neuron.h
+++ b/src/Neuron/Neuron.h
@@ -7,10 +7,10 @@
  * @date 2017 - 2018
  */
 //TODO  correct docs in this and all child classes
- #ifndef NEURON_H_
- #define NEURON_H_
+#ifndef NEURON_H_
+#define NEURON_H_
+
 
- #include "../settings.h"
 
 #include <boost/serialization/base_object.hpp>
 #include <vector>
@@ -34,23 +34,22 @@ public:
      * this level deallocates the array 'activation_function_parameters'
      * also deallocates the OUTGOING connections
      */
-    LIB4NEURO_API virtual ~Neuron();
+     virtual ~Neuron();
 
     /**
      * Performs the activation function and returns the result
      */
-    LIB4NEURO_API virtual double activate( double x, double b ) = 0;
+     virtual double activate( double x, double b ) = 0;
 
 }; /* end of Neuron class */
 
-
 /**
  * Class serving as an interface providing 'activation_function_eval_partial_derivative',
  * 'activation_function_eval_derivative',  'get_partial_derivative' and
  * 'get_derivative' methods.
  */
-class IDifferentiable {
-
+class NeuronDifferentiable: public Neuron {
+public:
     /**
      * Calculates the derivative with respect to the argument, ie the 'potential'
      * @return f'(x), where 'f(x)' is the activation function and 'x' = 'potential'
@@ -70,7 +69,7 @@ class IDifferentiable {
      * @return
      */
     virtual Neuron* get_derivative( ) = 0;
+};
 
-}; /* end of IDifferentiable class */
 
  #endif /* NEURON_H_ */
\ No newline at end of file
diff --git a/src/Neuron/NeuronBinary.h b/src/Neuron/NeuronBinary.h
index 15891181ece3c961084b20413bda4b685da6889e..8cdb0afd3e03e9ce47d65033fbfe63e5d5e4984e 100644
--- a/src/Neuron/NeuronBinary.h
+++ b/src/Neuron/NeuronBinary.h
@@ -10,7 +10,7 @@
 #ifndef INC_4NEURO_NEURONBINARY_H
 #define INC_4NEURO_NEURONBINARY_H
 
-#include "../settings.h"
+
 
 #include "Neuron.h"
 
@@ -33,12 +33,12 @@ public:
      * @param[in] threshold Denotes when the neuron is activated
      * When neuron potential exceeds 'threshold' value it becomes excited
      */
-    LIB4NEURO_API explicit NeuronBinary( );
+     explicit NeuronBinary( );
 
     /**
      * Performs the activation function and stores the result into the 'state' property
      */
-    LIB4NEURO_API double activate( double x, double b ) override;
+     double activate( double x, double b ) override;
 
 };
 
diff --git a/src/Neuron/NeuronConstant.h b/src/Neuron/NeuronConstant.h
index 215cf2a182c335b90853261b16ae1d5076893490..0c648263f57e002a07e87c1c7668c465dc182557 100644
--- a/src/Neuron/NeuronConstant.h
+++ b/src/Neuron/NeuronConstant.h
@@ -8,11 +8,11 @@
 #ifndef INC_4NEURO_NEURONCONSTANT_H
 #define INC_4NEURO_NEURONCONSTANT_H
 
-#include "../settings.h"
+
 
 #include "Neuron.h"
 
-class NeuronConstant: public Neuron, public IDifferentiable {
+class NeuronConstant: public NeuronDifferentiable {
 private:
     friend class boost::serialization::access;
 
@@ -32,12 +32,12 @@ public:
      * f(x) = c
      * @param[in] c Constant value
      */
-    LIB4NEURO_API explicit NeuronConstant( double c = 0.0 );
+     explicit NeuronConstant( double c = 0.0 );
 
     /**
      * Evaluates and returns 'c'
      */
-    LIB4NEURO_API double activate( double x, double b ) override;
+     double activate( double x, double b ) override;
 
     /**
      * Calculates the partial derivative of the activation function
@@ -45,19 +45,19 @@ public:
      * @return Partial derivative of the activation function according to the
      * 'bias' parameter. Returns 0.0
      */
-    LIB4NEURO_API double activation_function_eval_derivative_bias( double x, double b ) override;
+     double activation_function_eval_derivative_bias( double x, double b ) override;
 
     /**
      * Calculates d/dx of (c) at point x
      * @return 0.0
      */
-    LIB4NEURO_API double activation_function_eval_derivative( double x, double b ) override;
+     double activation_function_eval_derivative( double x, double b ) override;
 
     /**
      * Returns a pointer to a Neuron with derivative as its activation function
      * @return
      */
-    LIB4NEURO_API Neuron* get_derivative( ) override;
+     Neuron* get_derivative( ) override;
 };
 
 #endif //INC_4NEURO_NEURONCONSTANT_H
diff --git a/src/Neuron/NeuronLinear.h b/src/Neuron/NeuronLinear.h
index 390916f71706b7f60357e302ebc29739dbd46b50..4b2ef904a5ac874dcf95102d80b9f3aca2c46540 100644
--- a/src/Neuron/NeuronLinear.h
+++ b/src/Neuron/NeuronLinear.h
@@ -10,8 +10,6 @@
 #ifndef INC_4NEURO_NEURONLINEAR_H
 #define INC_4NEURO_NEURONLINEAR_H
 
-#include "../settings.h"
-
 #include "Neuron.h"
 #include "NeuronConstant.h"
 #include <boost/serialization/base_object.hpp>
@@ -21,7 +19,7 @@
  * Linear neuron class - uses activation function in the form f(x)=a*x + b,
  * 'x' being the neuron's potential
  */
-class NeuronLinear:public Neuron, public IDifferentiable {
+class NeuronLinear:public NeuronDifferentiable {
 private:
     friend class boost::serialization::access;
 
@@ -37,12 +35,12 @@ public:
      * f(x) = x + b
      * @param[in] b Bias
      */
-    LIB4NEURO_API explicit NeuronLinear( );
+     explicit NeuronLinear( );
 
     /**
      * Evaluates 'x + b' and stores the result into the 'state' property
      */
-    LIB4NEURO_API double activate( double x, double b ) override;
+     double activate( double x, double b ) override;
 
     /**
      * Calculates the partial derivative of the activation function
@@ -50,19 +48,19 @@ public:
      * @return Partial derivative of the activation function according to the
      * 'bias' parameter. Returns 1.0
      */
-    LIB4NEURO_API double activation_function_eval_derivative_bias( double x, double b ) override;
+     double activation_function_eval_derivative_bias( double x, double b ) override;
 
     /**
      * Calculates d/dx of (x + b) at point x
      * @return 1.0
      */
-    LIB4NEURO_API double activation_function_eval_derivative( double x, double b ) override;
+     double activation_function_eval_derivative( double x, double b ) override;
 
     /**
      * Returns a pointer to a Neuron with derivative as its activation function
      * @return
      */
-    LIB4NEURO_API Neuron* get_derivative( ) override;
+     Neuron* get_derivative( ) override;
 
 };
 
diff --git a/src/Neuron/NeuronLogistic.h b/src/Neuron/NeuronLogistic.h
index 950063efa06ce86a95b99ea54c36b2361b3a51a1..bd7361d7cf848e86334750ee563a9cf0115fa1f7 100644
--- a/src/Neuron/NeuronLogistic.h
+++ b/src/Neuron/NeuronLogistic.h
@@ -10,14 +10,14 @@
 #ifndef INC_4NEURO_NEURONLOGISTIC_H
 #define INC_4NEURO_NEURONLOGISTIC_H
 
-#include "../settings.h"
+
 
 #include <cmath>
 #include "Neuron.h"
 #include "../constants.h"
 
 
-class NeuronLogistic:public Neuron, public IDifferentiable {
+class NeuronLogistic:public NeuronDifferentiable {
     friend class boost::serialization::access;
 
 protected:
@@ -32,12 +32,12 @@ public:
      * Constructs the object of the Logistic neuron with activation function
      * f(x) = (1 + e^(-x + b))^(-1)
      */
-    LIB4NEURO_API explicit NeuronLogistic( );
+     explicit NeuronLogistic( );
 
     /**
      * Evaluates '(1 + e^(-x + b))^(-1)' and stores the result into the 'state' property
      */
-    LIB4NEURO_API virtual double activate( double x, double b ) override;
+     virtual double activate( double x, double b ) override;
 
     /**
      * Calculates the partial derivative of the activation function
@@ -45,18 +45,18 @@ public:
      * @return Partial derivative of the activation function according to the
      * bias, returns: -e^(b - x)/(e^(b - x) + 1)^2
      */
-    LIB4NEURO_API virtual double activation_function_eval_derivative_bias( double x, double b ) override;
+     virtual double activation_function_eval_derivative_bias( double x, double b ) override;
     /**
      * Calculates d/dx of (1 + e^(-x + b))^(-1)
      * @return e^(b - x)/(e^(b - x) + 1)^2
      */
-    LIB4NEURO_API virtual double activation_function_eval_derivative( double x, double b ) override;
+     virtual double activation_function_eval_derivative( double x, double b ) override;
 
     /**
      * Returns a pointer to a Neuron with derivative as its activation function
      * @return
      */
-    LIB4NEURO_API virtual NeuronLogistic* get_derivative( ) override;
+     virtual NeuronLogistic* get_derivative( ) override;
 };
 
 
@@ -77,12 +77,12 @@ public:
      * f(x) = e^(b - x)/(e^(b - x) + 1)^2
      * @param[in] b Bias
      */
-    LIB4NEURO_API explicit NeuronLogistic_d1( );
+     explicit NeuronLogistic_d1( );
 
     /**
      * Evaluates 'e^(b - x)/(e^(b - x) + 1)^2' and returns the result
      */
-    LIB4NEURO_API virtual double activate( double x, double b ) override;
+     virtual double activate( double x, double b ) override;
 
     /**
      * Calculates the partial derivative of the activation function
@@ -90,19 +90,19 @@ public:
      * @return Partial derivative of the activation function according to the
      * bias, returns: (e^(b + x) (e^x - e^b))/(e^b + e^x)^3
      */
-    LIB4NEURO_API virtual double activation_function_eval_derivative_bias( double x, double b ) override;
+     virtual double activation_function_eval_derivative_bias( double x, double b ) override;
 
     /**
      * Calculates d/dx of  e^(b - x)*(1 + e^(b - x))^(-2)
      * @return  (e^(b + x) (e^b - e^x))/(e^b + e^x)^3
      */
-    LIB4NEURO_API virtual double activation_function_eval_derivative( double x, double b ) override;
+     virtual double activation_function_eval_derivative( double x, double b ) override;
 
     /**
      * Returns a pointer to a Neuron with derivative as its activation function
      * @return
      */
-    LIB4NEURO_API virtual NeuronLogistic* get_derivative( ) override;
+     virtual NeuronLogistic* get_derivative( ) override;
 };
 
 
@@ -125,12 +125,12 @@ public:
      * Constructs the object of the Logistic neuron with activation function
      * f(x) = (e^(b + x) (e^b - e^x))/(e^b + e^x)^3
      */
-    LIB4NEURO_API explicit NeuronLogistic_d2( );
+     explicit NeuronLogistic_d2( );
 
     /**
      * Evaluates '(e^(b + x) (e^b - e^x))/(e^b + e^x)^3' and returns the result
      */
-    LIB4NEURO_API virtual double activate( double x, double b ) override;
+     virtual double activate( double x, double b ) override;
 
     /**
      * Calculates the partial derivative of the activation function
@@ -138,19 +138,19 @@ public:
      * @return Partial derivative of the activation function according to the
      * bias, returns: -(e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
      */
-    LIB4NEURO_API virtual double activation_function_eval_derivative_bias( double x, double b ) override;
+     virtual double activation_function_eval_derivative_bias( double x, double b ) override;
 
     /**
      * Calculates d/dx of  (e^(b + x) (e^b - e^x))/(e^b + e^x)^3
      * @return (e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
      */
-    LIB4NEURO_API virtual double activation_function_eval_derivative( double x, double b ) override;
+     virtual double activation_function_eval_derivative( double x, double b ) override;
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API virtual NeuronLogistic* get_derivative( ) override;
+     virtual NeuronLogistic* get_derivative( ) override;
 
 };
 
diff --git a/src/Solvers/DESolver.cpp b/src/Solvers/DESolver.cpp
index 7889202723aa495cd16ad20f08146a6f7c78fa9f..bfebaecf7aeb7a83beb8b9fdf4cf109613df7283 100644
--- a/src/Solvers/DESolver.cpp
+++ b/src/Solvers/DESolver.cpp
@@ -356,12 +356,19 @@ void DESolver::solve( ILearningMethods &learning_method ) {
         }
     }
 
-    this->solution->randomize_weights();
-    this->solution->randomize_biases();
+    printf("error before optimization: %f\n", total_error.eval( nullptr ));
 
     learning_method.optimize( total_error );
-
     this->solution->copy_parameter_space( learning_method.get_parameters( ) );
+
+    printf("error after optimization: %f\n", total_error.eval( nullptr ));
+}
+
+void DESolver::randomize_parameters( ) {
+
+    MultiIndex alpha( this->dim_i );
+    this->map_multiindices2nn[ alpha ]->randomize_parameters( );
+
 }
 
 NeuralNetwork* DESolver::get_solution( MultiIndex &alpha ) {
diff --git a/src/Solvers/DESolver.h b/src/Solvers/DESolver.h
index 68a3f978c09143b05feaa7c8287dbb5ff4173961..cf414fc8e9e92fca8f770d3e593d441b931f65a8 100644
--- a/src/Solvers/DESolver.h
+++ b/src/Solvers/DESolver.h
@@ -13,7 +13,7 @@
 #ifndef INC_4NEURO_PDESOLVER_H
 #define INC_4NEURO_PDESOLVER_H
 
-#include "../settings.h"
+
 
 #include <map>
 #include "../DataSet/DataSet.h"
@@ -44,7 +44,7 @@ public:
      *
      * @param dimension
      */
-    LIB4NEURO_API MultiIndex(size_t dimension);
+     MultiIndex(size_t dimension);
 
 
     /**
@@ -52,13 +52,13 @@ public:
      * @param index
      * @param value
      */
-    LIB4NEURO_API void set_partial_derivative(size_t index, size_t value);
+     void set_partial_derivative(size_t index, size_t value);
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API std::vector<size_t>* get_partial_derivatives_degrees( ) ;
+     std::vector<size_t>* get_partial_derivatives_degrees( ) ;
 
 
     /**
@@ -66,19 +66,19 @@ public:
      * @param rhs
      * @return
      */
-    LIB4NEURO_API bool operator <(const MultiIndex& rhs) const;
+     bool operator <(const MultiIndex& rhs) const;
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API std::string to_string( ) const ;
+     std::string to_string( ) const ;
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API size_t get_degree( ) const ;
+     size_t get_degree( ) const ;
 };
 
 
@@ -111,12 +111,12 @@ public:
      * @param n_inputs
      * @param m
      */
-    LIB4NEURO_API DESolver( size_t n_equations, size_t n_inputs, size_t m );
+     DESolver( size_t n_equations, size_t n_inputs, size_t m );
 
     /**
      * default destructor
      */
-    LIB4NEURO_API ~DESolver( );
+     ~DESolver( );
 
     /**
      * Adds a new summand multiplied by 'beta' into the 'equation_idx'-th differential equation
@@ -124,7 +124,7 @@ public:
      * @param alpha
      * @param beta
      */
-    LIB4NEURO_API void add_to_differential_equation( size_t equation_idx, MultiIndex &alpha, std::string expression_string );
+     void add_to_differential_equation( size_t equation_idx, MultiIndex &alpha, std::string expression_string );
 
 
     /**
@@ -132,7 +132,7 @@ public:
      * @param equation_idx
      * @param expression_string
      */
-    LIB4NEURO_API void add_to_differential_equation( size_t equation_idx, std::string expression_string );
+     void add_to_differential_equation( size_t equation_idx, std::string expression_string );
 
     /**
      * Sets the error function for the differential equation with the corresponding index
@@ -140,28 +140,35 @@ public:
      * @param F
      * @param conditions
      */
-    LIB4NEURO_API void set_error_function(size_t equation_idx, ErrorFunctionType F, DataSet *conditions);
-
+     void set_error_function(size_t equation_idx, ErrorFunctionType F, DataSet *conditions);
 
+    /**
+     *
+     * @param learning_method
+     */
+     void solve( ILearningMethods &learning_method );
 
-    LIB4NEURO_API void solve( ILearningMethods &learning_method );
+     /**
+      *
+      */
+     void randomize_parameters( );
 
     /**
      * returns the pointer to the object representing the given partial derivative of the solution
      * @return
      */
-    LIB4NEURO_API NeuralNetwork* get_solution( MultiIndex &alpha );
+     NeuralNetwork* get_solution( MultiIndex &alpha );
 
     /**
      * For testing purposes only
      */
-     LIB4NEURO_API double eval_equation( size_t equation_idx, std::vector<double> *weights_and_biases, std::vector<double> &input );
+      double eval_equation( size_t equation_idx, std::vector<double> *weights_and_biases, std::vector<double> &input );
 
      /**
       * For testing purposes only
       * @return
       */
-     LIB4NEURO_API double eval_total_error( std::vector<double> &weights_and_biases );
+      double eval_total_error( std::vector<double> &weights_and_biases );
 };
 
 
diff --git a/src/examples/main.cpp b/src/examples/main.cpp
index c2d3a38cf22ae01b7795437630e918a7fa605c10..bfa0ee8854fd65d6726556cd1d7ff6aae926867f 100644
--- a/src/examples/main.cpp
+++ b/src/examples/main.cpp
@@ -12,7 +12,7 @@
 #include <utility>
 #include <algorithm>
 
-#include "4neuro.h"
+#include "lib4neuro.h"
 
 
 int main(int argc, char** argv){
diff --git a/src/examples/net_test_1.cpp b/src/examples/net_test_1.cpp
index 0cd91a097b7a6bf05167a7d71e8d71153fd5ca71..480ac51f16feb17b77338c779198bd0bb6678f6c 100644
--- a/src/examples/net_test_1.cpp
+++ b/src/examples/net_test_1.cpp
@@ -7,12 +7,74 @@
 //
 
 #include <vector>
+#include <iostream>
 
-#include "4neuro.h"
+#include "lib4neuro.h"
+#include "../LearningMethods/GradientDescent.h"
+
+void optimize_via_particle_swarm( NeuralNetwork &net, ErrorFunction &ef ){
+
+    /* TRAINING METHOD SETUP */
+    std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
+
+    for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
+        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i + 1] = 10;
+    }
+
+    double c1 = 1.7;
+    double c2 = 1.7;
+    double w = 0.7;
+    size_t n_particles = 50;
+    size_t iter_max = 1000;
+
+    /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
+     * terminating criterion is met */
+    double gamma = 0.5;
+
+    /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
+     * terminating criterion is met ('n' is the total number of particles) */
+    double epsilon = 0.02;
+    double delta = 0.7;
+
+    ParticleSwarm swarm_01(
+            &domain_bounds,
+            c1,
+            c2,
+            w,
+            gamma,
+            epsilon,
+            delta,
+            n_particles,
+            iter_max
+    );
+    swarm_01.optimize( ef );
+
+    std::vector<double> *parameters = swarm_01.get_parameters();
+    net.copy_parameter_space(parameters);
+
+    /* ERROR CALCULATION */
+    std::cout << "Run finished! Error of the network[Particle swarm]: " << ef.eval( nullptr ) << std::endl;
+    std::cout << "***********************************************************************************************************************" <<std::endl;
+}
+
+void optimize_via_gradient_descent( NeuralNetwork &net, ErrorFunction &ef ){
+
+    std::cout << "***********************************************************************************************************************" <<std::endl;
+    GradientDescent gd( 1e-6, 1000 );
+
+    gd.optimize( ef );
+
+    std::vector<double> *parameters = gd.get_parameters();
+    net.copy_parameter_space(parameters);
+
+    /* ERROR CALCULATION */
+    std::cout << "Run finished! Error of the network[Gradient descent]: " << ef.eval( nullptr ) << std::endl;
+}
 
 int main() {
 
-    std::cout << "Running lib4neuro example   1: Basic use of the particle swarm method to train a simple network with few linear neurons" << std::endl;
+    std::cout << "Running lib4neuro example   1: Basic use of the particle swarm or gradient method to train a simple network with few linear neurons" << std::endl;
     std::cout << "***********************************************************************************************************************" <<std::endl;
     std::cout << "The code attempts to find an approximate solution to the system of equations below:" << std::endl;
     std::cout << "0 * w1 + 1 * w2 = 0.50" << std::endl;
@@ -55,7 +117,7 @@ int main() {
     net.add_connection_simple(idx1, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
     net.add_connection_simple(idx2, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
 
-    //net.randomize_weights();
+
 
     /* specification of the input/output neurons */
     std::vector<size_t> net_input_neurons_indices(2);
@@ -70,60 +132,15 @@ int main() {
     /* ERROR FUNCTION SPECIFICATION */
     MSE mse(&net, &ds);
 
-    /* TRAINING METHOD SETUP */
-    std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
-
-    for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
-        domain_bounds[2 * i] = -10;
-        domain_bounds[2 * i + 1] = 10;
-    }
-
-    double c1 = 1.7;
-    double c2 = 1.7;
-    double w = 0.7;
-    size_t n_particles = 50;
-    size_t iter_max = 1000;
-
-    /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
-     * terminating criterion is met */
-    double gamma = 0.5;
-
-    /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
-     * terminating criterion is met ('n' is the total number of particles) */
-    double epsilon = 0.02;
-    double delta = 0.7;
+    /* PARTICLE SWARM LEARNING */
+    net.randomize_parameters();
+    optimize_via_particle_swarm( net, mse );
 
-    ParticleSwarm swarm_01(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            iter_max
-    );
-    swarm_01.optimize( mse );
 
-    std::vector<double> *parameters = swarm_01.get_parameters();
-    net.copy_parameter_space(parameters);
+    /* GRADIENT DESCENT LEARNING */
+    net.randomize_parameters();
+    optimize_via_gradient_descent( net, mse );
 
-    printf("w1 = %10.7f\n", parameters->at( 0 ));
-    printf("w2 = %10.7f\n", parameters->at( 1 ));
-    std::cout << "***********************************************************************************************************************" <<std::endl;
-    /* ERROR CALCULATION */
-    double error = 0.0;
-    inp = {0, 1};
-    net.eval_single( inp, out );
-    error += (0.5 - out[0]) * (0.5 - out[0]);
-    std::cout << "x = (0,   1), expected output: 0.50, real output: " << out[0] << std::endl;
-
-    inp = {1, 0.5};
-    net.eval_single( inp, out );
-    error += (0.75 - out[0]) * (0.75 - out[0]);
-    std::cout << "x = (1, 0.5), expected output: 0.75, real output: " << out[0] << std::endl;
-    std::cout << "Run finished! Error of the network: " << 0.5 * error << std::endl;
 
 
 
diff --git a/src/examples/net_test_2.cpp b/src/examples/net_test_2.cpp
index 72f1dfd769c7cacba59440c25b8926f23b75876d..c54a3ce001cde245de584ec446b065a101cd3574 100644
--- a/src/examples/net_test_2.cpp
+++ b/src/examples/net_test_2.cpp
@@ -8,7 +8,66 @@
 
 #include <vector>
 
-#include "4neuro.h"
+#include "lib4neuro.h"
+
+void optimize_via_particle_swarm( NeuralNetwork &net, ErrorFunction &ef ){
+
+    /* TRAINING METHOD SETUP */
+    std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
+
+    for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
+        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i + 1] = 10;
+    }
+
+    double c1 = 1.7;
+    double c2 = 1.7;
+    double w = 0.7;
+    size_t n_particles = 50;
+    size_t iter_max = 1000;
+
+    /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
+     * terminating criterion is met */
+    double gamma = 0.5;
+
+    /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
+     * terminating criterion is met ('n' is the total number of particles) */
+    double epsilon = 0.02;
+    double delta = 0.7;
+
+    ParticleSwarm swarm_01(
+            &domain_bounds,
+            c1,
+            c2,
+            w,
+            gamma,
+            epsilon,
+            delta,
+            n_particles,
+            iter_max
+    );
+    swarm_01.optimize( ef );
+
+    std::vector<double> *parameters = swarm_01.get_parameters();
+    net.copy_parameter_space(parameters);
+
+    std::cout << "Run finished! Error of the network[Particle swarm]: " << ef.eval( nullptr ) << std::endl;
+    std::cout << "***********************************************************************************************************************" <<std::endl;
+}
+
+void optimize_via_gradient_descent( NeuralNetwork &net, ErrorFunction &ef ){
+
+    GradientDescent gd( 1e-6, 1000 );
+
+    gd.optimize( ef );
+
+    std::vector<double> *parameters = gd.get_parameters();
+    net.copy_parameter_space(parameters);
+
+    /* ERROR CALCULATION */
+    std::cout << "Run finished! Error of the network[Gradient descent]: " << ef.eval( nullptr )<< std::endl;
+    std::cout << "***********************************************************************************************************************" <<std::endl;
+}
 
 int main() {
     std::cout << "Running lib4neuro example   2: Basic use of the particle swarm method to train a network with five linear neurons and repeating edge weights" << std::endl;
@@ -63,8 +122,6 @@ int main() {
     net.add_connection_simple(idx2, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 1
     net.add_connection_simple(idx4, idx5, SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT, 0); // AGAIN weight index 0 - same weight!
 
-    net.randomize_weights();
-
     /* specification of the input/output neurons */
     std::vector<size_t> net_input_neurons_indices(3);
     std::vector<size_t> net_output_neurons_indices(2);
@@ -84,75 +141,14 @@ int main() {
     /* COMPLEX ERROR FUNCTION SPECIFICATION */
     MSE mse(&net, &ds);
 
-//    double weights[2] = {-0.18012411, -0.17793740};
-//    double weights[2] = {1, 1};
-
-//    printf("evaluation of error at point (%f, %f) => %f\n", weights[0], weights[1], mse.eval(weights));
-
-    /* TRAINING METHOD SETUP */
-    std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
-
-    for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
-        domain_bounds[2 * i] = -10;
-        domain_bounds[2 * i + 1] = 10;
-    }
-
-    double c1 = 1.7;
-    double c2 = 1.7;
-    double w = 0.7;
-    size_t n_particles = 50;
-    size_t iter_max = 1000;
-
-    /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
-     * terminating criterion is met */
-    double gamma = 0.5;
-
-    /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
-     * terminating criterion is met ('n' is the total number of particles) */
-    double epsilon = 0.02;
-    double delta = 0.7;
-
-    ParticleSwarm swarm_01(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            iter_max
-    );
-    swarm_01.optimize( mse );
-
-    std::vector<double> *parameters = swarm_01.get_parameters();
-    net.copy_parameter_space(parameters);
-
-    printf("w1 = %10.7f\n", parameters->at( 0 ));
-    printf("w2 = %10.7f\n", parameters->at( 1 ));
-    printf("b1 = %10.7f\n", parameters->at( 2 ));
-    printf("b2 = %10.7f\n", parameters->at( 3 ));
-    printf("b3 = %10.7f\n", parameters->at( 4 ));
-    std::cout << "***********************************************************************************************************************" <<std::endl;
-
-    /* ERROR CALCULATION */
-    double error = 0.0;
-    inp = {0, 1, 0};
-    net.eval_single( inp, out );
-    error += (0.5 - out[0]) * (0.5 - out[0]) + (0.0 - out[1]) * (0.0 - out[1]);
-    printf("x = (%4.2f, %4.2f, %4.2f), expected output: (%4.2f, %4.2f), real output: (%10.7f, %10.7f)\n", inp[0], inp[1], inp[2], 0.5, 0.0, out[0], out[1]);
-
-    inp = {1, 0.5, 0};
-    net.eval_single( inp, out );
-    error += (0.75 - out[0]) * (0.75 - out[0]) + (0.0 - out[1]) * (0.0 - out[1]);
-    printf("x = (%4.2f, %4.2f, %4.2f), expected output: (%4.2f, %4.2f), real output: (%10.7f, %10.7f)\n", inp[0], inp[1], inp[2], 0.75, 0.0, out[0], out[1]);
+    /* PARTICLE SWARM LEARNING */
+    net.randomize_weights();
+    optimize_via_particle_swarm( net, mse );
 
-    inp = {0, 0, 1.25};
-    net.eval_single( inp, out );
-    error += (0.0 - out[0]) * (0.0 - out[0]) + (0.63 - out[1]) * (0.63 - out[1]);
-    printf("x = (%4.2f, %4.2f, %4.2f), expected output: (%4.2f, %4.2f), real output: (%10.7f, %10.7f)\n", inp[0], inp[1], inp[2], 0.0, 0.63, out[0], out[1]);
 
-    std::cout << "Run finished! Error of the network: " << error / 3.0 << std::endl;
+    /* GRADIENT DESCENT LEARNING */
+    net.randomize_weights();
+    optimize_via_gradient_descent( net, mse );
 
 
 
diff --git a/src/examples/net_test_3.cpp b/src/examples/net_test_3.cpp
index 69d18546f183f21b6d68d3b13adc1b2987b1be14..39555c33026ef1c589c3cda2f78e77e8060d43a7 100644
--- a/src/examples/net_test_3.cpp
+++ b/src/examples/net_test_3.cpp
@@ -11,7 +11,66 @@
 
 #include <vector>
 
-#include "4neuro.h"
+#include "lib4neuro.h"
+
+void optimize_via_particle_swarm( NeuralNetwork &net, ErrorFunction &ef ){
+
+    /* TRAINING METHOD SETUP */
+    std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
+
+    for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
+        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i + 1] = 10;
+    }
+
+    double c1 = 1.7;
+    double c2 = 1.7;
+    double w = 0.7;
+    size_t n_particles = 50;
+    size_t iter_max = 1000;
+
+    /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
+     * terminating criterion is met */
+    double gamma = 0.5;
+
+    /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
+     * terminating criterion is met ('n' is the total number of particles) */
+    double epsilon = 0.02;
+    double delta = 0.7;
+
+    ParticleSwarm swarm_01(
+            &domain_bounds,
+            c1,
+            c2,
+            w,
+            gamma,
+            epsilon,
+            delta,
+            n_particles,
+            iter_max
+    );
+    swarm_01.optimize( ef );
+
+    std::vector<double> *parameters = swarm_01.get_parameters();
+    net.copy_parameter_space(parameters);
+
+    std::cout << "Run finished! Error of the network[Particle swarm]: " << ef.eval( nullptr ) << std::endl;
+    std::cout << "***********************************************************************************************************************" <<std::endl;
+}
+
+void optimize_via_gradient_descent( NeuralNetwork &net, ErrorFunction &ef ){
+
+    GradientDescent gd( 1e-6, 1000 );
+
+    gd.optimize( ef );
+
+    std::vector<double> *parameters = gd.get_parameters();
+    net.copy_parameter_space(parameters);
+
+    /* ERROR CALCULATION */
+    std::cout << "Run finished! Error of the network[Gradient descent]: " << ef.eval( nullptr )<< std::endl;
+    std::cout << "***********************************************************************************************************************" <<std::endl;
+}
 
 int main() {
     std::cout << "Running lib4neuro example   3: Use of the particle swarm method to train a set of networks sharing some edge weights" << std::endl;
@@ -104,43 +163,13 @@ int main() {
         mse_sum.add_error_function( &mse_01 );
         mse_sum.add_error_function( &mse_02 );
 
-        /* TRAINING METHOD SETUP */
-        std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
-
-        for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
-            domain_bounds[2 * i] = -10;
-            domain_bounds[2 * i + 1] = 10;
-        }
-
-        double c1 = 1.7;
-        double c2 = 1.7;
-        double w = 0.7;
-        size_t n_particles = 50;
-        size_t iter_max = 1000;
-
-        /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
-         * terminating criterion is met */
-        double gamma = 0.5;
-
-        /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
-         * terminating criterion is met ('n' is the total number of particles) */
-        double epsilon = 0.02;
-        double delta = 0.7;
-
-        ParticleSwarm swarm_01(
-                &domain_bounds,
-                c1,
-                c2,
-                w,
-                gamma,
-                epsilon,
-                delta,
-                n_particles,
-                iter_max
-        );
-        swarm_01.optimize( mse_sum );
-
+        /* PARTICLE SWARM LEARNING */
+        net.randomize_weights();
+        optimize_via_particle_swarm( net, mse_sum );
 
+        /* GRADIENT DESCENT LEARNING */
+        net.randomize_weights();
+        optimize_via_gradient_descent( net, mse_sum );
     }
     else{
         std::cout << "We apologize, this example is unfinished as we are in the process of developing methods for efficient subnetwork definition" << std::endl;
diff --git a/src/examples/net_test_harmonic_oscilator.cpp b/src/examples/net_test_harmonic_oscilator.cpp
index 6eecc425d72ebb638b2051defaa52a134c371698..257afdb6951e120f3ba38e463e368dd23799d3ca 100644
--- a/src/examples/net_test_harmonic_oscilator.cpp
+++ b/src/examples/net_test_harmonic_oscilator.cpp
@@ -11,59 +11,41 @@
 #include <iostream>
 #include <fstream>
 
-#include "../../include/4neuro.h"
+#include "lib4neuro.h"
 #include "../Solvers/DESolver.h"
 
-void test_harmonic_oscilator_fixed_E(double EE, double accuracy, size_t n_inner_neurons, size_t train_size, double ds, double de, size_t n_test_points, double ts, double te, size_t max_iters, size_t n_particles){
-    std::cout << "Finding a solution via the Particle Swarm Optimization" << std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
-
-    /* SOLVER SETUP */
-    size_t n_inputs = 1;
-    size_t n_equations = 1;
-    DESolver solver( n_equations, n_inputs, n_inner_neurons );
-
-    /* SETUP OF THE EQUATIONS */
-    MultiIndex alpha_0( n_inputs );
-    MultiIndex alpha_2( n_inputs );
-    alpha_2.set_partial_derivative(0, 2);
+void export_solution( size_t n_test_points, double te, double ts, DESolver &solver, MultiIndex &alpha, const std::string prefix ){
+    NeuralNetwork *solution = solver.get_solution( alpha );
 
-    /* the governing differential equation */
-    char buff[255];
-    std::sprintf(buff, "%f", -EE);
-    std::string eigenvalue(buff);
-    solver.add_to_differential_equation( 0, alpha_2, "-1.0" );
-    solver.add_to_differential_equation( 0, alpha_0, "x^2" );
-    solver.add_to_differential_equation( 0, alpha_0, eigenvalue );
+    char buff[256];
+    sprintf( buff, "%sdata_1d_osc.txt", prefix.c_str() );
+    std::string final_fn( buff );
 
-    /* SETUP OF THE TRAINING DATA */
-    std::vector<double> inp, out;
+    std::ofstream ofs(final_fn, std::ofstream::out);
+    printf("Exporting files '%s': %7.3f%%\r", final_fn.c_str(), 0.0);
+    double frac = (te - ts) / (n_test_points - 1), x;
 
-    double d1_s = ds, d1_e = de, frac;
+    std::vector<double> inp(1), out(1);
 
-    /* TRAIN DATA FOR THE GOVERNING DE */
-    std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_g;
+    for(size_t i = 0; i < n_test_points; ++i){
+        x = frac * i + ts;
 
+        inp[0] = x;
+        solution->eval_single(inp, out);
+        ofs << i + 1 << " " << x << " " << out[0] << " " << std::endl;
 
-    /* ISOTROPIC TRAIN SET */
-    frac = (d1_e - d1_s) / (train_size - 1);
-    for(unsigned int i = 0; i < train_size; ++i){
-        inp = {frac * i + d1_s};
-        out = {0.0};
-        data_vec_g.emplace_back(std::make_pair(inp, out));
+        printf("Exporting files '%s': %7.3f%%\r", final_fn.c_str(), (100.0 * i) / (n_test_points - 1));
+        std::cout.flush();
     }
-//    inp = {0.0};
-//    out = {1.0};
-//    data_vec_g.emplace_back(std::make_pair(inp, out));
-
-    DataSet ds_00(&data_vec_g);
+    printf("Exporting files '%s': %7.3f%%\n", final_fn.c_str(), 100.0);
+    std::cout.flush();
+    ofs.close();
+}
 
-    /* Placing the conditions into the solver */
-    solver.set_error_function( 0, ErrorFunctionType::ErrorFuncMSE, &ds_00 );
+void optimize_via_particle_swarm( DESolver &solver, MultiIndex &alpha, size_t  max_iters, size_t n_particles ){
 
-    /* TRAINING METHOD SETUP */
-    size_t total_dim = solver.get_solution( alpha_0 )->get_n_biases() + solver.get_solution( alpha_0 )->get_n_weights();
-    std::vector<double> domain_bounds( 2 * total_dim );
+    printf("Solution via the particle swarm optimization!\n");
+    std::vector<double> domain_bounds(2 * (solver.get_solution( alpha )->get_n_biases() + solver.get_solution( alpha )->get_n_weights()));
 
     for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
         domain_bounds[2 * i] = -10;
@@ -94,50 +76,71 @@ void test_harmonic_oscilator_fixed_E(double EE, double accuracy, size_t n_inner_
             n_particles,
             max_iters
     );
+
     solver.solve( swarm );
+}
 
-    NeuralNetwork *solution = solver.get_solution( alpha_0 );
-    std::vector<double> parameters(total_dim);//w1, a1, b1, w2, a2, b2, ... , wm, am, bm
-    std::vector<double> *weight_params = solution->get_parameter_ptr_weights();
-    std::vector<double> *biases_params = solution->get_parameter_ptr_biases();
-    for(size_t i = 0; i < n_inner_neurons; ++i){
-        parameters[3 * i] = weight_params->at(i);
-        parameters[3 * i + 1] = weight_params->at(i + n_inner_neurons);
-        parameters[3 * i + 2] = biases_params->at(i);
+void optimize_via_gradient_descent( DESolver &solver, double accuracy ){
 
-        printf("Path %3d. w%d = %15.8f, b%d = %15.8f, a%d = %15.8f\n", (int)(i + 1), (int)(i + 1), parameters[3 * i], (int)(i + 1), parameters[3 * i + 2], (int)(i + 1), parameters[3 * i + 1]);
-    }
+    printf("Solution via a gradient descent method!\n");
+    GradientDescent gd( accuracy, 1000 );
 
-    /* ISOTROPIC TEST SET FOR BOUNDARY CONDITIONS */
-    /* first boundary condition & its error */
-    std::ofstream ofs("data_1d_osc.txt", std::ofstream::out);
-    printf("Exporting files 'data_1d_osc.txt': %7.3f%%\r", 0.0);
-    frac = (te - ts) / (n_test_points - 1);
+    solver.randomize_parameters( );
+    solver.solve( gd );
+}
 
-    for(size_t i = 0; i < n_test_points; ++i){
-        double x = frac * i + ts;
+void test_harmonic_oscilator_fixed_E(double EE, double accuracy, size_t n_inner_neurons, size_t train_size, double ds, double de, size_t n_test_points, double ts, double te, size_t max_iters, size_t n_particles){
+    std::cout << "Finding a solution via the Particle Swarm Optimization" << std::endl;
+    std::cout << "********************************************************************************************************************************************" <<std::endl;
 
-        inp[0] = x;
-        solution->eval_single(inp, out);
-        ofs << i + 1 << " " << x << " " << out[0] << " " << std::endl;
+    /* SOLVER SETUP */
+    size_t n_inputs = 1;
+    size_t n_equations = 1;
+    DESolver solver( n_equations, n_inputs, n_inner_neurons );
 
-        printf("Exporting files 'data_1d_osc.txt': %7.3f%%\r", (100.0 * i) / (n_test_points - 1));
-        std::cout.flush();
+    /* SETUP OF THE EQUATIONS */
+    MultiIndex alpha_0( n_inputs );
+    MultiIndex alpha_2( n_inputs );
+    alpha_2.set_partial_derivative(0, 2);
+
+    /* the governing differential equation */
+    char buff[255];
+    std::sprintf(buff, "%f", -EE);
+    std::string eigenvalue(buff);
+    solver.add_to_differential_equation( 0, alpha_2, "-1.0" );
+    solver.add_to_differential_equation( 0, alpha_0, "x^2" );
+    solver.add_to_differential_equation( 0, alpha_0, eigenvalue );
+
+    /* SETUP OF THE TRAINING DATA */
+    std::vector<double> inp, out;
+
+    double d1_s = ds, d1_e = de, frac;
+
+    /* TRAIN DATA FOR THE GOVERNING DE */
+    std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_g;
+
+
+    /* ISOTROPIC TRAIN SET */
+    frac = (d1_e - d1_s) / (train_size - 1);
+    for(unsigned int i = 0; i < train_size; ++i){
+        inp = {frac * i + d1_s};
+        out = {0.0};
+        data_vec_g.emplace_back(std::make_pair(inp, out));
     }
-    printf("Exporting files 'data_1d_osc.txt': %7.3f%%\n", 100.0);
-    std::cout.flush();
-    ofs.close();
+    inp = {0.0};
+    out = {1.0};
+    data_vec_g.emplace_back(std::make_pair(inp, out));
 
-    inp[0] = -1.0;
-    solution->eval_single(inp, out);
-    printf("y(-1) = %f\n", out[0]);
-    inp[0] = 0.0;
-    solution->eval_single(inp, out);
-    printf("y( 0) = %f\n", out[0]);
-    inp[0] = 1.0;
-    solution->eval_single(inp, out);
-    printf("y( 1) = %f\n", out[0]);
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    DataSet ds_00(&data_vec_g);
+
+    /* Placing the conditions into the solver */
+    solver.set_error_function( 0, ErrorFunctionType::ErrorFuncMSE, &ds_00 );
+
+    optimize_via_particle_swarm( solver, alpha_0, max_iters, n_particles );
+    export_solution( n_test_points, te, ts, solver, alpha_0, "particle_" );
+
+    optimize_via_gradient_descent( solver, accuracy );
+    export_solution( n_test_points, te, ts, solver, alpha_0, "gradient_" );
 }
 
 int main() {
@@ -163,21 +166,5 @@ int main() {
     size_t n_particles = 100;
     test_harmonic_oscilator_fixed_E(EE, accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te, particle_swarm_max_iters, n_particles);
 
-//    std::string expression_string = "-x";
-//    std::string expression_string_1 = "1.0";
-//    ExprtkWrapper f(expression_string);
-//    ExprtkWrapper f1(expression_string_1);
-//
-//
-//    f1.eval();
-//
-//    std::vector<double> inp(1);
-//
-//    inp = {150};
-//    double result = f.eval(inp);
-//
-//    f1.eval();
-//    inp = {15};
-//    result = f.eval(inp);
     return 0;
 }
\ No newline at end of file
diff --git a/src/examples/net_test_ode_1.cpp b/src/examples/net_test_ode_1.cpp
index 7f8deda3a7e0da2799374b17d4dbf05d86beae4a..fa6dc5cbccbf61ac924c47fc26992419ca2d69a3 100644
--- a/src/examples/net_test_ode_1.cpp
+++ b/src/examples/net_test_ode_1.cpp
@@ -19,467 +19,105 @@
 #include <random>
 #include <iostream>
 
-#include "4neuro.h"
+#include "lib4neuro.h"
 
-double eval_f(double x){
-    return std::pow(E, -2.0 * x) * (3.0 * x + 1.0);
-}
-
-double eval_df(double x){
-    return std::pow(E, -2.0 * x) * (1.0 - 6.0 * x);
-}
-
-double eval_ddf(double x){
-    return 4.0 * std::pow(E, -2.0 * x) * (3.0 * x - 2.0);
-}
-
-double eval_approx_f(double x, size_t n_inner_neurons, std::vector<double> &parameters){
-    double value= 0.0, wi, ai, bi, ei, ei1;
-    for(size_t i = 0; i < n_inner_neurons; ++i){
-
-        wi = parameters[3 * i];
-        ai = parameters[3 * i + 1];
-        bi = parameters[3 * i + 2];
-
-        ei = std::pow(E, bi - wi * x);
-        ei1 = ei + 1.0;
-
-        value += ai / (ei1);
-    }
-    return value;
-}
-
-double eval_approx_df(double x, size_t n_inner_neurons, std::vector<double> &parameters){
-    double value= 0.0, wi, ai, bi, ei, ei1;
-    for(size_t i = 0; i < n_inner_neurons; ++i){
-
-        wi = parameters[3 * i];
-        ai = parameters[3 * i + 1];
-        bi = parameters[3 * i + 2];
-
-        ei = std::pow(E, bi - wi * x);
-        ei1 = ei + 1.0;
-
-        value += ai * wi * ei / (ei1 * ei1);
-    }
-
-    return value;
-}
-
-double eval_approx_ddf(double x, size_t n_inner_neurons, std::vector<double> &parameters){
-    double value= 0.0, wi, ai, bi, ewx, eb;
 
-    for(size_t i = 0; i < n_inner_neurons; ++i){
 
-        wi = parameters[3 * i];
-        ai = parameters[3 * i + 1];
-        bi = parameters[3 * i + 2];
 
+void optimize_via_particle_swarm( DESolver &solver, MultiIndex &alpha, size_t  max_iters, size_t n_particles ){
 
-        eb = std::pow(E, bi);
-        ewx = std::pow(E, wi * x);
+    printf("Solution via the particle swarm optimization!\n");
+    std::vector<double> domain_bounds(2 * (solver.get_solution( alpha )->get_n_biases() + solver.get_solution( alpha )->get_n_weights()));
 
-        value += -(ai*wi*wi*eb*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
+    for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
+        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i + 1] = 10;
     }
 
-    return value;
-}
-
-//NN partial derivative (wi): (ai * x * e^(bi - wi * x)) * (e^(bi - wi * x) + 1)^(-2)
-double eval_approx_dw_f(double x, size_t neuron_idx, std::vector<double> &parameters){
-    double wi, ai, bi, ei, ei1;
-    wi = parameters[3 * neuron_idx];
-    ai = parameters[3 * neuron_idx + 1];
-    bi = parameters[3 * neuron_idx + 2];
-
-    ei = std::pow(E, bi - wi * x);
-    ei1 = ei + 1.0;
-
-    return (ai * x * ei) / (ei1 * ei1);
-}
-
-//dNN partial derivative (wi): -(a w x e^(b - w x))/(e^(b - w x) + 1)^2 + (2 a w x e^(2 b - 2 w x))/(e^(b - w x) + 1)^3 + (a e^(b - w x))/(e^(b - w x) + 1)^2
-double eval_approx_dw_df(double x, size_t neuron_idx, std::vector<double> &parameters){
-
-    double wi, ai, bi, ei, ei1;
-
-    wi = parameters[3 * neuron_idx];
-    ai = parameters[3 * neuron_idx + 1];
-    bi = parameters[3 * neuron_idx + 2];
-
-    ei = std::pow(E, bi - wi * x);
-    ei1 = ei + 1.0;
-
-    return -(ai * wi * x * ei)/(ei1 * ei1) + (2.0*ai*wi*x*ei*ei)/(ei1 * ei1 * ei1) + (ai* ei)/(ei1 * ei1);
-}
-
-//ddNN partial derivative (wi): -(a w^2 x e^(b + 2 w x))/(e^b + e^(w x))^3 - (a w^2 x e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3 + (3 a w^2 x e^(b + 2 w x) (e^(w x) - e^b))/(e^b + e^(w x))^4 - (2 a w e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
-double eval_approx_dw_ddf(double x, size_t neuron_idx, std::vector<double> &parameters){
-    double wi, ai, bi, eb, ewx;
-
-    wi = parameters[3 * neuron_idx];
-    ai = parameters[3 * neuron_idx + 1];
-    bi = parameters[3 * neuron_idx + 2];
-
-    eb = std::pow(E, bi);
-    ewx = std::pow(E, wi * x);
-
-    return  -(ai*wi*wi* x * eb*ewx*ewx)/((eb + ewx)*(eb + ewx)*(eb + ewx)) - (ai*wi*wi*x*eb*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx)) + (3*ai*wi*wi*x*eb*ewx*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx)*(eb + ewx)) - (2*ai*wi*eb*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
-}
-
-//NN partial derivative (ai): (1 + e^(-x * wi + bi))^(-1)
-double eval_approx_da_f(double x, size_t neuron_idx, std::vector<double> &parameters){
-    double wi, bi, ei, ei1;
-
-    wi = parameters[3 * neuron_idx];
-    bi = parameters[3 * neuron_idx + 2];
-
-    ei = std::pow(E, bi - wi * x);
-    ei1 = ei + 1.0;
-
-    return 1.0 / ei1;
-}
-
-//dNN partial derivative (ai): (w e^(b - w x))/(e^(b - w x) + 1)^2
-double eval_approx_da_df(double x, size_t neuron_idx, std::vector<double> &parameters){
-    double wi, bi, ei, ei1;
-
-    wi = parameters[3 * neuron_idx];
-    bi = parameters[3 * neuron_idx + 2];
-
-    ei = std::pow(E, bi - wi * x);
-    ei1 = ei + 1.0;
-
-    return (wi*ei)/(ei1 * ei1);
-}
-
-//ddNN partial derivative (ai):  -(w^2 e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
-double eval_approx_da_ddf(double x, size_t neuron_idx, std::vector<double> &parameters){
-    double wi, bi, eip, ewx, eb;
-
-    wi = parameters[3 * neuron_idx];
-    bi = parameters[3 * neuron_idx + 2];
-
-    eip = std::pow(E, bi + wi * x);
-    eb = std::pow(E, bi);
-    ewx = std::pow(E, wi * x);
-
-    return -(wi*wi*eip*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
-}
-
-//NN partial derivative (bi): -(ai * e^(bi - wi * x)) * (e^(bi - wi * x) + 1)^(-2)
-double eval_approx_db_f(double x, size_t neuron_idx, std::vector<double> &parameters){
-    double wi, bi, ei, ai, ei1;
-    wi = parameters[3 * neuron_idx];
-    ai = parameters[3 * neuron_idx + 1];
-    bi = parameters[3 * neuron_idx + 2];
-
-    ei = std::pow(E, bi - wi * x);
-    ei1 = ei + 1.0;
-
-    return -(ai * ei)/(ei1 * ei1);
-}
-
-//dNN partial derivative (bi): (a w e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
-double eval_approx_db_df(double x, size_t neuron_idx, std::vector<double> &parameters){
-    double wi, bi, ai, ewx, eb;
-
-    wi = parameters[3 * neuron_idx];
-    ai = parameters[3 * neuron_idx + 1];
-    bi = parameters[3 * neuron_idx + 2];
-
-    eb = std::pow(E, bi);
-    ewx = std::pow(E, wi*x);
+    double c1 = 1.7;
+    double c2 = 1.7;
+    double w = 0.700;
 
-    return (ai* wi* eb*ewx* (ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
-}
+    /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
+     * terminating criterion is met */
+    double gamma = 0.5;
 
-//ddNN partial derivative (bi): -(a w^2 e^(b + w x) (-4 e^(b + w x) + e^(2 b) + e^(2 w x)))/(e^b + e^(w x))^4
-double eval_approx_db_ddf(double x, size_t neuron_idx, std::vector<double> &parameters){
-    double wi, bi, ai, ewx, eb;
+    /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
+     * terminating criterion is met ('n' is the total number of particles) */
+    double epsilon = 0.02;
+    double delta = 0.7;
 
-    wi = parameters[3 * neuron_idx];
-    ai = parameters[3 * neuron_idx + 1];
-    bi = parameters[3 * neuron_idx + 2];
+    ParticleSwarm swarm(
+            &domain_bounds,
+            c1,
+            c2,
+            w,
+            gamma,
+            epsilon,
+            delta,
+            n_particles,
+            max_iters
+    );
 
-    eb = std::pow(E, bi);
-    ewx = std::pow(E, wi*x);
+    solver.solve( swarm );
 
-    return -(ai* wi*wi* eb*ewx* (-4.0* eb*ewx + eb*eb + ewx*ewx))/((eb +ewx)*(eb +ewx)*(eb +ewx)*(eb +ewx));
 }
 
-double eval_error_function(std::vector<double> &parameters, size_t n_inner_neurons, std::vector<double> test_points){
-
-    double output = 0.0, approx, frac = 1.0 / (test_points.size());
-
-    for(auto x: test_points){
-        /* governing equation */
-        approx = 4.0 * eval_approx_f(x, n_inner_neurons, parameters) + 4.0 * eval_approx_df(x, n_inner_neurons, parameters) + eval_approx_ddf(x, n_inner_neurons, parameters);
+void optimize_via_gradient_descent( DESolver &solver, double accuracy ){
+    printf("Solution via a gradient descent method!\n");
+    GradientDescent gd( accuracy, 1000 );
 
-        output += (0.0 - approx) * (0.0 - approx) * frac;
-
-    }
-
-    /* BC */
-    approx = eval_approx_f(0.0, n_inner_neurons, parameters);
-    output += (1.0 - approx) * (1.0 - approx);
-
-    approx = eval_approx_df(0.0, n_inner_neurons, parameters);
-    output += (1.0 - approx) * (1.0 - approx);
-
-
-    return output;
+    solver.randomize_parameters( );
+    solver.solve( gd );
 }
 
-void eval_step_size_simple(double &gamma, double val, double prev_val, double sk, double grad_norm, double grad_norm_prev){
+void export_solution( size_t n_test_points, double te, double ts, DESolver &solver, MultiIndex &alpha_0, MultiIndex &alpha_1, MultiIndex &alpha_2, const std::string prefix ){
+    NeuralNetwork *solution = solver.get_solution( alpha_0 );
+    NeuralNetwork *solution_d = solver.get_solution( alpha_1 );
+    NeuralNetwork *solution_dd = solver.get_solution( alpha_2 );
 
-    if(val > prev_val){
-        gamma *= 0.99999;
-    }
+    /* ISOTROPIC TEST SET FOR BOUNDARY CONDITIONS */
+    /* first boundary condition & its error */
 
-    if(sk <= 1e-3 || grad_norm < grad_norm_prev){
-        /* movement on a line */
-        /* new slope is less steep, speed up */
-        gamma *= 1.0005;
-    }
-    else if(grad_norm > grad_norm_prev){
-        /* new slope is more steep, slow down*/
-        gamma /= 1.0005;
-    }
-    else{
-        gamma /= 1.005;
-    }
-//        gamma *= 0.999999;
-}
+    char buff[256];
+    sprintf( buff, "%sdata_1d_ode1.txt", prefix.c_str() );
+    std::string final_fn( buff );
 
-void eval_step_size_mk( double &gamma, double beta, double &c, double grad_norm_prev, double grad_norm, double fi, double fim ){
+    std::ofstream ofs(final_fn, std::ofstream::out);
+    printf("Exporting files '%s': %7.3f%%\r", final_fn.c_str(), 0.0);
+    double frac = (te - ts) / (n_test_points - 1);
 
-    if( fi > fim )
-    {
-        c /= 1.0000005;
-    }
-    else if( fi < fim )
-    {
-        c *= 1.0000005;
-    }
+    std::vector<double> inp(1), out(1);
 
-    gamma *= std::pow( c, 1.0 - 2.0 * beta) * std::pow( grad_norm_prev / grad_norm, 1.0 / c );
-
-
-}
+    for(size_t i = 0; i < n_test_points; ++i){
+        double x = frac * i + ts;
+        inp[0] = x;
 
+        solution->eval_single(inp, out);
+        double F = out[0];
 
-double calculate_gradient( std::vector<double> &data_points, size_t n_inner_neurons, std::vector<double> *parameters, std::vector<double> *gradient ){
-    size_t i, j;
-    double x,  mem, derror, total_error, approx;
+        solution_d->eval_single( inp, out);
+        double DF = out[0];
 
-    size_t train_size = data_points.size();
+        solution_dd->eval_single( inp, out);
+        double DDF = out[0];
 
-    /* error boundary condition: y(0) = 1 => e1 = (1 - y(0))^2 */
-    x = 0.0;
-    mem = (1.0 - eval_approx_f(x, n_inner_neurons, *parameters));
-    derror = 2.0 * mem;
-    total_error = mem * mem;
-    for(i = 0; i < n_inner_neurons; ++i){
-        (*gradient)[3 * i] -= derror * eval_approx_dw_f(x, i, *parameters);
-        (*gradient)[3 * i + 1] -= derror * eval_approx_da_f(x, i, *parameters);
-        (*gradient)[3 * i + 2] -= derror * eval_approx_db_f(x, i, *parameters);
-    }
+        ofs << i + 1 << " " << x << " " << std::pow(E, -2*x) * (3*x + 1)<< " " << F << " " << std::pow(E, -2*x) * (1 - 6*x)<< " " << DF << " " << 4 * std::pow(E, -2*x) * (3*x - 2)<< " " << DDF << std::endl;
 
-    /* error boundary condition: y'(0) = 1 => e2 = (1 - y'(0))^2 */
-    mem = (1.0 - eval_approx_df(x, n_inner_neurons, *parameters));
-    derror = 2.0 * mem;
-    total_error += mem * mem;
-    for(i = 0; i < n_inner_neurons; ++i){
-        (*gradient)[3 * i] -= derror * eval_approx_dw_df(x, i, *parameters);
-        (*gradient)[3 * i + 1] -= derror * eval_approx_da_df(x, i, *parameters);
-        (*gradient)[3 * i + 2] -= derror * eval_approx_db_df(x, i, *parameters);
+        printf("Exporting files '%s': %7.3f%%\r", final_fn.c_str(), (100.0 * i) / (n_test_points - 1));
+        std::cout.flush();
     }
+    printf("Exporting files '%s': %7.3f%%\r", final_fn.c_str(), 100.0);
+    std::cout.flush();
+    ofs.close();
 
-    for(j = 0; j < data_points.size(); ++j){
-        x = data_points[j];
-        /* error of the governing equation: y''(x) + 4y'(x) + 4y(x) = 0 => e3 = 1/n * (0 - y''(x) - 4y'(x) - 4y(x))^2 */
-        approx= eval_approx_ddf(x, n_inner_neurons, *parameters) + 4.0 * eval_approx_df(x, n_inner_neurons, *parameters) + 4.0 * eval_approx_f(x, n_inner_neurons, *parameters);
-        mem = 0.0 - approx;
-        derror = 2.0 * mem / train_size;
-        for(i = 0; i < n_inner_neurons; ++i){
-            (*gradient)[3 * i] -= derror * (eval_approx_dw_ddf(x, i, *parameters) + 4.0 * eval_approx_dw_df(x, i, *parameters) + 4.0 * eval_approx_dw_f(x, i, *parameters));
-            (*gradient)[3 * i + 1] -= derror * (eval_approx_da_ddf(x, i, *parameters) + 4.0 * eval_approx_da_df(x, i, *parameters) + 4.0 * eval_approx_da_f(x, i, *parameters));
-            (*gradient)[3 * i + 2] -= derror * (eval_approx_db_ddf(x, i, *parameters) + 4.0 * eval_approx_db_df(x, i, *parameters) + 4.0 * eval_approx_db_f(x, i, *parameters));
-        }
-        total_error += mem * mem / train_size;
-    }
+    std::cout << "********************************************************************************************************************************************" <<std::endl;
 
-    return total_error;
 }
 
-
-//void test_analytical_gradient_y(std::vector<double> &guess, double accuracy, size_t n_inner_neurons, size_t train_size, double d1_s, double d1_e,
-//                                size_t test_size, double ts, double te) {
-//
-//    std::cout << "Finding a solution via a Gradient Descent method with adaptive step-length" << std::endl;
-//    std::cout << "********************************************************************************************************************************************" <<std::endl;
-//
-//    /* SETUP OF THE TRAINING DATA */
-//    std::vector<double> inp, out;
-//
-//    double frac, alpha, x;
-//    double grad_norm = accuracy * 10.0, mem, ai, bi, wi, error, derror, approx, xj, gamma, total_error, sk, sy, sx, sg, beta;
-//    double grad_norm_prev = grad_norm;
-//    size_t i, j, iter_idx = 0;
-//
-//    /* TRAIN DATA FOR THE GOVERNING DE */
-//    std::vector<double> data_points(train_size);
-//
-//    /* ISOTROPIC TRAIN SET */
-//    frac = (d1_e - d1_s) / (train_size - 1);
-//    for(unsigned int i = 0; i < train_size; ++i){
-//        data_points[i] = frac * i;
-//    }
-//
-////    /* CHEBYSCHEV TRAIN SET */
-////    alpha = PI / (train_size );
-////    frac = 0.5 * (d1_e - d1_s);
-////    for(i = 0; i < train_size; ++i){
-////        x = (std::cos(PI - alpha * i) + 1.0) * frac + d1_s;
-////        data_points[i] = x;
-////    }
-//
-////    DataSet ds(0.0, 4.0, train_size, 0.0);
-//
-//    std::vector<double> *gradient_current = new std::vector<double>(3 * n_inner_neurons);
-//    std::vector<double> *gradient_prev = new std::vector<double>(3 * n_inner_neurons);
-//    std::vector<double> *params_current = new std::vector<double>(guess);
-//    std::vector<double> *params_prev = new std::vector<double>(guess);
-//    std::vector<double> *conjugate_direction_current = new std::vector<double>(3 * n_inner_neurons);
-//    std::vector<double> *conjugate_direction_prev = new std::vector<double>(3 * n_inner_neurons);
-//
-//    std::vector<double> *ptr_mem;
-//
-//
-//    std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
-//    std::fill(gradient_prev->begin(), gradient_prev->end(), 0.0);
-//    std::fill(conjugate_direction_current->begin(), conjugate_direction_current->end(), 0.0);
-//    std::fill(conjugate_direction_prev->begin(), conjugate_direction_prev->end(), 0.0);
-//
-//
-//
-////    for (i = 0; i < n_inner_neurons; ++i) {
-////        wi = (*params_current)[3 * i];
-////        ai = (*params_current)[3 * i + 1];
-////        bi = (*params_current)[3 * i + 2];
-////
-////        printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", (int)(i + 1), wi, bi, ai);
-////    }
-//
-//    gamma = 1.0;
-//    double prev_val, val = 0.0, c = 2.0;
-//    while( grad_norm > accuracy) {
-//        iter_idx++;
-//        prev_val = val;
-//        grad_norm_prev = grad_norm;
-//
-//        /* reset of the current gradient */
-//        std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
-//        val = calculate_gradient( data_points, n_inner_neurons, params_current, gradient_current );
-//
-//        grad_norm = 0.0;
-//        for(auto v: *gradient_current){
-//            grad_norm += v * v;
-//        }
-//        grad_norm = std::sqrt(grad_norm);
-//
-//        /* Update of the parameters */
-//        /* step length calculation */
-//        if(iter_idx < 10 || iter_idx % 100 == 0){
-//            /* fixed step length */
-//            gamma = 0.1 * accuracy;
-//        }
-//        else{
-//
-//
-//
-//            /* norm of the gradient calculation */
-//
-//            sk = 0.0;
-//            for(i = 0; i < gradient_current->size(); ++i){
-//                sx = (*gradient_current)[i] - (*gradient_prev)[i];
-//                sk += sx * sx;
-//            }
-//            sk = std::sqrt(sk);
-//
-//            /* angle between two consecutive gradients */
-//            double beta = 0.0, sx = 0.0;
-//            for(i = 0; i < gradient_current->size(); ++i){
-//                sx += (gradient_current->at( i ) * gradient_prev->at( i ));
-//            }
-//            sx /= grad_norm * grad_norm_prev;
-//            beta = std::sqrt(std::acos( sx ) / PI);
-//
-//
-////            eval_step_size_simple( gamma, val, prev_val, sk, grad_norm, grad_norm_prev );
-//            eval_step_size_mk( gamma, beta, c, grad_norm_prev, grad_norm, val, prev_val );
-//        }
-//
-//
-//
-//
-//
-//
-//        for(i = 0; i < gradient_current->size(); ++i){
-//            (*params_prev)[i] = (*params_current)[i] - gamma * (*gradient_current)[i];
-//        }
-//
-//        /* switcheroo */
-//        ptr_mem = gradient_prev;
-//        gradient_prev = gradient_current;
-//        gradient_current = ptr_mem;
-//
-//        ptr_mem = params_prev;
-//        params_prev = params_current;
-//        params_current = ptr_mem;
-//
-//
-//        if(iter_idx % 1 == 0){
-//            printf("Iteration %12d. Step size: %15.8f, C: %15.8f, Gradient norm: %15.8f. Total error: %10.8f\r", (int)iter_idx, gamma, c, grad_norm, val);
-//            std::cout.flush();
-//        }
-//    }
-//    printf("Iteration %12d. Step size: %15.8f, C: %15.8f, Gradient norm: %15.8f. Total error: %10.8f\r\n", (int)iter_idx, gamma, c, grad_norm, val);
-//    std::cout.flush();
-//
-//    for (i = 0; i < n_inner_neurons; ++i) {
-//        wi = (*params_current)[3 * i];
-//        ai = (*params_current)[3 * i + 1];
-//        bi = (*params_current)[3 * i + 2];
-//
-//        printf("Path %3d. w%d = %15.8f, b%d = %15.8f, a%d = %15.8f\n", (int)(i + 1), (int)(i + 1), wi, (int)(i + 1), bi, (int)(i + 1), ai);
-//    }
-//    std::cout << "********************************************************************************************************************************************" <<std::endl;
-//
-//
-////    data_export_gradient(params_current);
-////    if(total_error < 1e-3 || true){
-////        /* ISOTROPIC TEST SET */
-////        frac = (te - ts) / (test_size - 1);
-////        for(j = 0; j < test_size; ++j){
-////            xj = frac * j + ts;
-////
-////            std::cout << j + 1 << " " << xj << " " << eval_f(xj) << " " << eval_approx_f(xj, n_inner_neurons, *params_current) << " " << eval_df(xj) << " " << eval_approx_df(xj, n_inner_neurons, *params_current) << " " << eval_ddf(xj) << " " << eval_approx_ddf(xj, n_inner_neurons, *params_current) << std::endl;
-////        }
-////    }
-//
-//    delete gradient_current;
-//    delete gradient_prev;
-//    delete params_current;
-//    delete params_prev;
-//    delete conjugate_direction_current;
-//    delete conjugate_direction_prev;
-//}
-
 void test_ode(double accuracy, size_t n_inner_neurons, size_t train_size, double ds, double de, size_t n_test_points, double ts, double te, size_t max_iters, size_t n_particles){
 
-    std::cout << "Finding a solution via the Particle Swarm Optimization" << std::endl;
+    std::cout << "Finding a solution via the Particle Swarm Optimization and Gradient descent method!" << std::endl;
     std::cout << "********************************************************************************************************************************************" <<std::endl;
 
     /* SOLVER SETUP */
@@ -495,9 +133,9 @@ void test_ode(double accuracy, size_t n_inner_neurons, size_t train_size, double
     alpha_1.set_partial_derivative(0, 1);
 
     /* the governing differential equation */
-    solver_01.add_to_differential_equation( 0, alpha_2, "1.0" );
-    solver_01.add_to_differential_equation( 0, alpha_1, "4.0" );
     solver_01.add_to_differential_equation( 0, alpha_0, "4.0" );
+    solver_01.add_to_differential_equation( 0, alpha_1, "4.0" );
+    solver_01.add_to_differential_equation( 0, alpha_2, "1.0" );
 
     /* dirichlet boundary condition */
     solver_01.add_to_differential_equation( 1, alpha_0, "1.0" );
@@ -556,135 +194,13 @@ void test_ode(double accuracy, size_t n_inner_neurons, size_t train_size, double
     solver_01.set_error_function( 1, ErrorFunctionType::ErrorFuncMSE, &ds_01 );
     solver_01.set_error_function( 2, ErrorFunctionType::ErrorFuncMSE, &ds_02 );
 
-
-    size_t total_dim = (2 + n_inputs) * n_inner_neurons;
-
-    std::vector<double> params(total_dim), params_analytical(total_dim);
-    std::random_device seeder;
-    std::mt19937 gen(seeder());
-    std::uniform_real_distribution<double> dist(-10.0, 10.0);
-
-    std::vector<double> input(1);
-//    for( size_t testi = 0; testi < 50; ++testi ){
-//        double test_error_eq1 = 0.0, total_error = 0.0;
-//        for(size_t i = 0; i < params.size(); ++i){
-//            params[i] = dist(gen);
-//        }
-//        for(size_t i = 0; i < n_inner_neurons; ++i){
-//            params_analytical[3 * i] = params[i];
-//            params_analytical[3 * i + 1] = params[n_inner_neurons + i];
-//            params_analytical[3 * i + 2] = params[2 * n_inner_neurons + i];
-//        }
-//
-//        for(auto d: *ds_00.get_data()){
-//            input = d.first;
-//            double x = input[0];
-//
-//            double analytical_value_f = eval_approx_f(x, n_inner_neurons, params_analytical);
-//            double analytical_value_df = eval_approx_df(x, n_inner_neurons, params_analytical);
-//            double analytical_value_ddf = eval_approx_ddf(x, n_inner_neurons, params_analytical);
-//
-//            double de_solver_value_eq1 = solver_01.eval_equation(0, &params, input);
-//            double analytical_value_eq1 = 4 * analytical_value_f + 4 * analytical_value_df + analytical_value_ddf;
-//            test_error_eq1 += (de_solver_value_eq1 - analytical_value_eq1) * (de_solver_value_eq1 - analytical_value_eq1);
-//
-//        }
-//        input[0] = 0.0;
-//        double de_solver_value_eq2 = solver_01.eval_equation(1, &params, input);
-//        double analytical_value_eq2 = eval_approx_f(0.0, n_inner_neurons, params_analytical);
-//        double test_error_eq2 = (de_solver_value_eq2 - analytical_value_eq2) * (de_solver_value_eq2 - analytical_value_eq2);
-//
-//        double de_solver_value_eq3 = solver_01.eval_equation(2, &params, input);
-//        double analytical_value_eq3 = eval_approx_df(0.0, n_inner_neurons, params_analytical);
-//        double test_error_eq3 = (de_solver_value_eq3 - analytical_value_eq3) * (de_solver_value_eq3 - analytical_value_eq3);
-//
-//        double total_error_de_solver = solver_01.eval_total_error(params);
-//
-//        double total_error_analytical = eval_error_function(params_analytical, n_inner_neurons, test_points);
-//
-//        printf("\tRepresentation test %6d, error of eq1: %10.8f, error of eq2: %10.8f, error of eq3: %10.8f, total error: %10.8f\n", (int)testi, std::sqrt(test_error_eq1), std::sqrt(test_error_eq2), std::sqrt(test_error_eq3), (total_error_analytical - total_error_de_solver) * (total_error_analytical - total_error_de_solver));
-//    }
-
     /* TRAINING METHOD SETUP */
-    std::vector<double> domain_bounds(2 * (solver_01.get_solution( alpha_0 )->get_n_biases() + solver_01.get_solution( alpha_0 )->get_n_weights()));
+    optimize_via_particle_swarm( solver_01, alpha_0, max_iters, n_particles );
+    export_solution( n_test_points, te, ts, solver_01 , alpha_0, alpha_1, alpha_2, "particle_" );
 
-    for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
-        domain_bounds[2 * i] = -10;
-        domain_bounds[2 * i + 1] = 10;
-    }
-
-    double c1 = 1.7;
-    double c2 = 1.7;
-    double w = 0.700;
-
-    /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
-     * terminating criterion is met */
-    double gamma = 0.5;
-
-    /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
-     * terminating criterion is met ('n' is the total number of particles) */
-    double epsilon = 0.02;
-    double delta = 0.7;
 
-    ParticleSwarm swarm_01(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            max_iters
-    );
-    solver_01.solve( swarm_01 );
-
-    NeuralNetwork *solution = solver_01.get_solution( alpha_0 );
-    NeuralNetwork *solution_d = solver_01.get_solution( alpha_1 );
-    NeuralNetwork *solution_dd = solver_01.get_solution( alpha_2 );
-
-    std::vector<double> parameters(total_dim);//w1, a1, b1, w2, a2, b2, ... , wm, am, bm
-    std::vector<double> *weight_params = solution->get_parameter_ptr_weights();
-    std::vector<double> *biases_params = solution->get_parameter_ptr_biases();
-    for(size_t i = 0; i < n_inner_neurons; ++i){
-        parameters[3 * i] = weight_params->at(i);
-        parameters[3 * i + 1] = weight_params->at(i + n_inner_neurons);
-        parameters[3 * i + 2] = biases_params->at(i);
-
-        printf("Path %3d. w%d = %15.8f, b%d = %15.8f, a%d = %15.8f\n", (int)(i + 1), (int)(i + 1), parameters[3 * i], (int)(i + 1), parameters[3 * i + 2], (int)(i + 1), parameters[3 * i + 1]);
-    }
-
-    /* ISOTROPIC TEST SET FOR BOUNDARY CONDITIONS */
-    /* first boundary condition & its error */
-	/*
-    std::ofstream ofs("data_1d_ode1.txt", std::ofstream::out);
-    printf("Exporting files 'data_1d_ode1.txt': %7.3f%%\r", 0.0);
-    frac = (te - ts) / (n_test_points - 1);
-
-    for(size_t i = 0; i < n_test_points; ++i){
-        double x = frac * i + ts;
-        inp[0] = x;
-
-        solution->eval_single(inp, out);
-        double F = out[0];
-
-        solution_d->eval_single( inp, out);
-        double DF = out[0];
-
-        solution_dd->eval_single( inp, out);
-        double DDF = out[0];
-
-        ofs << i + 1 << " " << x << " " << std::pow(E, -2*x) * (3*x + 1)<< " " << F << " " << std::pow(E, -2*x) * (1 - 6*x)<< " " << DF << " " << 4 * std::pow(E, -2*x) * (3*x - 2)<< " " << DDF << std::endl;
-
-        printf("Exporting files 'data_1d_ode1.txt': %7.3f%%\r", (100.0 * i) / (n_test_points - 1));
-        std::cout.flush();
-    }
-    printf("Exporting files 'data_1d_ode1.txt': %7.3f%%\n", 100.0);
-    std::cout.flush();
-    ofs.close();
-
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
-	*/
+    optimize_via_gradient_descent( solver_01, accuracy );
+    export_solution( n_test_points, te, ts, solver_01 , alpha_0, alpha_1, alpha_2, "gradient_" );
 }
 
 int main() {
@@ -699,7 +215,7 @@ int main() {
 
     unsigned int n_inner_neurons = 2;
     unsigned int train_size = 10;
-    double accuracy = 1e-3;
+    double accuracy = 1e-6;
     double ds = 0.0;
     double de = 4.0;
 
@@ -709,20 +225,9 @@ int main() {
 
     size_t particle_swarm_max_iters = 1000;
     size_t n_particles = 100;
-    test_ode(accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te, particle_swarm_max_iters, n_particles);
 
-//    std::vector<double> init_guess = {0.35088209, -0.23738505, 0.14160885, 3.72785473, -6.45758308, 1.73769138};
-//    std::vector<double> init_guess(3 * n_inner_neurons);
-//
-//    std::random_device seeder;
-//    std::mt19937 gen(seeder());
-//    std::uniform_real_distribution<double> dist(-1.0, 1.0);
-//    for(unsigned int i = 0; i < init_guess.size(); ++i){
-//        init_guess[i] = dist(gen);
-//    }
 
-    //TODO, sometimes produces nans in combinations with extremely high number of iterations, INVESTIGATE
-//    test_analytical_gradient_y(init_guess, accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te);
+    test_ode(accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te, particle_swarm_max_iters, n_particles);
 
 
     return 0;
diff --git a/src/examples/net_test_pde_1.cpp b/src/examples/net_test_pde_1.cpp
index 48913ef4ba6891d59bfa8b22ab09f573d11ca2e0..f6b847679f65be0298ca547d098f24e8e12df7e5 100644
--- a/src/examples/net_test_pde_1.cpp
+++ b/src/examples/net_test_pde_1.cpp
@@ -22,617 +22,157 @@
 #include <iostream>
 #include <fstream>
 
-#include "4neuro.h"
+#include "lib4neuro.h"
 
-//y(x, t) = ... ai * f(wxi * x + wti * t - bi)
-double eval_approx_y(double x, double t, size_t n_inner_neurons, std::vector<double> &parameters){
-    double value= 0.0, wxi, wti, ai, bi, ei, ei1;
-    for(size_t i = 0; i < n_inner_neurons; ++i){
+void optimize_via_particle_swarm( DESolver &solver, MultiIndex &alpha, size_t  max_iters, size_t n_particles ){
 
-        wxi = parameters[4 * i + 0];
-        wti = parameters[4 * i + 1];
-        ai  = parameters[4 * i + 2];
-        bi  = parameters[4 * i + 3];
+    printf("Solution via the particle swarm optimization!\n");
+    std::vector<double> domain_bounds(2 * (solver.get_solution( alpha )->get_n_biases() + solver.get_solution( alpha )->get_n_weights()));
 
-        ei = std::pow(E, bi - wxi * x - wti * t);
-        ei1 = ei + 1.0;
-
-        value += ai / (ei1);
+    for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
+        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i + 1] = 10;
     }
-    return value;
+
+    double c1 = 1.7;
+    double c2 = 1.7;
+    double w = 0.700;
+
+    /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
+     * terminating criterion is met */
+    double gamma = 0.5;
+
+    /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
+     * terminating criterion is met ('n' is the total number of particles) */
+    double epsilon = 0.02;
+    double delta = 0.7;
+
+    ParticleSwarm swarm(
+            &domain_bounds,
+            c1,
+            c2,
+            w,
+            gamma,
+            epsilon,
+            delta,
+            n_particles,
+            max_iters
+    );
+
+    solver.solve( swarm );
+
+}
+
+void optimize_via_gradient_descent( DESolver &solver, double accuracy ){
+    printf("Solution via a gradient descent method!\n");
+    GradientDescent gd( accuracy, 1000 );
+
+    solver.randomize_parameters( );
+    solver.solve( gd );
 }
-//yt(x, t) = ... (ai * wti * e^(bi - wti * t - wxi * x))/(e^(bi - wti * t - wxi * x) + 1)^2
-double eval_approx_yt(double x, double t, size_t n_inner_neurons, std::vector<double> &parameters){
-    double value= 0.0, wxi, wti, ai, bi, ei, ei1;
 
-    for(size_t i = 0; i < n_inner_neurons; ++i){
+void export_solution( size_t n_test_points, double te, double ts, DESolver &solver, MultiIndex &alpha_00, MultiIndex &alpha_01, MultiIndex &alpha_20, const std::string prefix ){
+    NeuralNetwork *solution = solver.get_solution( alpha_00 );
+    NeuralNetwork *solution_t = solver.get_solution( alpha_01 );
+    NeuralNetwork *solution_xx = solver.get_solution( alpha_20 );
+
+    size_t i, j;
+    double x, t;
+    /* ISOTROPIC TEST SET FOR BOUNDARY CONDITIONS */
+    /* first boundary condition & its error */
 
-        wxi = parameters[4 * i + 0];
-        wti = parameters[4 * i + 1];
-        ai  = parameters[4 * i + 2];
-        bi  = parameters[4 * i + 3];
+    char buff[256];
+    sprintf( buff, "%sdata_2d_pde1_y.txt", prefix.c_str() );
+    std::string final_fn( buff );
 
-        ei = std::pow(E, bi - wxi * x - wti * t);
-        ei1 = ei + 1.0;
+    printf("Exporting file '%s' : %7.3f%%\r", final_fn.c_str( ), 0.0 );
+    std::cout.flush();
 
-        value += ai * wti * ei / (ei1 * ei1);
+    std::vector<double> input(2), output(1), output_t(1), output_xx(1);
+    std::ofstream ofs(final_fn, std::ofstream::out);
+    double frac = (te - ts) / (n_test_points - 1);
+    for(i = 0; i < n_test_points; ++i){
+        x = i * frac + ts;
+        for(j = 0; j < n_test_points; ++j){
+            t = j * frac + ts;
+            input = {x, t};
+
+            solution->eval_single( input, output );
+
+            ofs << x << " " << t << " " << output[0] << std::endl;
+            printf("Exporting file '%s' : %7.3f%%\r", final_fn.c_str(), (100.0 * (j + i * n_test_points)) / (n_test_points * n_test_points - 1));
+            std::cout.flush();
+        }
     }
-    return value;
-}
-////yx(x, t) = ...  (ai * wxi * e^(bi - t * wti - wxi * x))/(e^(bi - t * wti - wxi * x) + 1)^2
-//double eval_approx_yx(double x, double t, size_t n_inner_neurons, std::vector<double> &parameters){
-//    double value= 0.0, wxi, wti, ai, bi, ei, ei1;
-//
-//    for(size_t i = 0; i < n_inner_neurons; ++i){
-//
-//        wxi = parameters[4 * i + 0];
-//        wti = parameters[4 * i + 1];
-//        ai  = parameters[4 * i + 2];
-//        bi  = parameters[4 * i + 3];
-//
-//        ei = std::pow(E, bi - wxi * x - wti * t);
-//        ei1 = ei + 1.0;
-//
-//        value += (ai * wxi * ei1)/(ei1 * ei1);
-//    }
-//    return value;
-//}
-//yxx(x, t) = ...  (ai * wxi * e^(bi - t * wti - wxi * x))/(e^(bi - t * wti - wxi * x) + 1)^2
-double eval_approx_yxx(double x, double t, size_t n_inner_neurons, std::vector<double> &parameters){
-    double value= 0.0, wxi, wti, ai, bi, ei, ei1;
-    for(size_t i = 0; i < n_inner_neurons; ++i){
-
-        wxi = parameters[4 * i + 0];
-        wti = parameters[4 * i + 1];
-        ai  = parameters[4 * i + 2];
-        bi  = parameters[4 * i + 3];
-
-        ei = std::pow(E, bi - wxi * x - wti * t);
-        ei1 = ei + 1.0;
-
-        value += (2 * ai * wxi * wxi * ei * ei) / (ei1 * ei1 * ei1) - (ai * wxi * wxi * ei) / (ei1 * ei1);
+    printf("Exporting file '%s' : %7.3f%%\n", final_fn.c_str(), 100.0);
+    std::cout.flush();
+    ofs.close();
+
+    /* governing equation error */
+    sprintf( buff, "%sdata_2d_pde1_first_equation_error.txt", prefix.c_str() );
+    final_fn = std::string( buff );
+
+    ofs = std::ofstream(final_fn, std::ofstream::out);
+    printf("Exporting file '%s' : %7.3f%%\r", final_fn.c_str(), 0.0);
+    for(i = 0; i < n_test_points; ++i){
+        x = i * frac + ts;
+        for(j = 0; j < n_test_points; ++j){
+            t = j * frac + ts;
+            input = {x, t};
+
+            solution_t->eval_single( input, output_t );
+            solution_xx->eval_single( input, output_xx );
+
+            ofs << x << " " << t << " " << std::fabs(output_xx[0] - output_t[0]) << std::endl;
+            printf("Exporting file 'data_2d_pde1_first_equation_error.txt' : %7.3f%%\r", (100.0 * (j + i * n_test_points)) / (n_test_points * n_test_points - 1));
+            std::cout.flush();
+        }
     }
-    return value;
-}
-//
-//double eval_approx_da_y(double x, double t, size_t neuron_idx, std::vector<double> &parameters){
-//    double wxi, wti, bi, ei, ei1;
-//
-//    wxi = parameters[4 * neuron_idx + 0];
-//    wti = parameters[4 * neuron_idx + 1];
-//    bi =  parameters[4 * neuron_idx + 3];
-//
-//    ei = std::pow(E, bi - wxi * x - wti * t);
-//    ei1 = ei + 1.0;
-//
-//    return 1.0 / ei1;
-//}
-//
-//double eval_approx_dwx_y(double x, double t, size_t neuron_idx, std::vector<double> &parameters){
-//    double wxi, wti, ai, bi, ei, ei1;
-//    wxi = parameters[4 * neuron_idx + 0];
-//    wti = parameters[4 * neuron_idx + 1];
-//    ai =  parameters[4 * neuron_idx + 2];
-//    bi =  parameters[4 * neuron_idx + 3];
-//
-//    ei = std::pow(E, bi - wxi * x - wti * t);
-//    ei1 = ei + 1.0;
-//
-//    return  (ai * x * ei)/(ei1 * ei1);
-//}
-//
-//double eval_approx_dwt_y(double x, double t, size_t neuron_idx, std::vector<double> &parameters){
-//    double wxi, wti, ai, bi, ei, ei1;
-//    wxi = parameters[4 * neuron_idx + 0];
-//    wti = parameters[4 * neuron_idx + 1];
-//    ai =  parameters[4 * neuron_idx + 2];
-//    bi =  parameters[4 * neuron_idx + 3];
-//
-//    ei = std::pow(E, bi - wxi * x - wti * t);
-//    ei1 = ei + 1.0;
-//
-//    return  (ai * t * ei)/(ei1 * ei1);
-//}
-//
-//double eval_approx_db_y(double x, double t, size_t neuron_idx, std::vector<double> &parameters){
-//    double wxi, wti, bi, ei, ai, ei1;
-//    wxi = parameters[4 * neuron_idx + 0];
-//    wti = parameters[4 * neuron_idx + 1];
-//    ai =  parameters[4 * neuron_idx + 2];
-//    bi =  parameters[4 * neuron_idx + 3];
-//
-//    ei = std::pow(E, bi - wxi * x - wti * t);
-//    ei1 = ei + 1.0;
-//
-//    return -(ai * ei)/(ei1 * ei1);
-//}
-//
-//double eval_approx_da_yt(double x, double t, size_t neuron_idx, std::vector<double> &parameters){
-//    double wxi, wti, bi, ei, ei1;
-//
-//    wxi = parameters[4 * neuron_idx + 0];
-//    wti = parameters[4 * neuron_idx + 1];
-//    bi =  parameters[4 * neuron_idx + 3];
-//
-//    ei = std::pow(E, bi - wxi * x - wti * t);
-//    ei1 = ei + 1.0;
-//
-//    return (wti * ei)/(ei1 * ei1);
-//}
-//
-//double eval_approx_dwx_yt(double x, double t, size_t neuron_idx, std::vector<double> &parameters){
-//    double wxi, wti, ai, bi, ei, ei1;
-//    wxi = parameters[4 * neuron_idx + 0];
-//    wti = parameters[4 * neuron_idx + 1];
-//    ai =  parameters[4 * neuron_idx + 2];
-//    bi =  parameters[4 * neuron_idx + 3];
-//
-//    ei = std::pow(E, bi - wxi * x - wti * t);
-//    ei1 = ei + 1.0;
-//
-//    return (2 * ai * wti * x * ei * ei)/(ei1 * ei1 * ei1) - (ai * wti * x * ei)/(ei1 * ei1);
-//}
-//
-//double eval_approx_dwt_yt(double x, double t, size_t neuron_idx, std::vector<double> &parameters){
-//    double wxi, wti, ai, bi, ei, ei1;
-//    wxi = parameters[4 * neuron_idx + 0];
-//    wti = parameters[4 * neuron_idx + 1];
-//    ai =  parameters[4 * neuron_idx + 2];
-//    bi =  parameters[4 * neuron_idx + 3];
-//
-//    ei = std::pow(E, bi - wxi * x - wti * t);
-//    ei1 = ei + 1.0;
-//
-//    return  -(ai * t * wti * ei) / (ei1 * ei1) + (2 * ai * t * wti * ei * ei)/(ei1 * ei1 * ei1) + (ai * ei)/(ei1 * ei1);
-//}
-//
-//double eval_approx_db_yt(double x, double t, size_t neuron_idx, std::vector<double> &parameters){
-//    double wxi, wti, ai, bi, ei, ei1;
-//    wxi = parameters[4 * neuron_idx + 0];
-//    wti = parameters[4 * neuron_idx + 1];
-//    ai =  parameters[4 * neuron_idx + 2];
-//    bi =  parameters[4 * neuron_idx + 3];
-//
-//    ei = std::pow(E, bi - wxi * x - wti * t);
-//    ei1 = ei + 1.0;
-//
-//    return (ai * wti * ei) / (ei1 * ei1) - (2 * ai * wti * ei * ei) / (ei1 * ei1 * ei1);
-//}
-//
-//double eval_approx_da_yxx(double x, double t, size_t neuron_idx, std::vector<double> &parameters){
-//    double wxi, wti, ai, bi, ei, ei1, ebp, eb, etx;
-//    wxi = parameters[4 * neuron_idx + 0];
-//    wti = parameters[4 * neuron_idx + 1];
-//    ai =  parameters[4 * neuron_idx + 2];
-//    bi =  parameters[4 * neuron_idx + 3];
-//
-//    ei = std::pow(E, bi - wxi * x - wti * t);
-//    ebp= std::pow(E, bi + wxi * x + wti * t);
-//    eb = std::pow(E, bi);
-//    etx = std::pow(E, wxi * x + wti * t);
-//    ei1 = eb + etx;
-//
-//    return -(wxi * wxi * ebp * (etx - eb))/(ei1 * ei1 * ei1);
-//}
-//
-//double eval_approx_dwx_yxx(double x, double t, size_t neuron_idx, std::vector<double> &parameters){
-//    double wxi, wti, ai, bi, ei, ei1, ebp, eb, etx;
-//    wxi = parameters[4 * neuron_idx + 0];
-//    wti = parameters[4 * neuron_idx + 1];
-//    ai =  parameters[4 * neuron_idx + 2];
-//    bi =  parameters[4 * neuron_idx + 3];
-//
-//    ei = std::pow(E, bi - wxi * x - wti * t);
-//    ebp= std::pow(E, bi + wxi * x + wti * t);
-//    eb = std::pow(E, bi);
-//    etx = std::pow(E, wxi * x + wti * t);
-//    ei1 = eb + etx;
-//
-//    return (ai * wxi * wxi * x * ei) / ((ei + 1) * (ei + 1)) - (6 * ai * wxi * wxi * x * ei * ei) / ((ei + 1) * (ei + 1) * (ei + 1)) + (6 * ai * wxi *wxi * x * ei * ei * ei) / ((ei + 1) * (ei + 1) * (ei + 1) * (ei + 1)) - (2 * ai * wxi * ei) / ((ei + 1) * (ei + 1)) + (4 * ai * wxi * ei * ei)/((ei + 1) * (ei + 1) * (ei + 1));
-//}
-//
-//double eval_approx_dwt_yxx(double x, double t, size_t neuron_idx, std::vector<double> &parameters){
-//    double wxi, wti, ai, bi, ei, ei1, ebp, eb, etx;
-//    wxi = parameters[4 * neuron_idx + 0];
-//    wti = parameters[4 * neuron_idx + 1];
-//    ai =  parameters[4 * neuron_idx + 2];
-//    bi =  parameters[4 * neuron_idx + 3];
-//
-//    ei = std::pow(E, bi - wxi * x - wti * t);
-//    ebp= std::pow(E, bi + wxi * x + wti * t);
-//    eb = std::pow(E, bi);
-//    etx = std::pow(E, wxi * x + wti * t);
-//    ei1 = eb + etx;
-//
-//    return (ai * t * wxi * wxi * ei) / ((ei + 1) * (ei + 1)) - (6 * ai * t * wxi * wxi * ei * ei) / ((ei + 1) * (ei + 1) * (ei + 1)) + (6 * ai * t * wxi * wxi * ei * ei * ei) / ((ei + 1) * (ei + 1) * (ei + 1) * (ei + 1));
-//}
-//
-//double eval_approx_db_yxx(double x, double t, size_t neuron_idx, std::vector<double> &parameters){
-//    double wxi, wti, ai, bi, ei, ei1, ebp, eb, etx;
-//    wxi = parameters[4 * neuron_idx + 0];
-//    wti = parameters[4 * neuron_idx + 1];
-//    ai =  parameters[4 * neuron_idx + 2];
-//    bi =  parameters[4 * neuron_idx + 3];
-//
-//    ei = std::pow(E, bi - wxi * x - wti * t);
-//    ebp= std::pow(E, bi + wxi * x + wti * t);
-//    eb = std::pow(E, bi);
-//    etx = std::pow(E, wxi * x + wti * t);
-//    ei1 = eb + etx;
-//
-//    return (ai * wxi * wxi * eb * ebp) / (ei1 * ei1 * ei1) - (ai * wxi * wxi * ebp * (etx - eb)) / (ei1 * ei1 * ei1) + (3 * ai * wxi * wxi * eb * ebp * (etx - eb)) / (ei1 * ei1 * ei1 * ei1);
-//}
-//
-//void eval_step_size_simple(double &gamma, double val, double prev_val, double sk, double grad_norm, double grad_norm_prev){
-//
-//    if(val > prev_val){
-//        gamma *= 0.99999;
-//    }
-//
-//    if(sk <= 1e-3 || grad_norm < grad_norm_prev){
-//        /* movement on a line */
-//        /* new slope is less steep, speed up */
-//        gamma *= 1.0005;
-//    }
-//    else if(grad_norm > grad_norm_prev){
-//        /* new slope is more steep, slow down*/
-//        gamma /= 1.0005;
-//    }
-//    else{
-//        gamma /= 1.005;
-//    }
-////        gamma *= 0.999999;
-//}
-//
-//void eval_step_size_mk( double &gamma, double beta, double &c, double grad_norm_prev, double grad_norm, double fi, double fim ){
-//
-//    if( fi > fim )
-//    {
-//        c /= 1.0000005;
-//    }
-//    else if( fi < fim )
-//    {
-//        c *= 1.0000005;
-//    }
-//
-//    gamma *= std::pow( c, 1.0 - 2.0 * beta) * std::pow( grad_norm_prev / grad_norm, 1.0 / c );
-//
-//
-//}
-//
-//double calculate_gradient( std::vector<double> &data_points, size_t n_inner_neurons, std::vector<double> *parameters, std::vector<double> *gradient ){
-//    size_t i, j, k;
-//    double x, t, mem, derror, total_error, approx;
-//
-//    size_t train_size = data_points.size();
-//
-//    /* error of boundary condition: y(0, t) = sin(t) => e1 = 1/n * (sin(t) - y(0, t))^2 */
-//    for(i = 0; i < train_size; ++i){
-//
-//        t = data_points[i];
-//        mem = (std::sin(t) - eval_approx_y(0.0, t, n_inner_neurons, *parameters));
-//        derror = 2.0 * mem / train_size;
-//
-//        for(j = 0; j < n_inner_neurons; ++j){
-//            (*gradient)[4 * j + 0] -= derror * eval_approx_dwx_y(0, t, j, *parameters);
-//            (*gradient)[4 * j + 1] -= derror * eval_approx_dwt_y(0, t, j, *parameters);
-//            (*gradient)[4 * j + 2] -= derror *  eval_approx_da_y(0, t, j, *parameters);
-//            (*gradient)[4 * j + 3] -= derror *  eval_approx_db_y(0, t, j, *parameters);
-//        }
-//
-//        total_error += mem * mem / train_size;
-//    }
-//
-//
-//
-//    /* error boundary condition: y(x, 0) = e^(-(0.5)^(0.5)x) * sin(-(0.5)^(0.5)x) => e2 = 1/n * (e^(-(0.5)^(0.5)x) * sin(-(0.5)^(0.5)x) - y(x, 0))^2 */
-//    for(i = 0; i < train_size; ++i){
-//
-//        x = data_points[i];
-//        mem = (std::pow(E, -0.707106781 * x) * std::sin( -0.707106781 * x ) - eval_approx_y(x, 0.0, n_inner_neurons, *parameters));
-//        derror = 2.0 * mem / train_size;
-//
-//        for(j = 0; j < n_inner_neurons; ++j){
-//            (*gradient)[4 * j + 0] -= derror * eval_approx_dwx_y(x, 0, j, *parameters);
-//            (*gradient)[4 * j + 1] -= derror * eval_approx_dwt_y(x, 0, j, *parameters);
-//            (*gradient)[4 * j + 2] -= derror *  eval_approx_da_y(x, 0, j, *parameters);
-//            (*gradient)[4 * j + 3] -= derror *  eval_approx_db_y(x, 0, j, *parameters);
-//        }
-//
-//        total_error += mem * mem / train_size;
-//    }
-//
-//    /* error of the governing equation: y_xx - y_t = 0 => e3 = 1/n^2 * (0 - y_xx + y_t)^2 */
-//    for(i = 0; i < data_points.size(); ++i){
-//        x = data_points[i];
-//        for(j = 0; j < data_points.size(); ++j){
-//            t = data_points[j];
-//
-//            approx = eval_approx_yxx(x, t, n_inner_neurons, *parameters) - eval_approx_yt(x, t, n_inner_neurons, *parameters);
-//            mem = 0.0 - approx;
-//            derror = 2.0 * mem / (train_size * train_size);
-//            for(k = 0; k < n_inner_neurons; ++k){
-//                (*gradient)[4 * k + 0] -= derror * (eval_approx_dwx_yxx(x, t, k, *parameters) - eval_approx_dwx_yt(x, t, k, *parameters));
-//                (*gradient)[4 * k + 1] -= derror * (eval_approx_dwt_yxx(x, t, k, *parameters) - eval_approx_dwt_yt(x, t, k, *parameters));
-//                (*gradient)[4 * k + 2] -= derror * ( eval_approx_da_yxx(x, t, k, *parameters) -  eval_approx_da_yt(x, t, k, *parameters));
-//                (*gradient)[4 * k + 3] -= derror * ( eval_approx_db_yxx(x, t, k, *parameters) -  eval_approx_db_yt(x, t, k, *parameters));
-//            }
-//            total_error += mem * mem / (train_size * train_size);
-//        }
-//    }
-//    return total_error;
-//}
-//
-//void solve_example_gradient(std::vector<double> &guess, double accuracy, size_t n_inner_neurons, size_t train_size, double ds, double de, size_t n_test_points, double ts, double te){
-//    std::cout << "Finding a solution via a Gradient Descent method with adaptive step-length" << std::endl;
-//    std::cout << "********************************************************************************************************************************************" <<std::endl;
-//    /* SETUP OF THE TRAINING DATA */
-//    std::vector<double> inp, out;
-//
-//    double frac, alpha, x;
-//    double grad_norm = accuracy * 10.0, mem, ai, bi, wxi, wti, error, derror, approx, t, gamma, total_error, sk, sy, sx, sg, beta;
-//    double grad_norm_prev = grad_norm;
-//    size_t i, j, k, iter_idx = 0;
-//
-//    /* TRAIN DATA FOR THE GOVERNING DE */
-//    std::vector<double> data_points(train_size);
-//
-//    /* ISOTROPIC TRAIN SET */
-//    frac = (de - ds) / (train_size - 1);
-//    for(i = 0; i < train_size; ++i){
-//        data_points[i] = frac * i + ds;
-////        std::cout << data_points[i] << std::endl;
-//    }
-//
-////    /* CHEBYSCHEV TRAIN SET */
-////    alpha = PI / (train_size );
-////    frac = 0.5 * (de - ds);
-////    for(i = 0; i < train_size; ++i){
-////        x = (std::cos(PI - alpha * i) + 1.0) * frac + ds;
-////        data_points[i] = x;
-////    }
-//
-//    std::vector<double> *gradient_current = new std::vector<double>(4 * n_inner_neurons);
-//    std::vector<double> *gradient_prev = new std::vector<double>(4 * n_inner_neurons);
-//    std::vector<double> *params_current = new std::vector<double>(guess);
-//    std::vector<double> *params_prev = new std::vector<double>(guess);
-//
-//    std::vector<double> *ptr_mem;
-//
-//    std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
-//    std::fill(gradient_prev->begin(), gradient_prev->end(), 0.0);
-//
-////    for (i = 0; i < n_inner_neurons; ++i) {
-////        wxi = (*params_current)[4 * i + 0];
-////        wti = (*params_current)[4 * i + 1];
-////        ai  = (*params_current)[4 * i + 2];
-////        bi  = (*params_current)[4 * i + 3];
-////
-////        printf("Path %3d. wx = %15.8f, wt = %15.8f, b = %15.8f, a = %15.8f\n", (int)(i + 1), wxi, wti, bi, ai);
-////    }
-//
-//    gamma = 1.0;
-//    double val = 0.0, prev_val, c = 2.0;
-//    prev_val = 0.0;
-//    while( grad_norm > accuracy) {
-//        iter_idx++;
-//        prev_val = val;
-//        grad_norm_prev = grad_norm;
-//
-//        /* reset of the current gradient */
-//        std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
-//        val = calculate_gradient( data_points, n_inner_neurons, params_current, gradient_current );
-//
-//        grad_norm = 0.0;
-//        for(auto v: *gradient_current){
-//            grad_norm += v * v;
-//        }
-//        grad_norm = std::sqrt(grad_norm);
-//
-//        /* Update of the parameters */
-//        /* step length calculation */
-//        if(iter_idx < 10 || iter_idx % 100 == 0){
-//            /* fixed step length */
-//            gamma = 0.1 * accuracy;
-//        }
-//        else{
-//
-//
-//
-//            /* norm of the gradient calculation */
-//
-//            sk = 0.0;
-//            for(i = 0; i < gradient_current->size(); ++i){
-//                sx = (*gradient_current)[i] - (*gradient_prev)[i];
-//                sk += sx * sx;
-//            }
-//            sk = std::sqrt(sk);
-//
-//            /* angle between two consecutive gradients */
-//            double beta = 0.0, sx = 0.0;
-//            for(i = 0; i < gradient_current->size(); ++i){
-//                sx += (gradient_current->at( i ) * gradient_prev->at( i ));
-//            }
-//            sx /= grad_norm * grad_norm_prev;
-//            beta = std::sqrt(std::acos( sx ) / PI);
-//
-//
-////            eval_step_size_simple( gamma, val, prev_val, sk, grad_norm, grad_norm_prev );
-//            eval_step_size_mk( gamma, beta, c, grad_norm_prev, grad_norm, val, prev_val );
-//        }
-//
-//
-//
-//
-//
-//
-//        for(i = 0; i < gradient_current->size(); ++i){
-//            (*params_prev)[i] = (*params_current)[i] - gamma * (*gradient_current)[i];
-//        }
-//
-//        /* switcheroo */
-//        ptr_mem = gradient_prev;
-//        gradient_prev = gradient_current;
-//        gradient_current = ptr_mem;
-//
-//        ptr_mem = params_prev;
-//        params_prev = params_current;
-//        params_current = ptr_mem;
-//
-//
-//        if(iter_idx % 1 == 0){
-//            printf("Iteration %12d. Step size: %15.8f, C: %15.8f, Gradient norm: %15.8f. Total error: %10.8f\r", (int)iter_idx, gamma, c, grad_norm, val);
-//            std::cout.flush();
-//        }
-//    }
-//    printf("Iteration %12d. Step size: %15.8f, C: %15.8f, Gradient norm: %15.8f. Total error: %10.8f\r\n", (int)iter_idx, gamma, c, grad_norm, val);
-//    std::cout.flush();
-//
-//    for (i = 0; i < n_inner_neurons; ++i) {
-//        wxi = (*params_current)[4 * i + 0];
-//        wti = (*params_current)[4 * i + 1];
-//        ai  = (*params_current)[4 * i + 2];
-//        bi  = (*params_current)[4 * i + 3];
-//
-//        printf("Path %3d. wx%d = %15.8f, wt%d = %15.8f, b%d = %15.8f, a%d = %15.8f\n", (int)(i + 1), (int)(i + 1), wxi , (int)(i + 1), wti, (int)(i + 1), bi, (int)(i + 1), ai);
-//    }
-//    std::cout << "********************************************************************************************************************************************" <<std::endl;
-//
-////    for (i = 0; i < n_inner_neurons; ++i) {
-////        wxi = (*params_current)[4 * i + 0];
-////        wti = (*params_current)[4 * i + 1];
-////        ai  = (*params_current)[4 * i + 2];
-////        bi  = (*params_current)[4 * i + 3];
-////
-////        printf("%f/(1+e^(%f-%fx-%ft)) + ", (int)(i + 1), ai, bi, wxi, wti);
-////    }
-////    printf("\n");
-//
-//
-//    /* SOLUTION EXPORT */
-//    printf("Exporting file 'data_2d_pde1_y.txt' : %7.3f%%\r", 0.0);
-//    std::cout.flush();
-//
-//    std::vector<double> input, output(1);
-//    std::ofstream ofs_y("data_2d_pde1_y.txt", std::ofstream::out);
-//    frac = (te - ts) / (n_test_points - 1);
-//    for(i = 0; i < n_test_points; ++i){
-//        x = i * frac + ts;
-//        for(j = 0; j < n_test_points; ++j){
-//            t = j * frac + ts;
-//            ofs_y << x << " " << t << " " << eval_approx_y(x, t, n_inner_neurons, *params_current) << std::endl;
-//            printf("Exporting file 'data_2d_pde1_y.txt' : %7.3f%%\r", (100.0 * (j + i * n_test_points)) / (n_test_points * n_test_points - 1));
-//            std::cout.flush();
-//        }
-//    }
-//    printf("Exporting file 'data_2d_pde1_y.txt' : %7.3f%%\n", 100.0);
-//    std::cout.flush();
-//    ofs_y.close();
-//
-//    printf("Exporting file 'data_2d_pde1_yt.txt' : %7.3f%%\r", 0.0);
-//    std::cout.flush();
-//    std::ofstream ofs_t("data_2d_pde1_yt.txt", std::ofstream::out);
-//    frac = (te - ts) / (n_test_points - 1);
-//    for(i = 0; i < n_test_points; ++i){
-//        x = i * frac + ts;
-//        for(j = 0; j < n_test_points; ++j){
-//            t = j * frac + ts;
-//            ofs_t << x << " " << t << " " << eval_approx_yt(x, t, n_inner_neurons, *params_current) << std::endl;
-//            printf("Exporting file 'data_2d_pde1_yt.txt' : %7.3f%%\r", (100.0 * (j + i * n_test_points)) / (n_test_points * n_test_points - 1));
-//            std::cout.flush();
-//        }
-//    }
-//    printf("Exporting file 'data_2d_pde1_yt.txt' : %7.3f%%\n", 100.0);
-//    std::cout.flush();
-//    ofs_t.close();
-//
-//    printf("Exporting file 'data_2d_pde1_yx.txt' : %7.3f%%\r", 0.0);
-//    std::cout.flush();
-//    std::ofstream ofs_x("data_2d_pde1_yx.txt", std::ofstream::out);
-//    frac = (te - ts) / (n_test_points - 1);
-//    for(i = 0; i < n_test_points; ++i){
-//        x = i * frac + ts;
-//        for(j = 0; j < n_test_points; ++j){
-//            t = j * frac + ts;
-//            ofs_x << x << " " << t << " " << eval_approx_yx(x, t, n_inner_neurons, *params_current) << std::endl;
-//            printf("Exporting file 'data_2d_pde1_yx.txt' : %7.3f%%\r", (100.0 * (j + i * n_test_points)) / (n_test_points * n_test_points - 1));
-//            std::cout.flush();
-//        }
-//    }
-//    printf("Exporting file 'data_2d_pde1_yx.txt' : %7.3f%%\n", 100.0);
-//    std::cout.flush();
-//    ofs_x.close();
-//
-//    printf("Exporting file 'data_2d_pde1_yxx.txt' : %7.3f%%\r", 0.0);
-//    std::cout.flush();
-//    std::ofstream ofs_xx("data_2d_pde1_yxx.txt", std::ofstream::out);
-//    frac = (te - ts) / (n_test_points - 1);
-//    for(i = 0; i < n_test_points; ++i){
-//        x = i * frac + ts;
-//        for(j = 0; j < n_test_points; ++j){
-//            t = j * frac + ts;
-//            ofs_xx << x << " " << t << " " << eval_approx_yxx(x, t, n_inner_neurons, *params_current) << std::endl;
-//            printf("Exporting file 'data_2d_pde1_yxx.txt' : %7.3f%%\r", (100.0 * (j + i * n_test_points)) / (n_test_points * n_test_points - 1));
-//            std::cout.flush();
-//        }
-//    }
-//    printf("Exporting file 'data_2d_pde1_yxx.txt' : %7.3f%%\n", 100.0);
-//    std::cout.flush();
-//    ofs_xx.close();
-//
-//    /* governing equation error */
-//    std::ofstream ofs_error("data_2d_pde1_first_equation_error.txt", std::ofstream::out);
-//    printf("Exporting file 'data_2d_pde1_first_equation_error.txt' : %7.3f%%\r", 0.0);
-//    for(i = 0; i < n_test_points; ++i){
-//        x = i * frac + ts;
-//        for(j = 0; j < n_test_points; ++j){
-//            t = j * frac + ts;
-//            ofs_error << x << " " << t << " " << std::fabs(eval_approx_yxx(x, t, n_inner_neurons, *params_current) - eval_approx_yt(x, t, n_inner_neurons, *params_current)) << std::endl;
-//            printf("Exporting file 'data_2d_pde1_first_equation_error.txt' : %7.3f%%\r", (100.0 * (j + i * n_test_points)) / (n_test_points * n_test_points - 1));
-//            std::cout.flush();
-//        }
-//    }
-//    printf("Exporting file 'data_2d_pde1_first_equation_error.txt' : %7.3f%%\n", 100.0);
-//    std::cout.flush();
-//    ofs_error.close();
-//
-//    /* ISOTROPIC TEST SET FOR BOUNDARY CONDITIONS */
-//    /* first boundary condition & its error */
-//    std::ofstream ofs_bc_t("data_1d_pde1_yt.txt", std::ofstream::out);
-//    std::ofstream ofs_bc_x("data_1d_pde1_yx.txt", std::ofstream::out);
-//    printf("Exporting files 'data_1d_pde1_yt.txt' and 'data_1d_pde1_yx.txt' : %7.3f%%\r", 0.0);
-//    for(i = 0; i < n_test_points; ++i){
-//        x = frac * i + ts;
-//        t = frac * i + ts;
-//
-//        double yt = std::sin(t);
-//        double yx = std::pow(E, -0.707106781 * x) * std::sin( -0.707106781 * x );
-//
-//        double evalt = eval_approx_y(0, t, n_inner_neurons, *params_current);
-//        double evalx = eval_approx_y(x, 0, n_inner_neurons, *params_current);
-//
-//        ofs_bc_t << i + 1 << " " << t << " " << yt << " " << evalt << " " << std::fabs(evalt - yt) << std::endl;
-//        ofs_bc_x << i + 1 << " " << x << " " << yx << " " << evalx << " " << std::fabs(evalx - yx) << std::endl;
-//
-//        printf("Exporting files 'data_1d_pde1_yt.txt' and 'data_1d_pde1_yx.txt' : %7.3f%%\r", (100.0 * i) / (n_test_points - 1));
-//        std::cout.flush();
-//    }
-//    printf("Exporting files 'data_1d_pde1_yt.txt' and 'data_1d_pde1_yx.txt' : %7.3f%%\r", 100.0);
-//    std::cout.flush();
-//    ofs_bc_t.close();
-//    ofs_bc_x.close();
-//
-//    delete gradient_current;
-//    delete gradient_prev;
-//    delete params_current;
-//    delete params_prev;
-//}
-
-void solve_example_particle_swarm(double accuracy, size_t n_inner_neurons, size_t train_size, double ds, double de, size_t n_test_points, double ts, double te, size_t max_iters, size_t n_particles){
-    std::cout << "Finding a solution via the Particle Swarm Optimization" << std::endl;
+    printf("Exporting file '%s' : %7.3f%%\n", final_fn.c_str(), 100.0);
+    std::cout.flush();
+    ofs.close();
+
+    /* ISOTROPIC TEST SET FOR BOUNDARY CONDITIONS */
+    /* first boundary condition & its error */
+    sprintf( buff, "%sdata_1d_pde1_yt.txt", prefix.c_str() );
+    std::string final_fn_t(buff);
+
+    sprintf( buff, "%sdata_1d_pde1_yx.txt", prefix.c_str() );
+    std::string final_fn_x(buff);
+
+    ofs = std::ofstream(final_fn_t, std::ofstream::out);
+    std::ofstream ofs2(final_fn_x, std::ofstream::out);
+    printf("Exporting files '%s' and '%s' : %7.3f%%\r", final_fn_t.c_str(), final_fn_x.c_str(), 0.0);
+    for(i = 0; i < n_test_points; ++i){
+        x = frac * i + ts;
+        t = frac * i + ts;
+
+        double yt = std::sin(t);
+        double yx = std::pow(E, -0.707106781 * x) * std::sin( -0.707106781 * x );
+
+        input = {0, t};
+        solution->eval_single(input, output);
+        double evalt = output[0];
+
+        input = {x, 0};
+        solution->eval_single(input, output);
+        double evalx = output[0];
+
+        ofs << i + 1 << " " << t << " " << yt << " " << evalt << " " << std::fabs(evalt - yt) << std::endl;
+        ofs2 << i + 1 << " " << x << " " << yx << " " << evalx << " " << std::fabs(evalx - yx) << std::endl;
+
+        printf("Exporting files '%s' and '%s' : %7.3f%%\r", final_fn_t.c_str(), final_fn_x.c_str(), (100.0 * i) / (n_test_points - 1));
+        std::cout.flush();
+    }
+    printf("Exporting files '%s' and '%s' : %7.3f%%\n", final_fn_t.c_str(), final_fn_x.c_str(), 100.0);
+    std::cout.flush();
+    ofs2.close();
+    ofs.close();
+
     std::cout << "********************************************************************************************************************************************" <<std::endl;
 
-    /* solution properties */
+}
+void test_pde(double accuracy, size_t n_inner_neurons, size_t train_size, double ds, double de, size_t n_test_points, double ts, double te, size_t max_iters, size_t n_particles){
 
     /* do not change below */
     size_t n_inputs = 2;
@@ -701,129 +241,14 @@ void solve_example_particle_swarm(double accuracy, size_t n_inner_neurons, size_
     solver_01.set_error_function( 1, ErrorFunctionType::ErrorFuncMSE, &ds_t );
     solver_01.set_error_function( 2, ErrorFunctionType::ErrorFuncMSE, &ds_x );
 
-    size_t total_dim = (solver_01.get_solution( alpha_00 )->get_n_biases() + solver_01.get_solution( alpha_00 )->get_n_weights());
-
-    /* TRAINING METHOD SETUP */
-    std::vector<double> domain_bounds(2 * total_dim);
-
-    for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
-        domain_bounds[2 * i] = -10;
-        domain_bounds[2 * i + 1] = 10;
-    }
-
-    double c1 = 1.7;
-    double c2 = 1.7;
-    double w = 0.7;
-
-    /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
-     * terminating criterion is met */
-    double gamma = 0.5;
-
-    /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
-     * terminating criterion is met ('n' is the total number of particles) */
-    double epsilon = 0.02;
-    double delta = 0.7;
+    optimize_via_particle_swarm( solver_01, alpha_00, max_iters, n_particles );
+    export_solution( n_test_points, te, ts, solver_01 , alpha_00, alpha_01, alpha_20, "particle_" );
 
-    ParticleSwarm swarm_01(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            max_iters
-    );
-    solver_01.solve( swarm_01 );
 
+    optimize_via_gradient_descent( solver_01, accuracy );
+    export_solution( n_test_points, te, ts, solver_01 , alpha_00, alpha_01, alpha_20, "gradient_" );
 
-    size_t i, j;
-    std::vector<double> *w1_ptr = solver_01.get_solution( alpha_00 )->get_parameter_ptr_weights();
-    std::vector<double> *w2_ptr = solver_01.get_solution( alpha_00 )->get_parameter_ptr_biases();
-    std::vector<double> export_params(total_dim);
-    for(size_t i = 0; i < n_inner_neurons; ++i){
-        export_params[4 * i + 0] = w1_ptr->at(i);
-        export_params[4 * i + 1] = w1_ptr->at(n_inner_neurons + i);
-        export_params[4 * i + 2] = w1_ptr->at(2 * n_inner_neurons + i);
-        export_params[4 * i + 3] = w2_ptr->at( i );
-
-        printf("Path %3d. wx%d = %15.8f, wt%d = %15.8f, b%d = %15.8f, a%d = %15.8f\n", (int)(i + 1), (int)(i + 1), export_params[4 * i + 0] , (int)(i + 1), export_params[4 * i + 1], (int)(i + 1), export_params[4 * i + 3], (int)(i + 1), export_params[4 * i + 2]);
-    }
-    std::cout << "********************************************************************************************************************************************" << std::endl;
-    /* PRACTICAL END OF THE EXAMPLE */
-
-    /* SOLUTION EXPORT */
-    for(i = 0; i < n_inner_neurons; ++i){
-        export_params[4 * i + 0] = w1_ptr->at(i);
-        export_params[4 * i + 1] = w1_ptr->at(n_inner_neurons + i);
-        export_params[4 * i + 2] = w1_ptr->at(2 * n_inner_neurons + i);
-        export_params[4 * i + 3] = w2_ptr->at( i );
-    }
-/*
-    printf("Exporting file 'data_2d_pde1_y.txt' : %7.3f%%\r", 0.0);
-    std::cout.flush();
 
-    std::vector<double> input, output(1);
-    std::ofstream ofs("data_2d_pde1_y.txt", std::ofstream::out);
-    frac = (te - ts) / (n_test_points - 1);
-    for(i = 0; i < n_test_points; ++i){
-        x = i * frac + ts;
-        for(j = 0; j < n_test_points; ++j){
-            t = j * frac + ts;
-            ofs << x << " " << t << " " << eval_approx_y(x, t, n_inner_neurons, export_params) << std::endl;
-            printf("Exporting file 'data_2d_pde1_y.txt' : %7.3f%%\r", (100.0 * (j + i * n_test_points)) / (n_test_points * n_test_points - 1));
-            std::cout.flush();
-        }
-    }
-    printf("Exporting file 'data_2d_pde1_y.txt' : %7.3f%%\n", 100.0);
-    std::cout.flush();
-    ofs.close();
-
-    /* governing equation error */
-	/*
-    ofs = std::ofstream("data_2d_pde1_first_equation_error.txt", std::ofstream::out);
-    printf("Exporting file 'data_2d_pde1_first_equation_error.txt' : %7.3f%%\r", 0.0);
-    for(i = 0; i < n_test_points; ++i){
-        x = i * frac + ts;
-        for(j = 0; j < n_test_points; ++j){
-            t = j * frac + ts;
-            ofs << x << " " << t << " " << std::fabs(eval_approx_yxx(x, t, n_inner_neurons, export_params) - eval_approx_yt(x, t, n_inner_neurons, export_params)) << std::endl;
-            printf("Exporting file 'data_2d_pde1_first_equation_error.txt' : %7.3f%%\r", (100.0 * (j + i * n_test_points)) / (n_test_points * n_test_points - 1));
-            std::cout.flush();
-        }
-    }
-    printf("Exporting file 'data_2d_pde1_first_equation_error.txt' : %7.3f%%\n", 100.0);
-    std::cout.flush();
-    ofs.close();
-*/
-    /* ISOTROPIC TEST SET FOR BOUNDARY CONDITIONS */
-    /* first boundary condition & its error */
-	/*
-    ofs = std::ofstream("data_1d_pde1_yt.txt", std::ofstream::out);
-    std::ofstream ofs2("data_1d_pde1_yx.txt", std::ofstream::out);
-    printf("Exporting files 'data_1d_pde1_yt.txt' and 'data_1d_pde1_yx.txt' : %7.3f%%\r", 0.0);
-    for(i = 0; i < n_test_points; ++i){
-        x = frac * i + ts;
-        t = frac * i + ts;
-
-        double yt = std::sin(t);
-        double yx = std::pow(E, -0.707106781 * x) * std::sin( -0.707106781 * x );
-
-        double evalt = eval_approx_y(0, t, n_inner_neurons, export_params);
-        double evalx = eval_approx_y(x, 0, n_inner_neurons, export_params);
-
-        ofs << i + 1 << " " << t << " " << yt << " " << evalt << " " << std::fabs(evalt - yt) << std::endl;
-        ofs2 << i + 1 << " " << x << " " << yx << " " << evalx << " " << std::fabs(evalx - yx) << std::endl;
-
-        printf("Exporting files 'data_1d_pde1_yt.txt' and 'data_1d_pde1_yx.txt' : %7.3f%%\r", (100.0 * i) / (n_test_points - 1));
-        std::cout.flush();
-    }
-    printf("Exporting files 'data_1d_pde1_yt.txt' and 'data_1d_pde1_yx.txt' : %7.3f%%\r", 100.0);
-    std::cout.flush();
-    ofs2.close();
-    ofs.close();
-	*/
 }
 
 int main() {
@@ -836,31 +261,19 @@ int main() {
     std::cout << "Expressing solution as y(x, t) = sum over [a_i / (1 + exp(bi - wxi*x - wti*t))], i in [1, n], where n is the number of hidden neurons" <<std::endl;
     std::cout << "********************************************************************************************************************************************" <<std::endl;
 
-    unsigned int n_inner_neurons = 6;
-    unsigned int train_size = 15;
-    double accuracy = 1e-4;
+    unsigned int n_inner_neurons = 4;
+    unsigned int train_size = 50;
+    double accuracy = 1e-3;
     double ds = 0.0;
     double de = 1.0;
 
-    unsigned int test_size = 20;
+    unsigned int test_size = 100;
     double ts = ds;
     double te = de + 0;
 
     size_t particle_swarm_max_iters = 1000;
     size_t n_particles = 50;
-    solve_example_particle_swarm(accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te, particle_swarm_max_iters, n_particles);
-
-
-//    std::vector<double> init_guess(4 * n_inner_neurons);
-//    std::random_device seeder;
-//    std::mt19937 gen(seeder());
-//    std::uniform_real_distribution<double> dist(-1.0, 1.0);
-//    for(unsigned int i = 0; i < init_guess.size(); ++i){
-//        init_guess[i] = dist(gen);
-//    }
-//
-////    init_guess = {-0.21709230, -0.26189447, 0.77853923, 0.41091127, -0.44311897, -0.99036349, 0.84912023, -0.16920743};
-//    solve_example_gradient(init_guess, accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te);
+    test_pde(accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te, particle_swarm_max_iters, n_particles);
 
     return 0;
 }
diff --git a/src/examples/network_serialization.cpp b/src/examples/network_serialization.cpp
index b5811670b4dd10ccaacf7dc066f9741f197025a7..fa26e2c5c74647047bc18202667031bbc18164f4 100644
--- a/src/examples/network_serialization.cpp
+++ b/src/examples/network_serialization.cpp
@@ -7,7 +7,7 @@
  */
 
 #include <vector>
-#include "4neuro.h"
+#include "lib4neuro.h"
 
 int main() {
     std::cout << "Running lib4neuro Serialization example   1" << std::endl;
diff --git a/src/examples/seminar.cpp b/src/examples/seminar.cpp
index 6369f099c4b0f688e2c56abf022f30495228a5be..034899bd2837451325bcb5b0141dc796a94c8acb 100644
--- a/src/examples/seminar.cpp
+++ b/src/examples/seminar.cpp
@@ -9,7 +9,7 @@
 #include <iostream>
 #include <fstream>
 
-#include "../../include/4neuro.h"
+#include "lib4neuro.h"
 #include "../Solvers/DESolver.h"
 
 int main() {
diff --git a/src/settings.h b/src/settings.h
deleted file mode 100644
index 4ed3e1860035bef5188d28f7982a604b07dc16d0..0000000000000000000000000000000000000000
--- a/src/settings.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * DESCRIPTION OF THE FILE
- *
- * @author Michal KravĨenko
- * @date 24.7.18 -
- */
-
-#ifndef INC_4NEURO_SETTINGS_H
-#define INC_4NEURO_SETTINGS_H
-
-/**
- * If defined, the NN feed-forward will print out whats happening
- */
-//#define VERBOSE_NN_EVAL
-
-#ifdef _WINDOWS
-#define LIB4NEURO_API __declspec(dllexport)
-#else
-#define LIB4NEURO_API
-#endif
-
-
-#endif //INC_4NEURO_SETTINGS_H
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt
index 4da5a2430fec97979935d9d9c95547dac2ef220b..b3fe737e1d4e5cde3e5a13d0f534a1d4ab11249d 100644
--- a/src/tests/CMakeLists.txt
+++ b/src/tests/CMakeLists.txt
@@ -5,56 +5,56 @@
 add_executable(linear_neuron_test NeuronLinear_test.cpp)
 target_link_libraries(linear_neuron_test lib4neuro boost_unit_test)
 
-add_executable(constant_neuron_test NeuronConstant_test.cpp)
-target_link_libraries(constant_neuron_test lib4neuro boost_unit_test)
+#add_executable(constant_neuron_test NeuronConstant_test.cpp)
+#target_link_libraries(constant_neuron_test lib4neuro boost_unit_test)
 
-add_executable(binary_neuron_test NeuronBinary_test.cpp)
-target_link_libraries(binary_neuron_test lib4neuro boost_unit_test)
+#add_executable(binary_neuron_test NeuronBinary_test.cpp)
+#target_link_libraries(binary_neuron_test lib4neuro boost_unit_test)
 
-add_executable(logistic_neuron_test NeuronLogistic_test.cpp)
-target_link_libraries(logistic_neuron_test lib4neuro boost_unit_test)
+#add_executable(logistic_neuron_test NeuronLogistic_test.cpp)
+#target_link_libraries(logistic_neuron_test lib4neuro boost_unit_test)
 
-add_executable(connectionFunctionGeneral_test ConnectionFunctionGeneral_test.cpp)
-target_link_libraries(connectionFunctionGeneral_test lib4neuro boost_unit_test)
+#add_executable(connectionFunctionGeneral_test ConnectionFunctionGeneral_test.cpp)
+#target_link_libraries(connectionFunctionGeneral_test lib4neuro boost_unit_test)
 
-add_executable(neural_network_test NeuralNetwork_test.cpp)
-target_link_libraries(neural_network_test lib4neuro boost_unit_test)
+#add_executable(neural_network_test NeuralNetwork_test.cpp)
+#target_link_libraries(neural_network_test lib4neuro boost_unit_test)
 
-add_executable(connection_Function_identity_test ConnectionFunctionIdentity_test.cpp)
-target_link_libraries(connection_Function_identity_test lib4neuro boost_unit_test)
+#add_executable(connection_Function_identity_test ConnectionFunctionIdentity_test.cpp)
+#target_link_libraries(connection_Function_identity_test lib4neuro boost_unit_test)
 
-add_executable(dataset_test DataSet_test.cpp)
-target_link_libraries(dataset_test lib4neuro boost_unit_test)
+#add_executable(dataset_test DataSet_test.cpp)
+#target_link_libraries(dataset_test lib4neuro boost_unit_test)
 
-add_executable(errorfunction_test ErrorFunctions_test.cpp)
-target_link_libraries(errorfunction_test lib4neuro boost_unit_test)
+#add_executable(errorfunction_test ErrorFunctions_test.cpp)
+#target_link_libraries(errorfunction_test lib4neuro boost_unit_test)
 
-add_executable(particle_swarm_test ParticleSwarm_test.cpp)
-target_link_libraries(particle_swarm_test lib4neuro boost_unit_test)
+#add_executable(particle_swarm_test ParticleSwarm_test.cpp)
+#target_link_libraries(particle_swarm_test lib4neuro boost_unit_test)
 
-add_executable(particle_test Particle_test.cpp)
-target_link_libraries(particle_test lib4neuro boost_unit_test)
+#add_executable(particle_test Particle_test.cpp)
+#target_link_libraries(particle_test lib4neuro boost_unit_test)
 
-add_executable(NeuralNetworkSum_test NeuralNetworkSum_test.cpp)
-target_link_libraries(NeuralNetworkSum_test lib4neuro boost_unit_test)
+#add_executable(NeuralNetworkSum_test NeuralNetworkSum_test.cpp)
+#target_link_libraries(NeuralNetworkSum_test lib4neuro boost_unit_test)
 
-add_executable(DESolver_test DESolver_test.cpp)
-target_link_libraries(DESolver_test lib4neuro boost_unit_test)
+#add_executable(DESolver_test DESolver_test.cpp)
+#target_link_libraries(DESolver_test lib4neuro boost_unit_test)
 
 set_target_properties(
     linear_neuron_test
-    constant_neuron_test
-    binary_neuron_test
-    logistic_neuron_test
-    connectionFunctionGeneral_test
-    connection_Function_identity_test
-    neural_network_test
-    dataset_test
-    particle_swarm_test
-    particle_test
-    NeuralNetworkSum_test
-    errorfunction_test
-    DESolver_test
+    #constant_neuron_test
+    #binary_neuron_test
+    #logistic_neuron_test
+    #connectionFunctionGeneral_test
+    #connection_Function_identity_test
+    #neural_network_test
+    #dataset_test
+    #particle_swarm_test
+    #particle_test
+    #NeuralNetworkSum_test
+    #errorfunction_test
+    #DESolver_test
 
     PROPERTIES
         ARCHIVE_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib"
diff --git a/src/tests/boost_unit_tests_preamble.h b/src/tests/boost_unit_tests_preamble.h
index a7656f83ac5d1e584c12b65dbd4af6e612ad4948..76b6255e5ede4eab40caf39be8b27cad9eaf08dc 100644
--- a/src/tests/boost_unit_tests_preamble.h
+++ b/src/tests/boost_unit_tests_preamble.h
@@ -1,4 +1,4 @@
- 
+
 
 
 #ifndef BOOST_TEST_DYN_LINK