diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index ff0581104dbbcf88f014f3bf5bfec6549a5955f3..43309e2af87f6a8a5906f9f4e9b363e2439e0666 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -50,8 +50,8 @@ ubuntu_boost_system:
 
     before_script:
         - rm -rf external_dependencies
-        - git clone https://github.com/ArashPartow/exprtk.git
-        - cp exprtk/exprtk.hpp /usr/include
+#        - git clone https://github.com/ArashPartow/exprtk.git
+#        - cp exprtk/exprtk.hpp /usr/include
         - export TERM=xterm
         - cd build_scripts/linux
         - export DEPENDENCIES_LINK_TYPE=shared
diff --git a/.gitmodules b/.gitmodules
index 0cc6a91e81d7cbf0edb2f13ce8012dc5c775aeb8..6ce7b0ac039af32106c354aa9e1ada2716c2e6bd 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -4,3 +4,6 @@
 [submodule "external_dependencies/boost"]
 	path = external_dependencies/boost
 	url = https://github.com/boostorg/boost.git
+[submodule "external_dependencies/turtle"]
+	path = external_dependencies/turtle
+	url = https://github.com/mat007/turtle.git
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2465df77a1e77982413901f676037f154d5ec9db..08a45c8737dc54e81611157dc02779b024acca8f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -28,6 +28,7 @@ endif (NOT CMAKE_BUILD_TYPE)
 #------------------------#
 # Dependencies link type #
 #------------------------#
+set(DEPENDENCIES_LINK_TYPE static)
 if(NOT DEPENDENCIES_LINK_TYPE AND NOT ENV{DEPENDENCIES_LINK_TYPE})
     message(FATAL_ERROR "Please, set the variable DEPENDENCIES_LINK_TYPE to either 'static' or 'shared'!")
 endif()
@@ -45,6 +46,8 @@ elseif( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC" )
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W0 /bigobj")
     add_compile_options("/D _SCL_SECURE_NO_WARNINGS")
     add_compile_options("/D_CRT_SECURE_NO_WARNINGS")
+elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MINGW")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mbig-obj")
 else()
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall")
 endif()
@@ -86,6 +89,8 @@ message("lib4neuro LIB DIR: ${LIB4NEURO_DIR}")
 
 find_package(exprtk)
 
+find_package(Turtle)
+
 #------------------------------------------#
 # Detect maximum available number of cores #
 # and set corresponding build options      #
diff --git a/FindBoost.cmake b/FindBoost.cmake
index 5b730564b5e1fa4a38734430d77fccb763080460..531d2eced41d2a570e9a6ee034a4069c7c403259 100644
--- a/FindBoost.cmake
+++ b/FindBoost.cmake
@@ -57,13 +57,8 @@ find_path(
 # as Boost headers are supposed to be included like
 # #include<boost/...> according to the documentation
 set(TMP "")
-#if(WIN32)
-#    string(REPLACE "\\boost\\boost" "\\boost" TMP ${Boost_INCLUDE_DIRS})
-#    list(APPEND Boost_INCLUDE_DIRS ${TMP})
-#else()
-    string(REPLACE "/boost/boost" "/boost" TMP ${Boost_INCLUDE_DIRS})
-    list(APPEND Boost_INCLUDE_DIRS ${TMP})
-#endif()
+string(REPLACE "/boost/boost" "/boost" TMP ${Boost_INCLUDE_DIRS})
+list(APPEND Boost_INCLUDE_DIRS ${TMP})
 
 if(NOT Boost_INCLUDE_DIRS)
     message(FATAL_ERROR "Boost include directory was not found! Please, set variable BOOST_INCLUDEDIR to the correct path.")
@@ -77,7 +72,7 @@ if(NOT DEPENDENCIES_LINK_TYPE)
 endif()
 
 set(LIB_PREFIX "lib")
-set(LIB_SUFFIX "a")  # suffix for Linux static libraries
+set(LIB_SUFFIX "lib")  # suffix for MSVC static libraries (.lib on Windows)
 if("${DEPENDENCIES_LINK_TYPE}" STREQUAL "shared" AND WIN32)
     set(LIB_PREFIX "")
     set(LIB_SUFFIX "dll")
diff --git a/build_scripts/windows/win_VS_build_x64_debug.bat b/build_scripts/windows/win_VS_build_x64_debug.bat
index ea146fca6812252485b9eeeb360578e8bbe41d48..42081c8ef8210ebc1254194326e48d6a3950d353 100644
--- a/build_scripts/windows/win_VS_build_x64_debug.bat
+++ b/build_scripts/windows/win_VS_build_x64_debug.bat
@@ -1,4 +1,4 @@
-@echo off
+echo off
 title Building the 'lib4neuro' project for Debug
 
 cls
@@ -13,10 +13,10 @@ set "BUILD_SOMETHING_LIB="
 rem call VsDevCmd.bat
 
 rem Should we rebuild BOOST? (yes/no)
-set REBUILD_BOOST=yes
+set REBUILD_BOOST=no
 
 rem Should we build the examples? (yes/no)
-set BUILD_EXAMPLES=yes
+set BUILD_EXAMPLES=no
 
 rem Should we build the unit-tests? (yes/no)
 set BUILD_TESTS=yes
@@ -71,7 +71,7 @@ IF "%REBUILD_BOOST%"=="yes" (
 
     echo "DEPENDENCIES_LINK_TYPE %DEPENDENCIES_LINK_TYPE%"
 	
-    .\b2 --layout=system variant=debug link=%DEPENDENCIES_LINK_TYPE% address-model=64 --with-system --with-serialization --with-random || exit 1
+    .\b2 --layout=system variant=debug link=static address-model=64 --with-system --with-serialization --with-random || exit 1
 
 	cd ..\..\build_scripts\windows
 
@@ -82,17 +82,7 @@ IF "%REBUILD_BOOST%"=="yes" (
 
 IF "%BUILD_SOMETHING_LIB%"=="yes" (
 	
-	IF "%BUILD_LIB%"=="yes" (
-		call win_clean_lib.bat
-	)
-
-	IF "%BUILD_EXAMPLES%"=="yes" (
-		call win_clean_examples.bat
-	)
 	
-	IF "%BUILD_TESTS%"=="yes" (
-		call win_clean_tests.bat
-	)
 	
 	title Building the 'lib4neuro' project for Debug (preparing makefiles)
 	
@@ -103,7 +93,7 @@ IF "%BUILD_SOMETHING_LIB%"=="yes" (
 	set MAKEFILE_GENERATOR="Visual Studio 15 2017 Win64"
 
 	cd ..\..
-	cmake -G "Visual Studio 15 2017 Win64" -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER=%CXX_COMPILER% -DCMAKE_C_COMPILER=%C_COMPILER% -DBOOST_LIBRARYDIR=%BOOST_LIBRARYDIR% -DBOOST_INCLUDEDIR=%BOOST_INCLUDEDIR% -DBUILD_TESTS=%BUILD_TESTS% -DBUILD_EXAMPLES=%BUILD_EXAMPLES% -DBUILD_LIB=%BUILD_LIB% -DLIB4NEURO_DIR=build\lib -DDEPENDENCIES_LINK_TYPE=%DEPENDENCIES_LINK_TYPE% . || exit 1
+	cmake -G "Visual Studio 15 2017 Win64" -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER=%CXX_COMPILER% -DCMAKE_C_COMPILER=%C_COMPILER% -DBOOST_LIBRARYDIR=%BOOST_LIBRARYDIR% -DBOOST_INCLUDEDIR=%BOOST_INCLUDEDIR% -DBUILD_TESTS=%BUILD_TESTS% -DBUILD_EXAMPLES=%BUILD_EXAMPLES% -DBUILD_LIB=%BUILD_LIB% -DLIB4NEURO_DIR=build\lib -DDEPENDENCIES_LINK_TYPE=static . || exit 1
 	
 	title Building the 'lib4neuro' project for Debug (building)
 	(cmake --build .) && (echo "Build complete.") || exit 1
@@ -126,11 +116,6 @@ IF "%BUILD_SOMETHING_LIB%"=="yes" (
 	IF "%BUILD_EXAMPLES%"=="yes" (
 		cd ..\..
 		
-		rem Moving EXAMPLE files around to have a neater structure
-		mkdir build\tmp
-		xcopy /y build\examples\bin\Debug\*.exe build\tmp 2>NUL
-		rmdir /s /q "build\examples" 2> NUL
-		move build\tmp build\examples
 		
 		xcopy /y build\lib\*.dll build\examples 2>NUL
 		
@@ -140,12 +125,6 @@ IF "%BUILD_SOMETHING_LIB%"=="yes" (
 	IF "%BUILD_TESTS%"=="yes" (
 		cd ..\..
 		
-		rem Moving EXAMPLE files around to have a neater structure
-		mkdir build\tmp
-		xcopy /y build\unit-tests\bin\Debug\*.exe build\tmp 2>NUL
-		rmdir /s /q "build\unit-tests" 2> NUL
-		move build\tmp build\unit-tests
-
 		xcopy /y build\lib\*.dll build\unit-tests 2>NUL
 		
 		cd build_scripts\windows
diff --git a/build_scripts/windows/win_download_dependencies.bat b/build_scripts/windows/win_download_dependencies.bat
deleted file mode 100644
index fcc257c9e0945aa54fcdd16d7469f60c318e6d0f..0000000000000000000000000000000000000000
--- a/build_scripts/windows/win_download_dependencies.bat
+++ /dev/null
@@ -1,46 +0,0 @@
-@echo off
-title Downloading 'lib4neuro' project dependencies
-
-cls
-
-set "DOWNLOAD_DEP="
-
-rem call VsDevCmd.bat
-
-rem Should we download BOOST? (yes/no)
-set DOWNLOAD_DEP=yes
-
-IF "%DOWNLOAD_DEP%"=="yes" (
-    echo The required libraries will be downloaded from the official repositories into the directory 'external_dependencies'
-	rem pause
-)
-
-
-rem Dependencies download
-IF "%DOWNLOAD_DEP%"=="yes" (
-
-	cd ..\..
-	
-	rmdir /s /q "external_dependencies" 2>NUL
-	
-	git submodule init
-	git submodule update --remote
-	
-	cd external_dependencies\boost
-	
-		rem Submodules containing headers included in lib4neuro directly, but not required by serialization, system nor random
-		rem set BOOST_DEPENDENCIES_LIB4NEURO=libs/algorithm libs/range libs/concept_check libs/test libs/timer libs/exception
-		
-		rem Submodules required by linked libraries serialization, system or random
-		rem set BOOST_DEPENDENCIES=libs/bind libs/container_hash libs/type_index libs/function libs/array libs/optional libs/integer libs/utility libs/move libs/detail libs/throw_exception tools/build libs/config libs/assert libs/predef libs/io libs/spirit libs/smart_ptr libs/static_assert libs/type_traits libs/mpl libs/core libs/preprocessor libs/iterator libs/winapi
-
-		rem Submodules required by lib4neuro
-		rem set BOOST_REQUIRED_LIBS=libs/serialization libs/system libs/random
-	
-		rem echo Running 'submodule update --init %BOOST_DEPENDENCIES_LIB4NEURO% %BOOST_DEPENDENCIES% %BOOST_REQUIRED_LIBS%'
-		rem git submodule update  --init libs/algorithm libs/exception libs/range libs/concept_check libs/test libs/timer libs/bind libs/container_hash libs/type_index libs/function libs/array libs/optional libs/integer libs/utility libs/move libs/detail libs/throw_exception tools/build libs/config libs/assert libs/predef libs/io libs/spirit libs/smart_ptr libs/static_assert libs/type_traits libs/mpl libs/core libs/preprocessor libs/iterator libs/winapi libs/serialization libs/system libs/random
-		git submodule update  --init
-
-		call bootstrap.bat 
-	cd ..\..\build_scripts\windows
-)
diff --git a/include/4neuro.h b/include/4neuro.h
index 486fa50b13c33a9370b602f0feccc7291681da9d..3dd291b928653ace298bfcc2e1f9ad64d3c0317e 100644
--- a/include/4neuro.h
+++ b/include/4neuro.h
@@ -8,19 +8,18 @@
 //TODO make only public interface visible
 
 #include "../src/DataSet/DataSet.h"
-#include "../src/LearningMethods/ParticleSwarm.h"
-#include "../src/NetConnection/ConnectionFunctionGeneral.h"
-#include "../src/NetConnection/ConnectionFunctionIdentity.h"
 #include "../src/Network/NeuralNetwork.h"
-//#include "../src/Network/NeuralNetworkSum.h"
+#include "../src/Network/NeuralNetworkSum.h"
 #include "../src/Neuron/Neuron.h"
+#include "../src/Neuron/NeuronConstant.h"
 #include "../src/Neuron/NeuronBinary.h"
 #include "../src/Neuron/NeuronLinear.h"
 #include "../src/Neuron/NeuronLogistic.h"
 #include "../src/Solvers/DESolver.h"
+#include "../src/ErrorFunction/ErrorFunctions.h"
 #include "../src/constants.h"
-#include "../src/settings.h"
-#include "../src/message.h"
 
+// Abbreviate lib4neuro namespace to l4n
+namespace l4n = lib4neuro;
 
 #endif //INC_4NEURO_4NEURO_H
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 81a466328320ede33293897d654d4b8ae8e7e74f..c2f9bbd184de486d886c85d8dfc489f2edbaa069 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -13,6 +13,7 @@ if ("${BUILD_LIB}" STREQUAL "yes")
         SHARED
 
         General/ExprtkWrapper.cpp
+        Exception/Exceptions.cpp
     )
 
     target_include_directories(
@@ -44,6 +45,7 @@ if ("${BUILD_LIB}" STREQUAL "yes")
 		DataSet/DataSet.cpp        
 		ErrorFunction/ErrorFunctions.cpp        
 		Solvers/DESolver.cpp
+		Exception/Exceptions.cpp
 	)
 
     target_link_libraries(
diff --git a/src/DataSet/DataSet.cpp b/src/DataSet/DataSet.cpp
index b3efddc20b886503fafcc859eee0bc984767c690..c02624052ef06117a57194def75c4c7d1d9ea366 100644
--- a/src/DataSet/DataSet.cpp
+++ b/src/DataSet/DataSet.cpp
@@ -5,173 +5,175 @@
 
 #include "DataSetSerialization.h"
 
-InvalidDimension::InvalidDimension() : std::runtime_error("Invalid dimension specified!") {};
+namespace lib4neuro {
 
-InvalidDimension::InvalidDimension(std::string msg) : std::runtime_error(msg.c_str()) {};
+    DataSet::DataSet(std::string file_path) {
+        std::ifstream ifs(file_path);
+        boost::archive::text_iarchive ia(ifs);
+        ia >> *this;
+        ifs.close();
+    }
 
-DataSet::DataSet(std::string file_path) {
-    std::ifstream ifs (file_path);
-    boost::archive::text_iarchive ia(ifs);
-    ia >> *this;
-    ifs.close();
-}
+    DataSet::DataSet(std::vector<std::pair<std::vector<double>, std::vector<double>>> *data_ptr) {
+        this->n_elements = data_ptr->size();
+        this->data = *data_ptr;
 
-DataSet::DataSet(std::vector<std::pair<std::vector<double>, std::vector<double>>> *data_ptr) {
-    this->n_elements = data_ptr->size();
-    this->data = *data_ptr;
+        this->input_dim = this->data[0].first.size();
+        this->output_dim = this->data[0].second.size();
 
-    this->input_dim = this->data[0].first.size();
-    this->output_dim = this->data[0].second.size();
+        //TODO check the complete data set for input/output dimensions
+    }
 
-    //TODO check the complete data set for input/output dimensions
-}
+    DataSet::DataSet(double lower_bound, double upper_bound, unsigned int size, double output) {
+        std::vector<std::pair<std::vector<double>, std::vector<double>>> new_data_vec;
+        this->data = new_data_vec;
+        this->n_elements = 0;
+        this->input_dim = 1;
+        this->output_dim = 1;
 
-DataSet::DataSet(double lower_bound, double upper_bound, unsigned int size, double output) {
-    std::vector<std::pair<std::vector<double>, std::vector<double>>> new_data_vec;
-    this->data = new_data_vec;
-    this->n_elements = 0;
-    this->input_dim = 1;
-    this->output_dim = 1;
+        this->add_isotropic_data(lower_bound, upper_bound, size, output);
+    }
 
-    this->add_isotropic_data(lower_bound, upper_bound, size, output);
-}
+    DataSet::DataSet(std::vector<double> &bounds, unsigned int no_elems_in_one_dim,
+                     std::vector<double> (*output_func)(std::vector<double> &), unsigned int output_dim) {
+        std::vector<std::pair<std::vector<double>, std::vector<double>>> new_data_vec;
+        this->data = new_data_vec;
+        this->input_dim = bounds.size() / 2;
+        this->output_dim = output_dim;
+        this->n_elements = 0;
 
-DataSet::DataSet(std::vector<double> &bounds, unsigned int no_elems_in_one_dim, std::vector<double> (*output_func)(std::vector<double>&), unsigned int output_dim) {
-    std::vector<std::pair<std::vector<double>, std::vector<double>>> new_data_vec;
-    this->data = new_data_vec;
-    this->input_dim = bounds.size()/2;
-    this->output_dim = output_dim;
-    this->n_elements = 0;
+        this->add_isotropic_data(bounds, no_elems_in_one_dim, output_func);
+    }
 
-    this->add_isotropic_data(bounds, no_elems_in_one_dim, output_func);
-}
 
+    void DataSet::add_data_pair(std::vector<double> &inputs, std::vector<double> &outputs) {
+        if (inputs.size() != this->input_dim) {
+            throw InvalidDimension("Bad input dimension.");
+        } else if (outputs.size() != this->output_dim) {
+            throw InvalidDimension("Bad output dimension.");
+        }
 
-void DataSet::add_data_pair(std::vector<double> &inputs, std::vector<double> &outputs) {
-    if(inputs.size() != this->input_dim) {
-        throw InvalidDimension("Bad input dimension.");
-    } else if(outputs.size() != this->output_dim) {
-        throw InvalidDimension("Bad output dimension.");
+        this->n_elements++;
+        this->data.emplace_back(std::make_pair(inputs, outputs));
     }
 
-    this->n_elements++;
-    this->data.emplace_back(std::make_pair(inputs, outputs));
-}
+    void DataSet::add_isotropic_data(double lower_bound, double upper_bound, unsigned int size, double output) {
+        if (this->input_dim != 1 || this->output_dim != 1) {
+            throw InvalidDimension("Cannot add data with dimensionality 1:1 when the data set "
+                                   "is of different dimensionality!");
+        }
 
-void DataSet::add_isotropic_data(double lower_bound, double upper_bound, unsigned int size, double output) {
-    if(this->input_dim != 1 || this->output_dim != 1) {
-        throw InvalidDimension("Cannot add data with dimensionality 1:1 when the data set "
-                               "is of different dimensionality!");
-    }
+        double frac = (upper_bound - lower_bound) / (size - 1);
+        std::vector<double> inp, out;
 
-    double frac = (upper_bound - lower_bound) / (size - 1);
-    std::vector<double> inp, out;
+        out = {output};
 
-    out = {output};
+        for (unsigned int i = 0; i < size; ++i) {
+            inp = {frac * i};
+            this->data.emplace_back(std::make_pair(inp, out));
+        }
 
-    for(unsigned int i = 0; i < size; ++i){
-        inp = {frac*i};
-        this->data.emplace_back(std::make_pair(inp, out));
+        this->n_elements += size;
     }
 
-    this->n_elements += size;
-}
+    void DataSet::add_isotropic_data(std::vector<double> &bounds, unsigned int no_elems_in_one_dim,
+                                     std::vector<double> (*output_func)(std::vector<double> &)) {
+        // TODO add check of dataset dimensions
 
-void DataSet::add_isotropic_data(std::vector<double> &bounds, unsigned int no_elems_in_one_dim, std::vector<double> (*output_func)(std::vector<double>&)) {
-    // TODO add check of dataset dimensions
+        std::vector<std::vector<double>> grid;
+        std::vector<double> tmp;
+        double frac;
 
-    std::vector<std::vector<double>> grid;
-    std::vector<double> tmp;
-    double frac;
+        for (unsigned int i = 0; i < bounds.size(); i += 2) {
+            frac = (bounds[i] + bounds[i + 1]) / (no_elems_in_one_dim - 1);
+            tmp.clear();
+            for (double j = bounds[i]; j <= bounds[i + 1]; j += frac) {
+                tmp.emplace_back(j);
+            }
 
-    for(unsigned int i = 0; i < bounds.size(); i += 2) {
-        frac = (bounds[i] + bounds[i+1]) / (no_elems_in_one_dim - 1);
-        tmp.clear();
-        for(double j = bounds[i]; j <= bounds[i+1]; j += frac) {
-            tmp.emplace_back(j);
+            grid.emplace_back(tmp);
         }
 
-        grid.emplace_back(tmp);
+        grid = this->cartesian_product(&grid);
+
+        for (auto vec : grid) {
+            this->n_elements++;
+            this->data.emplace_back(std::make_pair(vec, output_func(vec)));
+        }
     }
 
-    grid = this->cartesian_product(&grid);
+    std::vector<std::pair<std::vector<double>, std::vector<double>>> *DataSet::get_data() {
+        return &(this->data);
+    }
 
-    for(auto vec : grid) {
-        this->n_elements++;
-        this->data.emplace_back(std::make_pair(vec, output_func(vec)));
+    size_t DataSet::get_n_elements() {
+        return this->n_elements;
     }
-}
-
-std::vector<std::pair<std::vector<double>, std::vector<double>>>* DataSet::get_data() {
-    return &(this->data);
-}
-
-size_t DataSet::get_n_elements() {
-    return this->n_elements;
-}
-
-size_t DataSet::get_input_dim() {
-    return this->input_dim;
-}
-
-size_t DataSet::get_output_dim() {
-    return this->output_dim;
-}
-
-void DataSet::print_data() {
-    if (n_elements) {
-        for (auto p : this->data) {
-            /* INPUT */
-            for (auto v : std::get<0>(p)) {
-                std::cout << v << " ";
-            }
 
-            std::cout << "-> ";
+    size_t DataSet::get_input_dim() {
+        return this->input_dim;
+    }
 
-            /* OUTPUT */
-            for (auto v : std::get<1>(p)) {
-                std::cout << v << " ";
-            }
+    size_t DataSet::get_output_dim() {
+        return this->output_dim;
+    }
+
+    void DataSet::print_data() {
+        if (n_elements) {
+            for (auto p : this->data) {
+                /* INPUT */
+                for (auto v : std::get<0>(p)) {
+                    std::cout << v << " ";
+                }
+
+                std::cout << "-> ";
+
+                /* OUTPUT */
+                for (auto v : std::get<1>(p)) {
+                    std::cout << v << " ";
+                }
 
-            std::cout << std::endl;
+                std::cout << std::endl;
+            }
         }
     }
-}
-
-void DataSet::store_text(std::string &file_path) {
-    //TODO check if stream was successfully opened
-    std::ofstream ofs(file_path);
-    boost::archive::text_oarchive oa(ofs);
-    oa << *this;
-    ofs.close();
-}
-
-template <class T>
-std::vector<std::vector<T>> DataSet::cartesian_product(const std::vector<std::vector<T>>* v) {
-    std::vector<std::vector<double>> v_combined_old, v_combined, v_tmp;
-    std::vector<double> tmp;
-
-    for(const auto& e : v->at(0)) {
-        tmp = {e};
-        v_combined.emplace_back(tmp);
+
+    void DataSet::store_text(std::string &file_path) {
+        //TODO check if stream was successfully opened
+        std::ofstream ofs(file_path);
+        boost::archive::text_oarchive oa(ofs);
+        oa << *this;
+        ofs.close();
     }
 
-    for(unsigned int i = 1; i < v->size(); i++) {  // Iterate through remaining vectors of 'v'
-        v_combined_old = v_combined;
-        v_combined.clear();
+    template<class T>
+    std::vector<std::vector<T>> DataSet::cartesian_product(const std::vector<std::vector<T>> *v) {
+        std::vector<std::vector<double>> v_combined_old, v_combined, v_tmp;
+        std::vector<double> tmp;
 
-        for(const auto& e : v->at(i)) {
-            for(const auto& vec : v_combined_old) {
-                tmp = vec;
-                tmp.emplace_back(e);
+        for (const auto &e : v->at(0)) {
+            tmp = {e};
+            v_combined.emplace_back(tmp);
+        }
 
-                /* Add only unique elements */
-                if(std::find(v_combined.begin(), v_combined.end(), tmp) == v_combined.end()) {
-                    v_combined.emplace_back(tmp);
+        for (unsigned int i = 1; i < v->size(); i++) {  // Iterate through remaining vectors of 'v'
+            v_combined_old = v_combined;
+            v_combined.clear();
+
+            for (const auto &e : v->at(i)) {
+                for (const auto &vec : v_combined_old) {
+                    tmp = vec;
+                    tmp.emplace_back(e);
+
+                    /* Add only unique elements */
+                    if (std::find(v_combined.begin(), v_combined.end(), tmp) == v_combined.end()) {
+                        v_combined.emplace_back(tmp);
+                    }
                 }
             }
         }
+
+        return v_combined;
     }
 
-    return v_combined;
 }
\ No newline at end of file
diff --git a/src/DataSet/DataSet.h b/src/DataSet/DataSet.h
index 3be44705eacee7e55d19e94ea2a44cd796589875..8df9daae3da6579c4638c25d67fa499534cce4f2 100644
--- a/src/DataSet/DataSet.h
+++ b/src/DataSet/DataSet.h
@@ -9,181 +9,164 @@
 #include <fstream>
 #include <utility>
 #include <vector>
-#include <exception>
 #include <string>
 #include <functional>
 
 #include "../settings.h"
-
-/**
- * Class representing an error caused by an incorrect
- * input/output dimension specification
- */
-class InvalidDimension: public std::runtime_error {
-public:
-
-    /**
-     * Constructor with the general error message
-     */
-    InvalidDimension();
-
-    /**
-     * Constructor with specific error message
-     * @param msg Specific error message
-     */
-    explicit InvalidDimension(std::string msg);
-};
-
-/**
- * Class representing data, which can be used for training
- * and testing purposes.
- */
-class DataSet {
-//    friend class boost::serialization::access;
-
-private:
-
-    /**
-     * Number of elements in the data set
-     */
-    size_t n_elements;
-
-    /**
-     * Dimension of the input
-     */
-    size_t input_dim = 0;
-
-    /**
-     * Dimension of the output
-     */
-    size_t output_dim = 0;
-
-    /**
-     * Stored data in the format of pairs of corresponding
-     * input and output vectors
-     */
-    std::vector<std::pair<std::vector<double>, std::vector<double>>> data;
-
-    template <class T>
-    std::vector<std::vector<T>> cartesian_product(const std::vector<std::vector<T>>* v);
-
-public:
-
-    /**
-     * Struct used to access private properties from
-     * the serialization function
-     */
-    struct access;
-
-    /**
-     * Constructor reading data from the file
-     * @param file_path Path to the file with stored data set
-     */
-    LIB4NEURO_API DataSet(std::string file_path);
-
-    /**
-     * Constructor accepting data vector
-     * @param data_ptr Pointer to the vector containing data
-     */
-    LIB4NEURO_API DataSet(std::vector<std::pair<std::vector<double>, std::vector<double>>>* data_ptr);
-
-    /**
-     * Creates a new data set with input values equidistantly positioned
-     * over the certain interval and the output value
-     * being constant
-     *
-     * Both input and output are 1-dimensional
-     *
-     * @todo add bounds as vectors for multi-dimensional data-sets
-     *
-     * @param lower_bound Lower bound of the input data interval
-     * @param upper_bound Upper bound of the input data interval
-     * @param size Number of input-output pairs generated
-     * @param output Constant output value
-     */
-    LIB4NEURO_API DataSet(double lower_bound, double upper_bound, unsigned int size, double output);
-
-    /**
-     *
-     * @param bounds
-     * @param no_elems_in_one_dim
-     * @param output_func
-     * @param output_dim
-     */
-    LIB4NEURO_API DataSet(std::vector<double> &bounds, unsigned int no_elems_in_one_dim, std::vector<double> (*output_func)(std::vector<double>&), unsigned int output_dim);
-
-    /**
-     * Getter for number of elements
-     * @return Number of elements in the data set
-     */
-    LIB4NEURO_API size_t get_n_elements();
-
-    /**
-     * Returns the input dimension
-     * @return Input dimension
-     */
-    LIB4NEURO_API size_t get_input_dim();
-
-
-    /**
-     * Return the output dimension
-     * @return Output dimension
-     */
-    LIB4NEURO_API size_t get_output_dim();
-
-    /**
-     * Getter for the data structure
-     * @return Vector of data
-     */
-    LIB4NEURO_API std::vector<std::pair<std::vector<double>, std::vector<double>>>* get_data();
-
-    /**
-     * Adds a new pair of data to the data set
-     * @param inputs Vector of input data
-     * @param outputs Vector of output data corresponding to the input data
-     */
-    LIB4NEURO_API void add_data_pair(std::vector<double> &inputs, std::vector<double> &outputs);
-
-    //TODO expand method to generate multiple data types - chebyshev etc.
-    /**
-     * Adds a new data with input values equidistantly positioned
-     * over the certain interval and the output value
-     * being constant
-     *
-     * Both input and output are 1-dimensional
-     *
-     * @param lower_bound Lower bound of the input data interval
-     * @param upper_bound Upper bound of the input data interval
-     * @param size Number of input-output pairs generated
-     * @param output Constant output value
-     */
-    LIB4NEURO_API void add_isotropic_data(double lower_bound, double upper_bound, unsigned int size, double output);
-
-    /**
-     * Adds a new data with input values equidistantly positioned
-     * over the certain interval and the output value
-     * being constant
-     *
-     * Input can have arbitrary many dimensions,
-     * output can be an arbitrary function
-     *
-     * @param bounds Odd values are lower bounds and even values are corresponding upper bounds
-     * @param size Number of input-output pairs generated
-     * @param output_func Function determining output value
-     */
-    LIB4NEURO_API void add_isotropic_data(std::vector<double> &bounds, unsigned int no_elems_in_one_dim, std::vector<double> (*output_func)(std::vector<double>&));
-
-    //TODO Chebyshev - ch. interpolation points, i-th point = cos(i*alpha) from 0 to pi
-
-    /**
-     * Prints the data set
-     */
-    LIB4NEURO_API void print_data();
-
-    /**
-     * Stores the DataSet object to the binary file
-     */
-    LIB4NEURO_API void store_text(std::string &file_path);
-};
-
+#include "../Exception/Exceptions.h"
+
+namespace lib4neuro {
+    /**
+     * Class representing data, which can be used for training
+     * and testing purposes.
+     */
+    class DataSet {
+
+    private:
+
+        /**
+         * Number of elements in the data set
+         */
+        size_t n_elements;
+
+        /**
+         * Dimension of the input
+         */
+        size_t input_dim = 0;
+
+        /**
+         * Dimension of the output
+         */
+        size_t output_dim = 0;
+
+        /**
+         * Stored data in the format of pairs of corresponding
+         * input and output vectors
+         */
+        std::vector<std::pair<std::vector<double>, std::vector<double>>> data;
+
+        template<class T>
+        std::vector<std::vector<T>> cartesian_product(const std::vector<std::vector<T>> *v);
+
+    public:
+
+        /**
+         * Struct used to access private properties from
+         * the serialization function
+         */
+        struct access;
+
+        /**
+         * Constructor reading data from the file
+         * @param file_path Path to the file with stored data set
+         */
+        LIB4NEURO_API DataSet(std::string file_path);
+
+        /**
+         * Constructor accepting data vector
+         * @param data_ptr Pointer to the vector containing data
+         */
+        LIB4NEURO_API DataSet(std::vector<std::pair<std::vector<double>, std::vector<double>>> *data_ptr);
+
+        /**
+         * Creates a new data set with input values equidistantly positioned
+         * over the certain interval and the output value
+         * being constant
+         *
+         * Both input and output are 1-dimensional
+         *
+         * @todo add bounds as vectors for multi-dimensional data-sets
+         *
+         * @param lower_bound Lower bound of the input data interval
+         * @param upper_bound Upper bound of the input data interval
+         * @param size Number of input-output pairs generated
+         * @param output Constant output value
+         */
+        LIB4NEURO_API DataSet(double lower_bound, double upper_bound, unsigned int size, double output);
+
+        /**
+         *
+         * @param bounds
+         * @param no_elems_in_one_dim
+         * @param output_func
+         * @param output_dim
+         */
+        LIB4NEURO_API DataSet(std::vector<double> &bounds, unsigned int no_elems_in_one_dim,
+                              std::vector<double> (*output_func)(std::vector<double> &), unsigned int output_dim);
+
+        /**
+         * Getter for number of elements
+         * @return Number of elements in the data set
+         */
+        LIB4NEURO_API size_t get_n_elements();
+
+        /**
+         * Returns the input dimension
+         * @return Input dimension
+         */
+        LIB4NEURO_API size_t get_input_dim();
+
+
+        /**
+         * Return the output dimension
+         * @return Output dimension
+         */
+        LIB4NEURO_API size_t get_output_dim();
+
+        /**
+         * Getter for the data structure
+         * @return Vector of data
+         */
+        LIB4NEURO_API std::vector<std::pair<std::vector<double>, std::vector<double>>> *get_data();
+
+        /**
+         * Adds a new pair of data to the data set
+         * @param inputs Vector of input data
+         * @param outputs Vector of output data corresponding to the input data
+         */
+        LIB4NEURO_API void add_data_pair(std::vector<double> &inputs, std::vector<double> &outputs);
+
+        //TODO expand method to generate multiple data types - chebyshev etc.
+        /**
+         * Adds a new data with input values equidistantly positioned
+         * over the certain interval and the output value
+         * being constant
+         *
+         * Both input and output are 1-dimensional
+         *
+         * @param lower_bound Lower bound of the input data interval
+         * @param upper_bound Upper bound of the input data interval
+         * @param size Number of input-output pairs generated
+         * @param output Constant output value
+         */
+        LIB4NEURO_API void add_isotropic_data(double lower_bound, double upper_bound, unsigned int size, double output);
+
+        /**
+         * Adds a new data with input values equidistantly positioned
+         * over the certain interval and the output value
+         * being constant
+         *
+         * Input can have arbitrary many dimensions,
+         * output can be an arbitrary function
+         *
+         * @param bounds Odd values are lower bounds and even values are corresponding upper bounds
+         * @param size Number of input-output pairs generated
+         * @param output_func Function determining output value
+         */
+        LIB4NEURO_API void add_isotropic_data(std::vector<double> &bounds, unsigned int no_elems_in_one_dim,
+                                              std::vector<double> (*output_func)(std::vector<double> &));
+
+        //TODO Chebyshev - ch. interpolation points, i-th point = cos(i*alpha) from 0 to pi
+
+        /**
+         * Prints the data set
+         */
+        LIB4NEURO_API void print_data();
+
+        /**
+         * Stores the DataSet object to the binary file
+         */
+        LIB4NEURO_API void store_text(std::string &file_path);
+    };
+}
 #endif //INC_4NEURO_DATASET_H
diff --git a/src/DataSet/DataSetSerialization.h b/src/DataSet/DataSetSerialization.h
index 39a1a6bc1b490f1de5dcecd441aed52471709016..120fbd0dc48d67c3905f72c1afdf0a811e2caf06 100644
--- a/src/DataSet/DataSetSerialization.h
+++ b/src/DataSet/DataSetSerialization.h
@@ -14,15 +14,17 @@
 
 #include "DataSet.h"
 
-struct DataSet :: access {
-    template <class Archive>
-    static void serialize(Archive &ar, DataSet& ds, const unsigned int version) {
-        ar & ds.n_elements;
-        ar & ds.input_dim;
-        ar & ds.output_dim;
-        ar & ds.data;
-    }
-};
+namespace lib4neuro {
+    struct DataSet::access {
+        template<class Archive>
+        static void serialize(Archive &ar, DataSet &ds, const unsigned int version) {
+            ar & ds.n_elements;
+            ar & ds.input_dim;
+            ar & ds.output_dim;
+            ar & ds.data;
+        }
+    };
+}
 
 namespace boost {
     namespace serialization {
@@ -35,9 +37,9 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, DataSet & ds, const unsigned int version)
+        void serialize(Archive & ar, lib4neuro::DataSet & ds, const unsigned int version)
         {
-            DataSet::access::serialize(ar, ds, version);
+            lib4neuro::DataSet::access::serialize(ar, ds, version);
         }
 
     } // namespace serialization
diff --git a/src/ErrorFunction/ErrorFunctions.cpp b/src/ErrorFunction/ErrorFunctions.cpp
index 7f9ad7b2d5a6b452684d49cbdbd744cd7159d514..4120f5380c26509120815697e343a10605c4f746 100644
--- a/src/ErrorFunction/ErrorFunctions.cpp
+++ b/src/ErrorFunction/ErrorFunctions.cpp
@@ -6,93 +6,95 @@
 
 #include "ErrorFunctions.h"
 
+namespace lib4neuro {
 
-size_t ErrorFunction::get_dimension() {
-    return this->dimension;
-}
+    size_t ErrorFunction::get_dimension() {
+        return this->dimension;
+    }
 
-MSE::MSE(NeuralNetwork *net, DataSet *ds) {
-    this->net = net;
-    this->ds = ds;
-    this->dimension = net->get_n_weights() + net->get_n_biases();
-}
+    MSE::MSE(NeuralNetwork *net, DataSet *ds) {
+        this->net = net;
+        this->ds = ds;
+        this->dimension = net->get_n_weights() + net->get_n_biases();
+    }
 
-double MSE::eval(std::vector<double> *weights) {
-    unsigned int dim_out = this->ds->get_output_dim();
+    double MSE::eval(std::vector<double> *weights) {
+        unsigned int dim_out = this->ds->get_output_dim();
 //    unsigned int dim_in = this->ds->get_input_dim();
-    size_t n_elements = this->ds->get_n_elements();
-    double error = 0.0, val;
+        size_t n_elements = this->ds->get_n_elements();
+        double error = 0.0, val;
 
-    std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = this->ds->get_data();
+        std::vector<std::pair<std::vector<double>, std::vector<double>>> *data = this->ds->get_data();
 
 //    //TODO instead use something smarter
 //    this->net->copy_weights(weights);
 
-    std::vector<double> output( dim_out );
+        std::vector<double> output(dim_out);
 
-    for(unsigned int i = 0; i < n_elements; ++i){  // Iterate through every element in the test set
+        for (unsigned int i = 0; i < n_elements; ++i) {  // Iterate through every element in the test set
 
-        this->net->eval_single(data->at(i).first, output, weights);  // Compute the net output and store it into 'output' variable
+            this->net->eval_single(data->at(i).first, output,
+                                   weights);  // Compute the net output and store it into 'output' variable
 
 
 //        printf("errors: ");
-        for(unsigned int j = 0; j < dim_out; ++j) {  // Compute difference for every element of the output vector
+            for (unsigned int j = 0; j < dim_out; ++j) {  // Compute difference for every element of the output vector
 
-            val = output[j] - data->at(i).second[j];
-            error += val * val;
+                val = output[j] - data->at(i).second[j];
+                error += val * val;
 
 //            printf("%f, ", val * val);
-        }
+            }
 //        printf("\n");
 
-    }
+        }
 
 //    printf("n_elements: %d\n", n_elements);
-    return error/n_elements;
-}
-
-ErrorSum::ErrorSum() {
-    this->summand = nullptr;
-    this->summand_coefficient = nullptr;
-    this->dimension = 0;
-}
-
-ErrorSum::~ErrorSum(){
-    if( this->summand ){
-        delete this->summand;
+        return error / n_elements;
     }
-    if( this->summand_coefficient ){
-        delete this->summand_coefficient;
-    }
-}
 
-double ErrorSum::eval(std::vector<double> *weights) {
-    double output = 0.0;
+    ErrorSum::ErrorSum() {
+        this->summand = nullptr;
+        this->summand_coefficient = nullptr;
+        this->dimension = 0;
+    }
 
-    for( unsigned int i = 0; i < this->summand->size(); ++i ){
-        output += this->summand->at( i )->eval( weights ) * this->summand_coefficient->at( i );
+    ErrorSum::~ErrorSum() {
+        if (this->summand) {
+            delete this->summand;
+        }
+        if (this->summand_coefficient) {
+            delete this->summand_coefficient;
+        }
     }
 
-    return output;
-}
+    double ErrorSum::eval(std::vector<double> *weights) {
+        double output = 0.0;
 
-void ErrorSum::add_error_function( ErrorFunction *F, double alpha ) {
-    if(!this->summand){
-        this->summand = new std::vector<ErrorFunction*>(0);
-    }
-    this->summand->push_back( F );
+        for (unsigned int i = 0; i < this->summand->size(); ++i) {
+            output += this->summand->at(i)->eval(weights) * this->summand_coefficient->at(i);
+        }
 
-    if(!this->summand_coefficient){
-        this->summand_coefficient = new std::vector<double>(0);
+        return output;
     }
-    this->summand_coefficient->push_back( alpha );
 
-    if(F->get_dimension() > this->dimension){
-        this->dimension = F->get_dimension();
+    void ErrorSum::add_error_function(ErrorFunction *F, double alpha) {
+        if (!this->summand) {
+            this->summand = new std::vector<ErrorFunction *>(0);
+        }
+        this->summand->push_back(F);
+
+        if (!this->summand_coefficient) {
+            this->summand_coefficient = new std::vector<double>(0);
+        }
+        this->summand_coefficient->push_back(alpha);
+
+        if (F->get_dimension() > this->dimension) {
+            this->dimension = F->get_dimension();
+        }
     }
-}
 
-size_t ErrorSum::get_dimension() {
+    size_t ErrorSum::get_dimension() {
 //    if(!this->dimension) {
 //        size_t max = 0;
 //        for(auto e : *this->summand) {
@@ -103,5 +105,7 @@ size_t ErrorSum::get_dimension() {
 //
 //        this->dimension = max;
 //    }
-    return this->dimension;
+        return this->dimension;
+    }
+
 }
\ No newline at end of file
diff --git a/src/ErrorFunction/ErrorFunctions.h b/src/ErrorFunction/ErrorFunctions.h
index a374ea1b4e035f38db8c46241eee60111268e85f..67595b067329c399c99004a2dcb965e944a7a0af 100644
--- a/src/ErrorFunction/ErrorFunctions.h
+++ b/src/ErrorFunction/ErrorFunctions.h
@@ -10,94 +10,96 @@
 #include "../Network/NeuralNetwork.h"
 #include "../DataSet/DataSet.h"
 
-
-enum ErrorFunctionType{
-    ErrorFuncMSE
-};
-
-
-class ErrorFunction {
-public:
-
-    /**
-     *
-     * @param weights
-     * @return
-     */
-    virtual double eval(std::vector<double>* weights = nullptr) = 0;
-    
-    /**
-     * 
-     * @return 
-     */
-    LIB4NEURO_API virtual size_t get_dimension();
-
-protected:
-
-    /**
-     *
-     */
-    size_t dimension = 0;
-};
-
-class MSE : public ErrorFunction {
-
-public:
-    /**
-     * Constructor for single neural network
-     * @param net
-     * @param ds
-     */
-    LIB4NEURO_API MSE(NeuralNetwork* net, DataSet* ds);
-
-    /**
-     *
-     * @param weights
-     * @return
-     */
-    LIB4NEURO_API virtual double eval(std::vector<double>* weights = nullptr);
-
-private:
-
-    NeuralNetwork* net;
-    DataSet* ds;
-};
-
-class ErrorSum : public ErrorFunction{
-public:
-    /**
-     *
-     */
-    LIB4NEURO_API ErrorSum();
-
-    /**
-     *
-     */
-    LIB4NEURO_API ~ErrorSum();
-
-    /**
-     *
-     * @param weights
-     * @return
-     */
-    LIB4NEURO_API virtual double eval(std::vector<double>* weights = nullptr);
-
-    /**
-     *
-     * @param F
-     */
-    LIB4NEURO_API void add_error_function( ErrorFunction *F, double alpha = 1.0 );
-
-    /**
-     *
-     * @return
-     */
-    LIB4NEURO_API size_t get_dimension() override;
-
-private:
-    std::vector<ErrorFunction*>* summand;
-    std::vector<double> *summand_coefficient;
-};
-
+namespace lib4neuro {
+
+    enum ErrorFunctionType {
+        ErrorFuncMSE
+    };
+
+
+    class ErrorFunction {
+    public:
+
+        /**
+         *
+         * @param weights
+         * @return
+         */
+        virtual double eval(std::vector<double> *weights = nullptr) = 0;
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API virtual size_t get_dimension();
+
+    protected:
+
+        /**
+         *
+         */
+        size_t dimension = 0;
+    };
+
+    class MSE : public ErrorFunction {
+
+    public:
+        /**
+         * Constructor for single neural network
+         * @param net
+         * @param ds
+         */
+        LIB4NEURO_API MSE(NeuralNetwork *net, DataSet *ds);
+
+        /**
+         *
+         * @param weights
+         * @return
+         */
+        LIB4NEURO_API virtual double eval(std::vector<double> *weights = nullptr);
+
+    private:
+
+        NeuralNetwork *net;
+        DataSet *ds;
+    };
+
+    class ErrorSum : public ErrorFunction {
+    public:
+        /**
+         *
+         */
+        LIB4NEURO_API ErrorSum();
+
+        /**
+         *
+         */
+        LIB4NEURO_API ~ErrorSum();
+
+        /**
+         *
+         * @param weights
+         * @return
+         */
+        LIB4NEURO_API virtual double eval(std::vector<double> *weights = nullptr);
+
+        /**
+         *
+         * @param F
+         */
+        LIB4NEURO_API void add_error_function(ErrorFunction *F, double alpha = 1.0);
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API size_t get_dimension() override;
+
+    private:
+        std::vector<ErrorFunction *> *summand;
+        std::vector<double> *summand_coefficient;
+    };
+
+}
 
 #endif //INC_4NEURO_ERRORFUNCTION_H
diff --git a/src/General/ExprtkWrapper.cpp b/src/General/ExprtkWrapper.cpp
index 7abf68596a225b61f655e3bbf05e8db300aaa5a1..0b3c3920a03d0a9a845b4fdd97d40da3cab226ac 100644
--- a/src/General/ExprtkWrapper.cpp
+++ b/src/General/ExprtkWrapper.cpp
@@ -10,13 +10,14 @@
 #include "exprtk.hpp"
 #include "ExprtkWrapper.h"
 #include "ExprtkWrapperSerialization.h"
-#include "../exceptions.h"
+#include "../Exception/Exceptions.h"
 
 BOOST_CLASS_EXPORT_IMPLEMENT(ExprtkWrapper);
 
 ExprtkWrapper::ExprtkWrapper() {
     // Because of serialization
     // TODO implement?
+    throw NotImplementedException();
 }
 
 ExprtkWrapper::ExprtkWrapper( std::string expression_string ) {
@@ -38,7 +39,6 @@ ExprtkWrapper::ExprtkWrapper( std::string expression_string ) {
     this->p_impl->parser->compile(this->p_impl->expression_str, *this->p_impl->expression );
 }
 
-
 ExprtkWrapper::~ExprtkWrapper() {
 
     if( this->p_impl->expression ){
diff --git a/src/LearningMethods/ParticleSwarm.cpp b/src/LearningMethods/ParticleSwarm.cpp
index c5d70510263310a40836dce37f853b830e79fe7e..21679bc6761fd71c27ed60c48c447dfa64b67903 100644
--- a/src/LearningMethods/ParticleSwarm.cpp
+++ b/src/LearningMethods/ParticleSwarm.cpp
@@ -59,7 +59,7 @@ void Particle::randomize_velocity() {
     }
 }
 
-Particle::Particle(ErrorFunction* ef, double *domain_bounds) {
+Particle::Particle(lib4neuro::ErrorFunction* ef, double *domain_bounds) {
     //TODO better generating of random numbers
     this->domain_bounds = domain_bounds;
     this->coordinate_dim = ef->get_dimension();
@@ -190,7 +190,7 @@ void Particle::print_coordinate() {
     printf("%10.8f\n", (*this->coordinate)[this->coordinate_dim - 1]);
 }
 
-ParticleSwarm::ParticleSwarm(ErrorFunction* ef, std::vector<double> *domain_bounds,
+ParticleSwarm::ParticleSwarm(lib4neuro::ErrorFunction* ef, std::vector<double> *domain_bounds,
                              double c1, double c2, double w, size_t n_particles, size_t iter_max) {
     srand(time(NULL));
 
diff --git a/src/LearningMethods/ParticleSwarm.h b/src/LearningMethods/ParticleSwarm.h
index e89704a94a4e602b071639c0850d795dafe39c88..f31faef3264c7efcd360c7c292e94d8a7348c67e 100644
--- a/src/LearningMethods/ParticleSwarm.h
+++ b/src/LearningMethods/ParticleSwarm.h
@@ -11,8 +11,10 @@
 #include "../settings.h"
 #include "../ErrorFunction/ErrorFunctions.h"
 
-
-class Particle{
+/**
+ *
+ */
+class Particle {
 private:
 
     size_t coordinate_dim;
@@ -28,7 +30,7 @@ private:
 
     double current_val;
 
-    ErrorFunction* ef;
+    lib4neuro::ErrorFunction *ef;
 
     double *domain_bounds;
 
@@ -50,14 +52,15 @@ public:
      *
      * @param f_dim
      */
-    LIB4NEURO_API Particle(ErrorFunction* ef, double *domain_bounds);
-    LIB4NEURO_API ~Particle( );
+    LIB4NEURO_API Particle(lib4neuro::ErrorFunction *ef, double *domain_bounds);
+
+    LIB4NEURO_API ~Particle();
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API std::vector<double>* get_coordinate();
+    LIB4NEURO_API std::vector<double> *get_coordinate();
 
     /**
      *
@@ -85,7 +88,9 @@ public:
      * @param glob_min_coord
      * @param penalty_coef
      */
-    LIB4NEURO_API double change_coordinate(double w, double c1, double c2, std::vector<double> &glob_min_coord, std::vector<std::vector<double>> &global_min_vec, double penalty_coef=0.25);
+    LIB4NEURO_API double change_coordinate(double w, double c1, double c2, std::vector<double> &glob_min_coord,
+                                           std::vector<std::vector<double>> &global_min_vec,
+                                           double penalty_coef = 0.25);
 };
 
 
@@ -96,12 +101,12 @@ private:
     /**
      *
      */
-    Particle** particle_swarm = nullptr;
+    Particle **particle_swarm = nullptr;
 
     /**
      *
      */
-    ErrorFunction* f;
+    lib4neuro::ErrorFunction *f;
 
     size_t func_dim;
 
@@ -130,13 +135,13 @@ protected:
      * @param val
      * @return
      */
-    LIB4NEURO_API Particle* determine_optimal_coordinate_and_value(std::vector<double> &coord, double &val);
+    LIB4NEURO_API Particle *determine_optimal_coordinate_and_value(std::vector<double> &coord, double &val);
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API std::vector<double>* get_centroid_coordinates();
+    LIB4NEURO_API std::vector<double> *get_centroid_coordinates();
 
     /**
      *
@@ -145,7 +150,7 @@ protected:
      * @param n
      * @return
      */
-    LIB4NEURO_API double get_euclidean_distance(std::vector<double>* a, std::vector<double>* b);
+    LIB4NEURO_API double get_euclidean_distance(std::vector<double> *a, std::vector<double> *b);
 
 public:
 
@@ -160,13 +165,15 @@ public:
      * @param n_particles
      * @param iter_max
      */
-     //TODO make domain_bounds constant
-    LIB4NEURO_API ParticleSwarm( ErrorFunction* ef, std::vector<double> *domain_bounds, double c1 = 1.711897, double c2 = 1.711897, double w = 0.711897, size_t n_particles = 50, size_t iter_max = 1000 );
+    //TODO make domain_bounds constant
+    LIB4NEURO_API ParticleSwarm(lib4neuro::ErrorFunction *ef, std::vector<double> *domain_bounds, double c1 = 1.711897,
+                                double c2 = 1.711897, double w = 0.711897, size_t n_particles = 50,
+                                size_t iter_max = 1000);
 
     /**
      *
      */
-    LIB4NEURO_API ~ParticleSwarm( );
+    LIB4NEURO_API ~ParticleSwarm();
 
 
     /**
@@ -175,16 +182,15 @@ public:
      * @param epsilon
      * @param delta
      */
-    LIB4NEURO_API void optimize( double gamma, double epsilon, double delta=0.7 );
+    LIB4NEURO_API void optimize(double gamma, double epsilon, double delta = 0.7);
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API std::vector<double>* get_solution();
+    LIB4NEURO_API std::vector<double> *get_solution();
 
 
 };
 
-
 #endif //INC_4NEURO_PARTICLESWARM_H
diff --git a/src/NetConnection/ConnectionFunctionGeneral.h b/src/NetConnection/ConnectionFunctionGeneral.h
index bbe56e056e09591eb4a2ceef63cc58d81b40138b..6892caf19ad7dded05072a5c3006b2eb8aa94e6f 100644
--- a/src/NetConnection/ConnectionFunctionGeneral.h
+++ b/src/NetConnection/ConnectionFunctionGeneral.h
@@ -9,7 +9,6 @@
 #define INC_4NEURO_CONNECTIONWEIGHT_H
 
 #include "../settings.h"
-//#include "../ISerializable.h"
 
 #include <functional>
 #include <vector>
diff --git a/src/Network/NeuralNetwork.cpp b/src/Network/NeuralNetwork.cpp
index 7119940b9974918ba5be460d61b22cac4c676490..8d095a5fe8890e9adcb0d06a06f069fd3c27fdb9 100644
--- a/src/Network/NeuralNetwork.cpp
+++ b/src/Network/NeuralNetwork.cpp
@@ -5,134 +5,139 @@
  * @date 13.6.18 - 
  */
 
+#include <iostream>
+
+#include "../message.h"
 #include "NeuralNetwork.h"
 #include "NeuralNetworkSerialization.h"
 
-NeuralNetwork::NeuralNetwork() {
-    this->neurons = new std::vector<Neuron*>(0);
-    this->neuron_biases = new std::vector<double>(0);
-    this->neuron_potentials = new std::vector<double>(0);
-    this->neuron_bias_indices = new std::vector<int>(0);
+namespace lib4neuro {
+    NeuralNetwork::NeuralNetwork() {
+        this->neurons = new std::vector<Neuron *>(0);
+        this->neuron_biases = new std::vector<double>(0);
+        this->neuron_potentials = new std::vector<double>(0);
+        this->neuron_bias_indices = new std::vector<int>(0);
 
-    this->connection_weights =new std::vector<double>(0);
-    this->connection_list = new std::vector<ConnectionFunctionGeneral*>(0);
-    this->inward_adjacency = new std::vector<std::vector<std::pair<size_t, size_t>>*>(0);
-    this->outward_adjacency = new std::vector<std::vector<std::pair<size_t, size_t>>*>(0);
+        this->connection_weights = new std::vector<double>(0);
+        this->connection_list = new std::vector<ConnectionFunctionGeneral *>(0);
+        this->inward_adjacency = new std::vector<std::vector<std::pair<size_t, size_t>> *>(0);
+        this->outward_adjacency = new std::vector<std::vector<std::pair<size_t, size_t>> *>(0);
 
-    this->neuron_layers_feedforward = new std::vector<std::vector<size_t>*>(0);
-    this->neuron_layers_feedbackward = new std::vector<std::vector<size_t>*>(0);
+        this->neuron_layers_feedforward = new std::vector<std::vector<size_t> *>(0);
+        this->neuron_layers_feedbackward = new std::vector<std::vector<size_t> *>(0);
 
-    this->input_neuron_indices = new std::vector<size_t>(0);
-    this->output_neuron_indices = new std::vector<size_t>(0);
+        this->input_neuron_indices = new std::vector<size_t>(0);
+        this->output_neuron_indices = new std::vector<size_t>(0);
 
-    this->delete_weights = true;
-    this->delete_biases = true;
-    this->layers_analyzed = false;
-}
+        this->delete_weights = true;
+        this->delete_biases = true;
+        this->layers_analyzed = false;
+    }
 
-NeuralNetwork::NeuralNetwork(std::string filepath) {
-    std::ifstream ifs(filepath);
-    boost::archive::text_iarchive ia(ifs);
-    ia >> *this;
-    ifs.close();
-}
+    NeuralNetwork::NeuralNetwork(std::string filepath) {
+        std::ifstream ifs(filepath);
+        boost::archive::text_iarchive ia(ifs);
+        ia >> *this;
+        ifs.close();
+    }
 
-NeuralNetwork::~NeuralNetwork() {
+    NeuralNetwork::~NeuralNetwork() {
 
-    if(this->neurons){
-        for( auto n: *(this->neurons) ){
-            delete n;
-            n = nullptr;
+        if (this->neurons) {
+            for (auto n: *(this->neurons)) {
+                delete n;
+                n = nullptr;
+            }
+            delete this->neurons;
+            this->neurons = nullptr;
         }
-        delete this->neurons;
-        this->neurons = nullptr;
-    }
 
-    if(this->neuron_potentials){
-        delete this->neuron_potentials;
-        this->neuron_potentials = nullptr;
-    }
+        if (this->neuron_potentials) {
+            delete this->neuron_potentials;
+            this->neuron_potentials = nullptr;
+        }
 
-    if(this->neuron_bias_indices){
-        delete this->neuron_bias_indices;
-        this->neuron_bias_indices = nullptr;
-    }
+        if (this->neuron_bias_indices) {
+            delete this->neuron_bias_indices;
+            this->neuron_bias_indices = nullptr;
+        }
 
-    if(this->output_neuron_indices){
-        delete this->output_neuron_indices;
-        this->output_neuron_indices = nullptr;
-    }
+        if (this->output_neuron_indices) {
+            delete this->output_neuron_indices;
+            this->output_neuron_indices = nullptr;
+        }
 
-    if(this->input_neuron_indices){
-        delete this->input_neuron_indices;
-        this->input_neuron_indices = nullptr;
-    }
+        if (this->input_neuron_indices) {
+            delete this->input_neuron_indices;
+            this->input_neuron_indices = nullptr;
+        }
 
-    if(this->connection_weights && this->delete_weights){
-        delete this->connection_weights;
-        this->connection_weights = nullptr;
-    }
+        if (this->connection_weights && this->delete_weights) {
+            delete this->connection_weights;
+            this->connection_weights = nullptr;
+        }
 
-    if(this->neuron_biases && this->delete_biases){
-        delete this->neuron_biases;
-        this->neuron_biases = nullptr;
-    }
+        if (this->neuron_biases && this->delete_biases) {
+            delete this->neuron_biases;
+            this->neuron_biases = nullptr;
+        }
 
-    if(this->connection_list){
+        if (this->connection_list) {
 
-        if(this->delete_weights){
-            for(auto c: *this->connection_list){
-                delete c;
-                c = nullptr;
+            if (this->delete_weights) {
+                for (auto c: *this->connection_list) {
+                    delete c;
+                    c = nullptr;
+                }
             }
+            delete this->connection_list;
+            this->connection_list = nullptr;
         }
-        delete this->connection_list;
-        this->connection_list = nullptr;
-    }
 
-    if(this->inward_adjacency){
-        for(auto e: *this->inward_adjacency){
-            if(e){
-                delete e;
-                e = nullptr;
+        if (this->inward_adjacency) {
+            for (auto e: *this->inward_adjacency) {
+                if (e) {
+                    delete e;
+                    e = nullptr;
+                }
             }
+            delete this->inward_adjacency;
+            this->inward_adjacency = nullptr;
         }
-        delete this->inward_adjacency;
-        this->inward_adjacency = nullptr;
-    }
 
-    if(this->outward_adjacency){
-        for(auto e: *this->outward_adjacency){
-            if(e){
-                delete e;
-                e = nullptr;
+        if (this->outward_adjacency) {
+            for (auto e: *this->outward_adjacency) {
+                if (e) {
+                    delete e;
+                    e = nullptr;
+                }
             }
+            delete this->outward_adjacency;
+            this->outward_adjacency = nullptr;
         }
-        delete this->outward_adjacency;
-        this->outward_adjacency = nullptr;
-    }
 
-    if(this->neuron_layers_feedforward){
-        for(auto e: *this->neuron_layers_feedforward){
-            delete e;
-            e = nullptr;
+        if (this->neuron_layers_feedforward) {
+            for (auto e: *this->neuron_layers_feedforward) {
+                delete e;
+                e = nullptr;
+            }
+            delete this->neuron_layers_feedforward;
+            this->neuron_layers_feedforward = nullptr;
         }
-        delete this->neuron_layers_feedforward;
-        this->neuron_layers_feedforward = nullptr;
-    }
 
-    if(this->neuron_layers_feedbackward){
-        for(auto e: *this->neuron_layers_feedbackward){
-            delete e;
-            e = nullptr;
+        if (this->neuron_layers_feedbackward) {
+            for (auto e: *this->neuron_layers_feedbackward) {
+                delete e;
+                e = nullptr;
+            }
+            delete this->neuron_layers_feedbackward;
+            this->neuron_layers_feedbackward = nullptr;
         }
-        delete this->neuron_layers_feedbackward;
-        this->neuron_layers_feedbackward = nullptr;
     }
-}
 
-NeuralNetwork* NeuralNetwork::get_subnet(std::vector<size_t> &input_neuron_indices, std::vector<size_t> &output_neuron_indices){
-    NeuralNetwork *output_net = nullptr;
+    NeuralNetwork *
+    NeuralNetwork::get_subnet(std::vector<size_t> &input_neuron_indices, std::vector<size_t> &output_neuron_indices) {
+        NeuralNetwork *output_net = nullptr;
 // TODO rework due to the changed structure of the class
 //    Neuron * active_neuron, * target_neuron;
 //
@@ -349,434 +354,433 @@ NeuralNetwork* NeuralNetwork::get_subnet(std::vector<size_t> &input_neuron_indic
 //    delete [] active_neuron_set;
 //
 //
-    return output_net;
-}
+        return output_net;
+    }
 
-size_t NeuralNetwork::add_neuron(Neuron* n, BIAS_TYPE bt, size_t bias_idx) {
+    size_t NeuralNetwork::add_neuron(Neuron *n, BIAS_TYPE bt, size_t bias_idx) {
 
-    if( bt == BIAS_TYPE::NO_BIAS ){
-        this->neuron_bias_indices->push_back(-1);
-    }
-    else if( bt == BIAS_TYPE::NEXT_BIAS ){
-        this->neuron_bias_indices->push_back((int)this->neuron_biases->size());
-        this->neuron_biases->resize(this->neuron_biases->size() + 1);
-    }
-    else if( bt == BIAS_TYPE::EXISTING_BIAS ){
-        if( bias_idx >= this->neuron_biases->size()){
-            std::cerr << "The supplied bias index is too large!\n" << std::endl;
+        if (bt == BIAS_TYPE::NO_BIAS) {
+            this->neuron_bias_indices->push_back(-1);
+        } else if (bt == BIAS_TYPE::NEXT_BIAS) {
+            this->neuron_bias_indices->push_back((int) this->neuron_biases->size());
+            this->neuron_biases->resize(this->neuron_biases->size() + 1);
+        } else if (bt == BIAS_TYPE::EXISTING_BIAS) {
+            if (bias_idx >= this->neuron_biases->size()) {
+                std::cerr << "The supplied bias index is too large!\n" << std::endl;
+            }
+            this->neuron_bias_indices->push_back((int) bias_idx);
         }
-        this->neuron_bias_indices->push_back((int)bias_idx);
-    }
 
-    this->outward_adjacency->push_back(new std::vector<std::pair<size_t, size_t>>(0));
-    this->inward_adjacency->push_back(new std::vector<std::pair<size_t, size_t>>(0));
+        this->outward_adjacency->push_back(new std::vector<std::pair<size_t, size_t>>(0));
+        this->inward_adjacency->push_back(new std::vector<std::pair<size_t, size_t>>(0));
 
-    this->neurons->push_back(n);
+        this->neurons->push_back(n);
 
-    this->layers_analyzed = false;
-    return this->neurons->size() - 1;
-}
+        this->layers_analyzed = false;
+        return this->neurons->size() - 1;
+    }
 
-size_t NeuralNetwork::add_connection_simple( size_t n1_idx, size_t n2_idx, SIMPLE_CONNECTION_TYPE sct, size_t weight_idx ) {
+    size_t
+    NeuralNetwork::add_connection_simple(size_t n1_idx, size_t n2_idx, SIMPLE_CONNECTION_TYPE sct, size_t weight_idx) {
 
-    ConnectionFunctionIdentity *con_weight_u1u2;
-    if( sct == SIMPLE_CONNECTION_TYPE::UNITARY_WEIGHT ){
-        con_weight_u1u2 = new ConnectionFunctionIdentity( );
-    }
-    else{
-        if( sct == SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT ){
-            weight_idx = this->connection_weights->size();
-            this->connection_weights->resize(weight_idx + 1);
-        }
-        else if( sct == SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT ){
-            if( weight_idx >= this->connection_weights->size()){
-                std::cerr << "The supplied connection weight index is too large!\n" << std::endl;
+        ConnectionFunctionIdentity *con_weight_u1u2;
+        if (sct == SIMPLE_CONNECTION_TYPE::UNITARY_WEIGHT) {
+            con_weight_u1u2 = new ConnectionFunctionIdentity();
+        } else {
+            if (sct == SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT) {
+                weight_idx = this->connection_weights->size();
+                this->connection_weights->resize(weight_idx + 1);
+            } else if (sct == SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT) {
+                if (weight_idx >= this->connection_weights->size()) {
+                    std::cerr << "The supplied connection weight index is too large!\n" << std::endl;
+                }
             }
-        }
 
-        con_weight_u1u2 = new ConnectionFunctionIdentity( weight_idx );
-    }
+            con_weight_u1u2 = new ConnectionFunctionIdentity(weight_idx);
+        }
 
-    size_t conn_idx = this->add_new_connection_to_list(con_weight_u1u2);
+        size_t conn_idx = this->add_new_connection_to_list(con_weight_u1u2);
 
-    this->add_outward_connection(n1_idx, n2_idx, conn_idx);
-    this->add_inward_connection(n2_idx, n1_idx, conn_idx);
+        this->add_outward_connection(n1_idx, n2_idx, conn_idx);
+        this->add_inward_connection(n2_idx, n1_idx, conn_idx);
 
-    this->layers_analyzed = false;
+        this->layers_analyzed = false;
 
-    return this->connection_list->size() - 1;
-}
+        return this->connection_list->size() - 1;
+    }
 
-void NeuralNetwork::add_existing_connection(size_t n1_idx, size_t n2_idx, size_t connection_idx,
-                                            NeuralNetwork &parent_network) {
+    void NeuralNetwork::add_existing_connection(size_t n1_idx, size_t n2_idx, size_t connection_idx,
+                                                NeuralNetwork &parent_network) {
 
-    size_t conn_idx = this->add_new_connection_to_list(parent_network.connection_list->at( connection_idx ));
+        size_t conn_idx = this->add_new_connection_to_list(parent_network.connection_list->at(connection_idx));
 
-    this->add_outward_connection(n1_idx, n2_idx, conn_idx);
-    this->add_inward_connection(n2_idx, n1_idx, conn_idx);
+        this->add_outward_connection(n1_idx, n2_idx, conn_idx);
+        this->add_inward_connection(n2_idx, n1_idx, conn_idx);
 
-    this->layers_analyzed = false;
-}
+        this->layers_analyzed = false;
+    }
 
-void NeuralNetwork::copy_parameter_space(std::vector<double> *parameters) {
-    if(parameters != nullptr){
-        for(unsigned int i = 0; i < this->connection_weights->size(); ++i){
-            (*this->connection_weights)[i] = (*parameters)[i];
-        }
+    void NeuralNetwork::copy_parameter_space(std::vector<double> *parameters) {
+        if (parameters != nullptr) {
+            for (unsigned int i = 0; i < this->connection_weights->size(); ++i) {
+                (*this->connection_weights)[i] = (*parameters)[i];
+            }
 
-        for(unsigned int i = 0; i < this->neuron_biases->size(); ++i){
-            (*this->neuron_biases)[i] = (*parameters)[i + this->connection_weights->size()];
+            for (unsigned int i = 0; i < this->neuron_biases->size(); ++i) {
+                (*this->neuron_biases)[i] = (*parameters)[i + this->connection_weights->size()];
+            }
         }
     }
-}
 
-void NeuralNetwork::set_parameter_space_pointers(NeuralNetwork &parent_network) {
+    void NeuralNetwork::set_parameter_space_pointers(NeuralNetwork &parent_network) {
 
-    if(this->connection_weights){
-        delete connection_weights;
-    }
-
-    if(this->neuron_biases){
-        delete this->neuron_biases;
-    }
+        if (this->connection_weights) {
+            delete connection_weights;
+        }
 
-    this->connection_weights = parent_network.connection_weights;
-    this->neuron_biases = parent_network.neuron_biases;
+        if (this->neuron_biases) {
+            delete this->neuron_biases;
+        }
 
-    this->delete_biases = false;
-    this->delete_weights = false;
-}
+        this->connection_weights = parent_network.connection_weights;
+        this->neuron_biases = parent_network.neuron_biases;
 
-void NeuralNetwork::eval_single(std::vector<double> &input, std::vector<double> &output, std::vector<double> * custom_weights_and_biases) {
-    if((this->input_neuron_indices->size() * this->output_neuron_indices->size()) <= 0){
-        std::cerr << "Input and output neurons have not been specified\n" << std::endl;
-        exit(-1);
+        this->delete_biases = false;
+        this->delete_weights = false;
     }
 
+    void NeuralNetwork::eval_single(std::vector<double> &input, std::vector<double> &output,
+                                    std::vector<double> *custom_weights_and_biases) {
+        if ((this->input_neuron_indices->size() * this->output_neuron_indices->size()) <= 0) {
+            std::cerr << "Input and output neurons have not been specified\n" << std::endl;
+            exit(-1);
+        }
 
-    if(this->input_neuron_indices->size() != input.size()){
-        std::cerr << "Error, input size != Network input size\n" << std::endl;
-        exit(-1);
-    }
 
-    if(this->output_neuron_indices->size() != output.size()){
-        std::cerr << "Error, output size != Network output size\n" << std::endl;
-        exit(-1);
-    }
-    double potential, bias;
-    int bias_idx;
+        if (this->input_neuron_indices->size() != input.size()) {
+            std::cerr << "Error, input size != Network input size\n" << std::endl;
+            exit(-1);
+        }
 
-    this->copy_parameter_space( custom_weights_and_biases );
+        if (this->output_neuron_indices->size() != output.size()) {
+            std::cerr << "Error, output size != Network output size\n" << std::endl;
+            exit(-1);
+        }
+        double potential, bias;
+        int bias_idx;
 
-    this->analyze_layer_structure();
+        this->copy_parameter_space(custom_weights_and_biases);
 
-    /* reset of the output and the neuron potentials */
-    std::fill(output.begin(), output.end(), 0.0);
-    std::fill(this->neuron_potentials->begin(), this->neuron_potentials->end(), 0.0);
+        this->analyze_layer_structure();
 
-    /* set the potentials of the input neurons */
-    for(size_t i = 0; i < this->input_neuron_indices->size(); ++i){
-        this->neuron_potentials->at( this->input_neuron_indices->at(i) ) = input[ i ];
-    }
+        /* reset of the output and the neuron potentials */
+        std::fill(output.begin(), output.end(), 0.0);
+        std::fill(this->neuron_potentials->begin(), this->neuron_potentials->end(), 0.0);
 
-    /* we iterate through all the feed-forward layers and transfer the signals */
-    for( auto layer: *this->neuron_layers_feedforward){
-        /* we iterate through all neurons in this layer and propagate the signal to the neighboring neurons */
+        /* set the potentials of the input neurons */
+        for (size_t i = 0; i < this->input_neuron_indices->size(); ++i) {
+            this->neuron_potentials->at(this->input_neuron_indices->at(i)) = input[i];
+        }
 
-        for( auto si: *layer ){
-            bias = 0.0;
-            bias_idx = this->neuron_bias_indices->at( si );
-            if( bias_idx >= 0 ){
-                bias = this->neuron_biases->at( bias_idx );
-            }
-            potential = this->neurons->at(si)->activate(this->neuron_potentials->at( si ), bias);
+        /* we iterate through all the feed-forward layers and transfer the signals */
+        for (auto layer: *this->neuron_layers_feedforward) {
+            /* we iterate through all neurons in this layer and propagate the signal to the neighboring neurons */
+
+            for (auto si: *layer) {
+                bias = 0.0;
+                bias_idx = this->neuron_bias_indices->at(si);
+                if (bias_idx >= 0) {
+                    bias = this->neuron_biases->at(bias_idx);
+                }
+                potential = this->neurons->at(si)->activate(this->neuron_potentials->at(si), bias);
 
-            for(auto c: *this->outward_adjacency->at( si )){
-                size_t ti = c.first;
-                size_t ci = c.second;
+                for (auto c: *this->outward_adjacency->at(si)) {
+                    size_t ti = c.first;
+                    size_t ci = c.second;
 
-                this->neuron_potentials->at( ti ) += this->connection_list->at( ci )->eval( *this->connection_weights ) * potential;
+                    this->neuron_potentials->at(ti) +=
+                            this->connection_list->at(ci)->eval(*this->connection_weights) * potential;
+                }
             }
         }
-    }
 
-    unsigned int i = 0;
-    for(auto oi: *this->output_neuron_indices){
-        bias = 0.0;
-        bias_idx = this->neuron_bias_indices->at( oi );
-        if( bias_idx >= 0 ){
-            bias = this->neuron_biases->at( bias_idx );
+        unsigned int i = 0;
+        for (auto oi: *this->output_neuron_indices) {
+            bias = 0.0;
+            bias_idx = this->neuron_bias_indices->at(oi);
+            if (bias_idx >= 0) {
+                bias = this->neuron_biases->at(bias_idx);
+            }
+            output[i] = this->neurons->at(oi)->activate(this->neuron_potentials->at(oi), bias);
+            ++i;
         }
-        output[i] = this->neurons->at( oi )->activate( this->neuron_potentials->at( oi ), bias );
-        ++i;
     }
-}
 
-void NeuralNetwork::randomize_weights( ) {
+    void NeuralNetwork::randomize_weights() {
 
-    boost::random::mt19937 gen;
+        boost::random::mt19937 gen;
 
-    // Init weight guess ("optimal" for logistic activation functions)
-    double r = 4 * sqrt(6./(this->connection_weights->size()));
+        // Init weight guess ("optimal" for logistic activation functions)
+        double r = 4 * sqrt(6. / (this->connection_weights->size()));
 
-    boost::random::uniform_real_distribution<> dist(-r, r);
+        boost::random::uniform_real_distribution<> dist(-r, r);
 
-    for(size_t i = 0; i < this->connection_weights->size(); i++) {
-        this->connection_weights->at(i) = dist(gen);
+        for (size_t i = 0; i < this->connection_weights->size(); i++) {
+            this->connection_weights->at(i) = dist(gen);
+        }
     }
-}
 
-void NeuralNetwork::randomize_biases( ) {
+    void NeuralNetwork::randomize_biases() {
 
-    boost::random::mt19937 gen;
+        boost::random::mt19937 gen;
 
-    // Init weight guess ("optimal" for logistic activation functions)
-    boost::random::uniform_real_distribution<> dist(-1, 1);
-    for(size_t i = 0; i < this->neuron_biases->size(); i++) {
-        this->neuron_biases->at(i) = dist(gen);
+        // Init weight guess ("optimal" for logistic activation functions)
+        boost::random::uniform_real_distribution<> dist(-1, 1);
+        for (size_t i = 0; i < this->neuron_biases->size(); i++) {
+            this->neuron_biases->at(i) = dist(gen);
+        }
     }
-}
-
-size_t NeuralNetwork::get_n_inputs() {
-    return this->input_neuron_indices->size();
-}
 
-size_t  NeuralNetwork::get_n_outputs() {
-    return this->output_neuron_indices->size();
-}
-
-size_t NeuralNetwork::get_n_weights() {
-    return this->connection_weights->size();
-}
-
-size_t NeuralNetwork::get_n_biases() {
-    return this->neuron_biases->size();
-}
-
-int NeuralNetwork::get_neuron_bias_index(size_t neuron_idx) {
-    return this->neuron_bias_indices->at( neuron_idx );
-}
+    size_t NeuralNetwork::get_n_inputs() {
+        return this->input_neuron_indices->size();
+    }
 
-size_t NeuralNetwork::get_n_neurons() {
-    return this->neurons->size();
-}
+    size_t NeuralNetwork::get_n_outputs() {
+        return this->output_neuron_indices->size();
+    }
 
-void NeuralNetwork::specify_input_neurons(std::vector<size_t> &input_neurons_indices) {
-    if( !this->input_neuron_indices ){
-        this->input_neuron_indices = new std::vector<size_t>(input_neurons_indices);
+    size_t NeuralNetwork::get_n_weights() {
+        return this->connection_weights->size();
     }
-    else{
-        delete this->input_neuron_indices;
-        this->input_neuron_indices = new std::vector<size_t>(input_neurons_indices);
+
+    size_t NeuralNetwork::get_n_biases() {
+        return this->neuron_biases->size();
     }
-}
 
-void NeuralNetwork::specify_output_neurons(std::vector<size_t> &output_neurons_indices) {
-    if( !this->output_neuron_indices ){
-        this->output_neuron_indices = new std::vector<size_t>(output_neurons_indices);
+    int NeuralNetwork::get_neuron_bias_index(size_t neuron_idx) {
+        return this->neuron_bias_indices->at(neuron_idx);
     }
-    else{
-        delete this->output_neuron_indices;
-        this->output_neuron_indices = new std::vector<size_t>(output_neurons_indices);
+
+    size_t NeuralNetwork::get_n_neurons() {
+        return this->neurons->size();
     }
-}
 
-void NeuralNetwork::print_weights() {
-    printf("Connection weights: ");
-    if(this->connection_weights){
-        for( size_t i = 0; i < this->connection_weights->size() - 1; ++i){
-            printf("%f, ", this->connection_weights->at(i));
+    void NeuralNetwork::specify_input_neurons(std::vector<size_t> &input_neurons_indices) {
+        if (!this->input_neuron_indices) {
+            this->input_neuron_indices = new std::vector<size_t>(input_neurons_indices);
+        } else {
+            delete this->input_neuron_indices;
+            this->input_neuron_indices = new std::vector<size_t>(input_neurons_indices);
         }
-        printf("%f", this->connection_weights->at(this->connection_weights->size() - 1));
     }
 
-    printf("\n");
-}
-
-void NeuralNetwork::print_stats(){
-    std::cout << "Number of neurons: " << this->neurons->size() << std::endl
-              << "Number of connections: " << this->connection_list->size() << std::endl
-              << "Number of active weights: " << this->connection_weights->size() << std::endl
-              << "Number of active biases: " << this->neuron_biases->size() << std::endl;
-}
-
-std::vector<double>* NeuralNetwork::get_parameter_ptr_biases() {
-    return this->neuron_biases;
-}
-
-std::vector<double>* NeuralNetwork::get_parameter_ptr_weights() {
-    return this->connection_weights;
-}
+    void NeuralNetwork::specify_output_neurons(std::vector<size_t> &output_neurons_indices) {
+        if (!this->output_neuron_indices) {
+            this->output_neuron_indices = new std::vector<size_t>(output_neurons_indices);
+        } else {
+            delete this->output_neuron_indices;
+            this->output_neuron_indices = new std::vector<size_t>(output_neurons_indices);
+        }
+    }
 
-size_t NeuralNetwork::add_new_connection_to_list(ConnectionFunctionGeneral *con) {
-    this->connection_list->push_back(con);
-    return this->connection_list->size() - 1;
-}
+    void NeuralNetwork::print_weights() {
+        std::cout << "Connection weights: ";
+        if (this->connection_weights) {
+            for (size_t i = 0; i < this->connection_weights->size() - 1; ++i) {
+                std::cout << this->connection_weights->at(i) << " ";
+            }
+            std::cout << this->connection_weights->at(this->connection_weights->size() - 1);
+        }
 
-void NeuralNetwork::add_inward_connection(size_t s, size_t t, size_t con_idx) {
-    if(!this->inward_adjacency->at(s)){
-        this->inward_adjacency->at(s) = new std::vector<std::pair<size_t, size_t>>(0);
+        std::cout << std::endl;
     }
-    this->inward_adjacency->at(s)->push_back(std::pair<size_t, size_t>(t, con_idx));
-}
 
-void NeuralNetwork::add_outward_connection(size_t s, size_t t, size_t con_idx) {
-    if(!this->outward_adjacency->at(s)){
-        this->outward_adjacency->at(s) = new std::vector<std::pair<size_t, size_t>>(0);
+    void NeuralNetwork::print_stats() {
+        std::cout << "Number of neurons: " << this->neurons->size() << std::endl
+                  << "Number of connections: " << this->connection_list->size() << std::endl
+                  << "Number of active weights: " << this->connection_weights->size() << std::endl
+                  << "Number of active biases: " << this->neuron_biases->size() << std::endl;
     }
-    this->outward_adjacency->at(s)->push_back(std::pair<size_t, size_t>(t, con_idx));
-}
 
-void NeuralNetwork::analyze_layer_structure() {
+    std::vector<double> *NeuralNetwork::get_parameter_ptr_biases() {
+        return this->neuron_biases;
+    }
 
-    if(this->layers_analyzed){
-        //nothing to do
-        return;
+    std::vector<double> *NeuralNetwork::get_parameter_ptr_weights() {
+        return this->connection_weights;
     }
 
-    /* buffer preparation */
-    this->neuron_potentials->resize(this->get_n_neurons());
+    size_t NeuralNetwork::add_new_connection_to_list(ConnectionFunctionGeneral *con) {
+        this->connection_list->push_back(con);
+        return this->connection_list->size() - 1;
+    }
 
-    /* space allocation */
-    if(this->neuron_layers_feedforward){
-        for(auto e: *this->neuron_layers_feedforward){
-            delete e;
-            e = nullptr;
+    void NeuralNetwork::add_inward_connection(size_t s, size_t t, size_t con_idx) {
+        if (!this->inward_adjacency->at(s)) {
+            this->inward_adjacency->at(s) = new std::vector<std::pair<size_t, size_t>>(0);
         }
-        delete this->neuron_layers_feedforward;
-        this->neuron_layers_feedforward = nullptr;
+        this->inward_adjacency->at(s)->push_back(std::pair<size_t, size_t>(t, con_idx));
     }
 
-    if(this->neuron_layers_feedbackward){
-        for(auto e: *this->neuron_layers_feedbackward){
-            delete e;
-            e = nullptr;
+    void NeuralNetwork::add_outward_connection(size_t s, size_t t, size_t con_idx) {
+        if (!this->outward_adjacency->at(s)) {
+            this->outward_adjacency->at(s) = new std::vector<std::pair<size_t, size_t>>(0);
         }
-        delete this->neuron_layers_feedbackward;
-        this->neuron_layers_feedbackward = nullptr;
+        this->outward_adjacency->at(s)->push_back(std::pair<size_t, size_t>(t, con_idx));
     }
 
-    this->neuron_layers_feedforward = new std::vector<std::vector<size_t>*>(0);
-    this->neuron_layers_feedbackward = new std::vector<std::vector<size_t>*>(0);
+    void NeuralNetwork::analyze_layer_structure() {
 
+        if (this->layers_analyzed) {
+            //nothing to do
+            return;
+        }
 
-    auto n = this->neurons->size();
+        /* buffer preparation */
+        this->neuron_potentials->resize(this->get_n_neurons());
 
-    /* helpful counters */
-    std::vector<size_t> inward_saturation(n);
-    std::vector<size_t> outward_saturation(n);
-    std::fill(inward_saturation.begin(), inward_saturation.end(), 0);
-    std::fill(outward_saturation.begin(), outward_saturation.end(), 0);
-    for(unsigned int i = 0; i < n; ++i){
-        if(this->inward_adjacency->at(i)){
-            inward_saturation[i] = this->inward_adjacency->at(i)->size();
+        /* space allocation */
+        if (this->neuron_layers_feedforward) {
+            for (auto e: *this->neuron_layers_feedforward) {
+                delete e;
+                e = nullptr;
+            }
+            delete this->neuron_layers_feedforward;
+            this->neuron_layers_feedforward = nullptr;
         }
 
-        if(this->outward_adjacency->at(i)){
-            outward_saturation[i] = this->outward_adjacency->at(i)->size();
+        if (this->neuron_layers_feedbackward) {
+            for (auto e: *this->neuron_layers_feedbackward) {
+                delete e;
+                e = nullptr;
+            }
+            delete this->neuron_layers_feedbackward;
+            this->neuron_layers_feedbackward = nullptr;
         }
-    }
 
+        this->neuron_layers_feedforward = new std::vector<std::vector<size_t> *>(0);
+        this->neuron_layers_feedbackward = new std::vector<std::vector<size_t> *>(0);
 
-    std::vector<size_t> active_eval_set(2 * n);
-    size_t active_set_size[2];
 
-    /* feedforward analysis */
-    active_set_size[0] = 0;
-    active_set_size[1] = 0;
+        auto n = this->neurons->size();
 
-    size_t idx1 = 0, idx2 = 1;
+        /* helpful counters */
+        std::vector<size_t> inward_saturation(n);
+        std::vector<size_t> outward_saturation(n);
+        std::fill(inward_saturation.begin(), inward_saturation.end(), 0);
+        std::fill(outward_saturation.begin(), outward_saturation.end(), 0);
+        for (unsigned int i = 0; i < n; ++i) {
+            if (this->inward_adjacency->at(i)) {
+                inward_saturation[i] = this->inward_adjacency->at(i)->size();
+            }
 
-    active_set_size[0] = this->get_n_inputs();
-    size_t i = 0;
-    for(i = 0; i < this->get_n_inputs(); ++i){
-        active_eval_set[i] = this->input_neuron_indices->at(i);
-    }
+            if (this->outward_adjacency->at(i)) {
+                outward_saturation[i] = this->outward_adjacency->at(i)->size();
+            }
+        }
 
-    size_t active_ni;
-    while(active_set_size[idx1] > 0){
 
-        /* we add the current active set as the new outward layer */
-        std::vector<size_t> *new_feedforward_layer = new std::vector<size_t>(active_set_size[idx1]);
-        this->neuron_layers_feedforward->push_back( new_feedforward_layer );
+        std::vector<size_t> active_eval_set(2 * n);
+        size_t active_set_size[2];
 
-        //we iterate through the active neurons and propagate the signal
-        for(i = 0; i < active_set_size[idx1]; ++i){
-            active_ni = active_eval_set[i + n * idx1];
-            new_feedforward_layer->at( i ) = active_ni;
+        /* feedforward analysis */
+        active_set_size[0] = 0;
+        active_set_size[1] = 0;
 
-            if(!this->outward_adjacency->at(active_ni)){
-                continue;
-            }
+        size_t idx1 = 0, idx2 = 1;
+
+        active_set_size[0] = this->get_n_inputs();
+        size_t i = 0;
+        for (i = 0; i < this->get_n_inputs(); ++i) {
+            active_eval_set[i] = this->input_neuron_indices->at(i);
+        }
+
+        size_t active_ni;
+        while (active_set_size[idx1] > 0) {
+
+            /* we add the current active set as the new outward layer */
+            std::vector<size_t> *new_feedforward_layer = new std::vector<size_t>(active_set_size[idx1]);
+            this->neuron_layers_feedforward->push_back(new_feedforward_layer);
+
+            //we iterate through the active neurons and propagate the signal
+            for (i = 0; i < active_set_size[idx1]; ++i) {
+                active_ni = active_eval_set[i + n * idx1];
+                new_feedforward_layer->at(i) = active_ni;
 
-            for(auto ni: *(this->outward_adjacency->at(active_ni))){
-                inward_saturation[ni.first]--;
+                if (!this->outward_adjacency->at(active_ni)) {
+                    continue;
+                }
+
+                for (auto ni: *(this->outward_adjacency->at(active_ni))) {
+                    inward_saturation[ni.first]--;
 
-                if(inward_saturation[ni.first] == 0){
-                    active_eval_set[active_set_size[idx2] + n * idx2] = ni.first;
-                    active_set_size[idx2]++;
+                    if (inward_saturation[ni.first] == 0) {
+                        active_eval_set[active_set_size[idx2] + n * idx2] = ni.first;
+                        active_set_size[idx2]++;
+                    }
                 }
             }
-        }
 
-        idx1 = idx2;
-        idx2 = (idx1 + 1) % 2;
+            idx1 = idx2;
+            idx2 = (idx1 + 1) % 2;
 
-        active_set_size[idx2] = 0;
-    }
+            active_set_size[idx2] = 0;
+        }
 
 
-    /* feed backward analysis */
-    active_set_size[0] = 0;
-    active_set_size[1] = 0;
+        /* feed backward analysis */
+        active_set_size[0] = 0;
+        active_set_size[1] = 0;
 
-    idx1 = 0;
-    idx2 = 1;
+        idx1 = 0;
+        idx2 = 1;
 
-    active_set_size[0] = this->get_n_outputs();
-    for(i = 0; i < this->get_n_outputs(); ++i){
-        active_eval_set[i] = this->output_neuron_indices->at(i);
-    }
+        active_set_size[0] = this->get_n_outputs();
+        for (i = 0; i < this->get_n_outputs(); ++i) {
+            active_eval_set[i] = this->output_neuron_indices->at(i);
+        }
 
-    while(active_set_size[idx1] > 0){
+        while (active_set_size[idx1] > 0) {
 
-        /* we add the current active set as the new outward layer */
-        std::vector<size_t> *new_feedbackward_layer = new std::vector<size_t>(active_set_size[idx1]);
-        this->neuron_layers_feedbackward->push_back( new_feedbackward_layer );
+            /* we add the current active set as the new outward layer */
+            std::vector<size_t> *new_feedbackward_layer = new std::vector<size_t>(active_set_size[idx1]);
+            this->neuron_layers_feedbackward->push_back(new_feedbackward_layer);
 
-        //we iterate through the active neurons and propagate the signal backward
-        for(i = 0; i < active_set_size[idx1]; ++i){
-            active_ni = active_eval_set[i + n * idx1];
-            new_feedbackward_layer->at( i ) = active_ni;
+            //we iterate through the active neurons and propagate the signal backward
+            for (i = 0; i < active_set_size[idx1]; ++i) {
+                active_ni = active_eval_set[i + n * idx1];
+                new_feedbackward_layer->at(i) = active_ni;
 
-            if(!this->inward_adjacency->at(active_ni)){
-                continue;
-            }
+                if (!this->inward_adjacency->at(active_ni)) {
+                    continue;
+                }
 
-            for(auto ni: *(this->inward_adjacency->at(active_ni))){
-                outward_saturation[ni.first]--;
+                for (auto ni: *(this->inward_adjacency->at(active_ni))) {
+                    outward_saturation[ni.first]--;
 
-                if(outward_saturation[ni.first] == 0){
-                    active_eval_set[active_set_size[idx2] + n * idx2] = ni.first;
-                    active_set_size[idx2]++;
+                    if (outward_saturation[ni.first] == 0) {
+                        active_eval_set[active_set_size[idx2] + n * idx2] = ni.first;
+                        active_set_size[idx2]++;
+                    }
                 }
             }
-        }
 
-        idx1 = idx2;
-        idx2 = (idx1 + 1) % 2;
+            idx1 = idx2;
+            idx2 = (idx1 + 1) % 2;
 
-        active_set_size[idx2] = 0;
-    }
+            active_set_size[idx2] = 0;
+        }
 
-    this->layers_analyzed = true;
-}
+        this->layers_analyzed = true;
+    }
 
-void NeuralNetwork::save_text(std::string filepath) {
-    std::ofstream ofs(filepath);
-    {
-        boost::archive::text_oarchive oa(ofs);
-        oa << *this;
-        ofs.close();
+    void NeuralNetwork::save_text(std::string filepath) {
+        std::ofstream ofs(filepath);
+        {
+            boost::archive::text_oarchive oa(ofs);
+            oa << *this;
+            ofs.close();
+        }
     }
-}
+
+}
\ No newline at end of file
diff --git a/src/Network/NeuralNetwork.h b/src/Network/NeuralNetwork.h
index db2927a35715e5a13a54f806768e1d10ff965a55..032382fbcef955efc717189cd3413210b095af2b 100644
--- a/src/Network/NeuralNetwork.h
+++ b/src/Network/NeuralNetwork.h
@@ -28,288 +28,300 @@
 #include "../NetConnection/ConnectionFunctionGeneral.h"
 #include "../NetConnection/ConnectionFunctionIdentity.h"
 
-
-enum class BIAS_TYPE{NEXT_BIAS, NO_BIAS, EXISTING_BIAS};
-
-enum class SIMPLE_CONNECTION_TYPE{NEXT_WEIGHT, UNITARY_WEIGHT, EXISTING_WEIGHT};
-
-
-/**
- *
- */
-class NeuralNetwork {
-private:
-
-    /**
-     *
-     */
-    std::vector<Neuron*> *neurons = nullptr;
-
-    /**
-     *
-     */
-    std::vector<size_t>* input_neuron_indices = nullptr;
-
-    /**
-     *
-     */
-    std::vector<size_t>* output_neuron_indices = nullptr;
-
-    /**
-     *
-     */
-    std::vector<double>* connection_weights = nullptr;
-
-    /**
-     *
-     */
-    std::vector<double>* neuron_biases = nullptr;
-
-    /**
-     *
-     */
-    std::vector<int>* neuron_bias_indices = nullptr;
-
-    /**
-     *
-     */
-    std::vector<double>* neuron_potentials = nullptr;
-
-    /**
-     *
-     */
-    std::vector<ConnectionFunctionGeneral*> * connection_list = nullptr;
-
-    /**
-     *
-     */
-    std::vector<std::vector<std::pair<size_t, size_t>>*> * inward_adjacency = nullptr;
-
-    /**
-     *
-     */
-    std::vector<std::vector<std::pair<size_t, size_t>>*> * outward_adjacency = nullptr;
-
-    /**
-     *
-     */
-    std::vector<std::vector<size_t>*> *neuron_layers_feedforward = nullptr;
-
-    /**
-     *
-     */
-    std::vector<std::vector<size_t>*> *neuron_layers_feedbackward = nullptr;
-
-     /**
-     *
-     */
-    bool layers_analyzed = false;
-
-    /**
-     *
-     */
-    bool delete_weights = true;
-
-    /**
-     *
-     */
-    bool delete_biases = true;
-
-    /**
-     * Adds a new connection to the local list of connections
-     * @param con Connection object to be added
-     * @return Returns the index of the added connection among all the connections
-     */
-    size_t add_new_connection_to_list(ConnectionFunctionGeneral* con);
-
-    /**
-     * Adds a new entry (oriented edge s -> t) to the adjacency list of this network
-     * @param s Index of the source neuron
-     * @param t Index of the target neuron
-     * @param con_idx Index of the connection representing the edge
-     */
-    void add_outward_connection(size_t s, size_t t, size_t con_idx);
-
-    /**
-     * Adds a new entry (oriented edge s <- t) to the adjacency list of this network
-     * @param s Index of the source neuron
-     * @param t Index of the target neuron
-     * @param con_idx Index of the connection representing the edge
-     */
-    void add_inward_connection(size_t s, size_t t, size_t con_idx);
-
-    /**
-     * Performs one feedforward pass and feedbackward pass during which determines the layers of this neural network
-     * for simpler use during evaluation and learning
-     */
-    void analyze_layer_structure( );
-
-public:
-
-    /**
-     * Struct used to access private properties from
-     * the serialization function
-     */
-    struct access;
-
-    /**
-     *
-     */
-    LIB4NEURO_API explicit NeuralNetwork();
-
-    /**
-     *
-     */
-    LIB4NEURO_API explicit NeuralNetwork(std::string filepath);
-
-    /**
-     *
-     */
-    LIB4NEURO_API virtual ~NeuralNetwork();
-
-    /**
-     * If possible, returns a neural net with 'input_neuron_indices' neurons as inputs and 'output_neuron_indices' as
-     * outputs, otherwise returns nullptr. The returned object shares adjustable weights with this network. All
-     * neurons are coppied (new instances), edges also. Uses a breadth-first search as the underlying algorithm.
-     * @param input_neuron_indices
-     * @param output_neuron_indices
-     * @return
-     */
-    LIB4NEURO_API NeuralNetwork* get_subnet(std::vector<size_t> &input_neuron_indices, std::vector<size_t> &output_neuron_indices);
-
-    /**
-     * Replaces the values in @{this->connection_weights} and @{this->neuron_biases} by the provided values
-     * @param parameters
-     */
-    LIB4NEURO_API virtual void copy_parameter_space(std::vector<double> *parameters);
-
-    /**
-     * Copies the pointers @{this->connection_weights} and @{this->neuron_biases} from the parental network, sets
-     * flags to not delete the vectors in this object
-     * @param parent_network
-     */
-    LIB4NEURO_API virtual void set_parameter_space_pointers( NeuralNetwork &parent_network );
-
-    /**
-     *
-     * @param input
-     * @param output
-     * @param custom_weights_and_biases
-     */
-    LIB4NEURO_API virtual void eval_single(std::vector<double> &input, std::vector<double> &output, std::vector<double> *custom_weights_and_biases = nullptr);
-
-    /**
-     * Adds a new neuron to the list of neurons. Also assigns a valid bias value to its activation function
-     * @param[in] n
-     * @return
-     */
-    LIB4NEURO_API size_t add_neuron(Neuron* n, BIAS_TYPE bt = BIAS_TYPE::NEXT_BIAS, size_t bias_idx = 0);
-
-    /**
-     *
-     * @param n1_idx
-     * @param n2_idx
-     * @return
-     */
-    LIB4NEURO_API size_t add_connection_simple(size_t n1_idx, size_t n2_idx, SIMPLE_CONNECTION_TYPE sct = SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT, size_t weight_idx = 0 );
-
-    /**
-     * Take the existing connection with index 'connection_idx' in 'parent_network' and adds it to the structure of this
-     * object
-     * @param n1_idx
-     * @param n2_idx
-     * @param connection_idx
-     * @param parent_network
-     */
-    LIB4NEURO_API void add_existing_connection(size_t n1_idx, size_t n2_idx, size_t connection_idx, NeuralNetwork &parent_network );
-
-
-    /**
-     *
-     */
-    LIB4NEURO_API void randomize_weights();
-
-    /**
-     *
-     */
-    LIB4NEURO_API void randomize_biases();
-
-    /**
-     *
-     * @return
-     */
-    LIB4NEURO_API virtual size_t get_n_inputs();
-
-    /**
-     *
-     * @return
-     */
-    LIB4NEURO_API virtual size_t get_n_outputs();
-
-    /**
-     *
-     * @return
-     */
-    LIB4NEURO_API virtual size_t get_n_weights();
-
-    /**
-     *
-     * @return
-     */
-    LIB4NEURO_API virtual size_t get_n_biases();
-
-    /**
-     *
-     * @return
-     */
-    LIB4NEURO_API virtual int get_neuron_bias_index( size_t neuron_idx );
-
-    /**
-     *
-     * @return
-     */
-    LIB4NEURO_API virtual size_t get_n_neurons();
-
-    /**
-     *
-     * @param input_neurons_indices
-     */
-    LIB4NEURO_API void specify_input_neurons(std::vector<size_t> &input_neurons_indices);
-
-    /**
-     *
-     * @param output_neurons_indices
-     */
-    LIB4NEURO_API void specify_output_neurons(std::vector<size_t> &output_neurons_indices);
-
-    /**
-     *
-     */
-    LIB4NEURO_API void print_weights();
-
-    /**
-     *
-     */
-    LIB4NEURO_API void print_stats();
-
-    /**
-     *
-     * @return
-     */
-    LIB4NEURO_API std::vector<double>* get_parameter_ptr_weights();
-
-    /**
-     *
-     * @return
-     */
-    LIB4NEURO_API std::vector<double>* get_parameter_ptr_biases();
-
-    /**
-     *
-     * @param filepath
-     */
-    LIB4NEURO_API void save_text(std::string filepath);
-
-};
+namespace lib4neuro {
+
+    enum class BIAS_TYPE {
+        NEXT_BIAS, NO_BIAS, EXISTING_BIAS
+    };
+
+    enum class SIMPLE_CONNECTION_TYPE {
+        NEXT_WEIGHT, UNITARY_WEIGHT, EXISTING_WEIGHT
+    };
+
+
+    /**
+     *
+     */
+    class NeuralNetwork {
+    private:
+
+        /**
+         *
+         */
+        std::vector<Neuron *> *neurons = nullptr;
+
+        /**
+         *
+         */
+        std::vector<size_t> *input_neuron_indices = nullptr;
+
+        /**
+         *
+         */
+        std::vector<size_t> *output_neuron_indices = nullptr;
+
+        /**
+         *
+         */
+        std::vector<double> *connection_weights = nullptr;
+
+        /**
+         *
+         */
+        std::vector<double> *neuron_biases = nullptr;
+
+        /**
+         *
+         */
+        std::vector<int> *neuron_bias_indices = nullptr;
+
+        /**
+         *
+         */
+        std::vector<double> *neuron_potentials = nullptr;
+
+        /**
+         *
+         */
+        std::vector<ConnectionFunctionGeneral *> *connection_list = nullptr;
+
+        /**
+         *
+         */
+        std::vector<std::vector<std::pair<size_t, size_t>> *> *inward_adjacency = nullptr;
+
+        /**
+         *
+         */
+        std::vector<std::vector<std::pair<size_t, size_t>> *> *outward_adjacency = nullptr;
+
+        /**
+         *
+         */
+        std::vector<std::vector<size_t> *> *neuron_layers_feedforward = nullptr;
+
+        /**
+         *
+         */
+        std::vector<std::vector<size_t> *> *neuron_layers_feedbackward = nullptr;
+
+        /**
+        *
+        */
+        bool layers_analyzed = false;
+
+        /**
+         *
+         */
+        bool delete_weights = true;
+
+        /**
+         *
+         */
+        bool delete_biases = true;
+
+        /**
+         * Adds a new connection to the local list of connections
+         * @param con Connection object to be added
+         * @return Returns the index of the added connection among all the connections
+         */
+        size_t add_new_connection_to_list(ConnectionFunctionGeneral *con);
+
+        /**
+         * Adds a new entry (oriented edge s -> t) to the adjacency list of this network
+         * @param s Index of the source neuron
+         * @param t Index of the target neuron
+         * @param con_idx Index of the connection representing the edge
+         */
+        void add_outward_connection(size_t s, size_t t, size_t con_idx);
+
+        /**
+         * Adds a new entry (oriented edge s <- t) to the adjacency list of this network
+         * @param s Index of the source neuron
+         * @param t Index of the target neuron
+         * @param con_idx Index of the connection representing the edge
+         */
+        void add_inward_connection(size_t s, size_t t, size_t con_idx);
+
+        /**
+         * Performs one feedforward pass and feedbackward pass during which determines the layers of this neural network
+         * for simpler use during evaluation and learning
+         */
+        void analyze_layer_structure();
+
+    public:
+
+        /**
+         * Struct used to access private properties from
+         * the serialization function
+         */
+        struct access;
+
+        /**
+         *
+         */
+        LIB4NEURO_API explicit NeuralNetwork();
+
+        /**
+         *
+         */
+        LIB4NEURO_API explicit NeuralNetwork(std::string filepath);
+
+        /**
+         *
+         */
+        LIB4NEURO_API virtual ~NeuralNetwork();
+
+        /**
+         * If possible, returns a neural net with 'input_neuron_indices' neurons as inputs and 'output_neuron_indices' as
+         * outputs, otherwise returns nullptr. The returned object shares adjustable weights with this network. All
+         * neurons are copied (new instances), edges also. Uses a breadth-first search as the underlying algorithm.
+         * @param input_neuron_indices
+         * @param output_neuron_indices
+         * @return
+         */
+        LIB4NEURO_API NeuralNetwork *
+        get_subnet(std::vector<size_t> &input_neuron_indices, std::vector<size_t> &output_neuron_indices);
+
+        /**
+         * Replaces the values in @{this->connection_weights} and @{this->neuron_biases} by the provided values
+         * @param parameters
+         */
+        LIB4NEURO_API virtual void copy_parameter_space(std::vector<double> *parameters);
+
+        /**
+         * Copies the pointers @{this->connection_weights} and @{this->neuron_biases} from the parental network, sets
+         * flags to not delete the vectors in this object
+         * @param parent_network
+         */
+        LIB4NEURO_API virtual void set_parameter_space_pointers(NeuralNetwork &parent_network);
+
+        /**
+         *
+         * @param input
+         * @param output
+         * @param custom_weights_and_biases
+         */
+        LIB4NEURO_API virtual void eval_single(std::vector<double> &input, std::vector<double> &output,
+                                               std::vector<double> *custom_weights_and_biases = nullptr);
+
+        /**
+         * Adds a new neuron to the list of neurons. Also assigns a valid bias value to its activation function
+         * @param[in] n
+         * @return
+         */
+        LIB4NEURO_API size_t add_neuron(Neuron *n, BIAS_TYPE bt = BIAS_TYPE::NEXT_BIAS, size_t bias_idx = 0);
+
+        /**
+         *
+         * @param n1_idx
+         * @param n2_idx
+         * @return
+         */
+        LIB4NEURO_API size_t add_connection_simple(size_t n1_idx, size_t n2_idx,
+                                                   SIMPLE_CONNECTION_TYPE sct = SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT,
+                                                   size_t weight_idx = 0);
+
+        /**
+         * Take the existing connection with index 'connection_idx' in 'parent_network' and adds it to the structure of this
+         * object
+         * @param n1_idx
+         * @param n2_idx
+         * @param connection_idx
+         * @param parent_network
+         */
+        LIB4NEURO_API void
+        add_existing_connection(size_t n1_idx, size_t n2_idx, size_t connection_idx, NeuralNetwork &parent_network);
+
+
+        /**
+         *
+         */
+        LIB4NEURO_API void randomize_weights();
+
+        /**
+         *
+         */
+        LIB4NEURO_API void randomize_biases();
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API virtual size_t get_n_inputs();
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API virtual size_t get_n_outputs();
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API virtual size_t get_n_weights();
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API virtual size_t get_n_biases();
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API virtual int get_neuron_bias_index(size_t neuron_idx);
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API virtual size_t get_n_neurons();
+
+        /**
+         *
+         * @param input_neurons_indices
+         */
+        LIB4NEURO_API void specify_input_neurons(std::vector<size_t> &input_neurons_indices);
+
+        /**
+         *
+         * @param output_neurons_indices
+         */
+        LIB4NEURO_API void specify_output_neurons(std::vector<size_t> &output_neurons_indices);
+
+        /**
+         *
+         */
+        LIB4NEURO_API void print_weights();
+
+        /**
+         *
+         */
+        LIB4NEURO_API void print_stats();
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API std::vector<double> *get_parameter_ptr_weights();
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API std::vector<double> *get_parameter_ptr_biases();
+
+        /**
+         *
+         * @param filepath
+         */
+        LIB4NEURO_API void save_text(std::string filepath);
+
+    };
+
+}
 
 #endif //INC_4NEURO_NEURALNETWORK_H
diff --git a/src/Network/NeuralNetworkSerialization.h b/src/Network/NeuralNetworkSerialization.h
index 301ab3810c80f356dc8b78c18912449294902723..75300166df3928c271b015b8d8d96ec1d15b1646 100644
--- a/src/Network/NeuralNetworkSerialization.h
+++ b/src/Network/NeuralNetworkSerialization.h
@@ -24,26 +24,28 @@
 #include "NetConnection/ConnectionFunctionGeneralSerialization.h"
 #include "NetConnection/ConnectionFunctionIdentitySerialization.h"
 
-struct NeuralNetwork :: access {
-    template <class Archive>
-    static void serialize(Archive &ar, NeuralNetwork& nn, const unsigned int version) {
-        ar & nn.neurons;
-        ar & nn.input_neuron_indices;
-        ar & nn.output_neuron_indices;
-        ar & nn.connection_list;
-        ar & nn.neuron_biases;
-        ar & nn.neuron_bias_indices;
-        ar & nn.neuron_potentials;
-        ar & nn.connection_weights;
-        ar & nn.inward_adjacency;
-        ar & nn.outward_adjacency;
-        ar & nn.neuron_layers_feedforward;
-        ar & nn.neuron_layers_feedbackward;
-        ar & nn.layers_analyzed;
-        ar & nn.delete_weights;
-        ar & nn.delete_biases;
-    }
-};
+namespace lib4neuro {
+    struct NeuralNetwork::access {
+        template<class Archive>
+        static void serialize(Archive &ar, NeuralNetwork &nn, const unsigned int version) {
+            ar & nn.neurons;
+            ar & nn.input_neuron_indices;
+            ar & nn.output_neuron_indices;
+            ar & nn.connection_list;
+            ar & nn.neuron_biases;
+            ar & nn.neuron_bias_indices;
+            ar & nn.neuron_potentials;
+            ar & nn.connection_weights;
+            ar & nn.inward_adjacency;
+            ar & nn.outward_adjacency;
+            ar & nn.neuron_layers_feedforward;
+            ar & nn.neuron_layers_feedbackward;
+            ar & nn.layers_analyzed;
+            ar & nn.delete_weights;
+            ar & nn.delete_biases;
+        }
+    };
+}
 
 namespace boost {
     namespace serialization {
@@ -56,9 +58,9 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, NeuralNetwork & nn, const unsigned int version)
+        void serialize(Archive & ar, lib4neuro::NeuralNetwork & nn, const unsigned int version)
         {
-            NeuralNetwork::access::serialize(ar, nn, version);
+            lib4neuro::NeuralNetwork::access::serialize(ar, nn, version);
         }
 
     } // namespace serialization
diff --git a/src/Network/NeuralNetworkSum.cpp b/src/Network/NeuralNetworkSum.cpp
index 2842c1aebf0f9acfb3b20c5d144020957ea8f191..5665593104e8aba0c67c826b8e98ffef0e0f68af 100644
--- a/src/Network/NeuralNetworkSum.cpp
+++ b/src/Network/NeuralNetworkSum.cpp
@@ -11,112 +11,116 @@
 #include "NeuralNetworkSumSerialization.h"
 #include "General/ExprtkWrapperSerialization.h"
 
-BOOST_CLASS_EXPORT_IMPLEMENT(NeuralNetworkSum);
+BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::NeuralNetworkSum);
 
-NeuralNetworkSum::NeuralNetworkSum(){
-    this->summand = nullptr;
-    this->summand_coefficient = nullptr;
-}
+namespace lib4neuro {
 
-NeuralNetworkSum::~NeuralNetworkSum() {
-    if( this->summand ){
-        delete this->summand;
+    NeuralNetworkSum::NeuralNetworkSum() {
         this->summand = nullptr;
+        this->summand_coefficient = nullptr;
     }
-    if( this->summand_coefficient ){
 
-        for(auto el: *this->summand_coefficient){
-            delete el;
+    NeuralNetworkSum::~NeuralNetworkSum() {
+        if (this->summand) {
+            delete this->summand;
+            this->summand = nullptr;
         }
+        if (this->summand_coefficient) {
 
-        delete this->summand_coefficient;
-        this->summand_coefficient = nullptr;
-    }
-}
+            for (auto el: *this->summand_coefficient) {
+                delete el;
+            }
 
-void NeuralNetworkSum::add_network( NeuralNetwork *net, std::string expression_string ) {
-    if(!this->summand){
-        this->summand = new std::vector<NeuralNetwork*>(0);
+            delete this->summand_coefficient;
+            this->summand_coefficient = nullptr;
+        }
     }
-    this->summand->push_back( net );
 
-    if(!this->summand_coefficient){
-        this->summand_coefficient = new std::vector<ExprtkWrapper*>(0);
+    void NeuralNetworkSum::add_network(NeuralNetwork *net, std::string expression_string) {
+        if (!this->summand) {
+            this->summand = new std::vector<NeuralNetwork *>(0);
+        }
+        this->summand->push_back(net);
+
+        if (!this->summand_coefficient) {
+            this->summand_coefficient = new std::vector<ExprtkWrapper *>(0);
+        }
+        this->summand_coefficient->push_back(new ExprtkWrapper(expression_string));
     }
-    this->summand_coefficient->push_back( new ExprtkWrapper( expression_string ) );
-}
 
-void NeuralNetworkSum::eval_single(std::vector<double> &input, std::vector<double> &output, std::vector<double> *custom_weights_and_biases) {
-    std::vector<double> mem_output(output.size());
-    std::fill(output.begin(), output.end(), 0.0);
+    void NeuralNetworkSum::eval_single(std::vector<double> &input, std::vector<double> &output,
+                                       std::vector<double> *custom_weights_and_biases) {
+        std::vector<double> mem_output(output.size());
+        std::fill(output.begin(), output.end(), 0.0);
 
-    NeuralNetwork *SUM;
+        NeuralNetwork *SUM;
 
-    for(size_t ni = 0; ni < this->summand->size(); ++ni){
-        SUM = this->summand->at(ni);
+        for (size_t ni = 0; ni < this->summand->size(); ++ni) {
+            SUM = this->summand->at(ni);
 
-        if( SUM ){
-            this->summand->at(ni)->eval_single(input, mem_output, custom_weights_and_biases);
+            if (SUM) {
+                this->summand->at(ni)->eval_single(input, mem_output, custom_weights_and_biases);
 
-            double alpha = this->summand_coefficient->at(ni)->eval(input);
+                double alpha = this->summand_coefficient->at(ni)->eval(input);
 
-            for(size_t j = 0; j < output.size(); ++j){
-                output[j] += mem_output[j] * alpha;
-            }
-        }
-        else{
-            //TODO assume the result can be a vector of doubles
-            double alpha = this->summand_coefficient->at(ni)->eval(input);
+                for (size_t j = 0; j < output.size(); ++j) {
+                    output[j] += mem_output[j] * alpha;
+                }
+            } else {
+                //TODO assume the result can be a vector of doubles
+                double alpha = this->summand_coefficient->at(ni)->eval(input);
 
-            for(size_t j = 0; j < output.size(); ++j){
-                output[j] += alpha;
+                for (size_t j = 0; j < output.size(); ++j) {
+                    output[j] += alpha;
+                }
             }
         }
+
     }
 
-}
+    size_t NeuralNetworkSum::get_n_weights() {
+        //TODO insufficient solution, assumes the networks share weights
+        if (this->summand) {
+            return this->summand->at(0)->get_n_weights();
+        }
 
-size_t NeuralNetworkSum::get_n_weights(){
-    //TODO insufficient solution, assumes the networks share weights
-    if(this->summand){
-        return this->summand->at(0)->get_n_weights();
+        return 0;
     }
 
-    return 0;
-}
+    size_t NeuralNetworkSum::get_n_biases() {
+        //TODO insufficient solution, assumes the networks share weights
+        if (this->summand) {
+            return this->summand->at(0)->get_n_biases();
+        }
 
-size_t NeuralNetworkSum::get_n_biases(){
-    //TODO insufficient solution, assumes the networks share weights
-    if(this->summand){
-        return this->summand->at(0)->get_n_biases();
+        return 0;
     }
 
-    return 0;
-}
+    size_t NeuralNetworkSum::get_n_inputs() {
+        //TODO insufficient solution, assumes the networks share weights
+        if (this->summand) {
+            return this->summand->at(0)->get_n_inputs();
+        }
 
-size_t NeuralNetworkSum::get_n_inputs() {
-    //TODO insufficient solution, assumes the networks share weights
-    if(this->summand){
-        return this->summand->at(0)->get_n_inputs();
+        return 0;
     }
 
-    return 0;
-}
+    size_t NeuralNetworkSum::get_n_neurons() {
+        //TODO insufficient solution, assumes the networks share weights
+        if (this->summand) {
+            return this->summand->at(0)->get_n_neurons();
+        }
 
-size_t NeuralNetworkSum::get_n_neurons() {
-    //TODO insufficient solution, assumes the networks share weights
-    if(this->summand){
-        return this->summand->at(0)->get_n_neurons();
+        return 0;
     }
 
-    return 0;
-}
+    size_t NeuralNetworkSum::get_n_outputs() {
+        //TODO insufficient solution, assumes the networks share weights
+        if (this->summand) {
+            return this->summand->at(0)->get_n_outputs();
+        }
 
-size_t NeuralNetworkSum::get_n_outputs() {
-    //TODO insufficient solution, assumes the networks share weights
-    if(this->summand){
-        return this->summand->at(0)->get_n_outputs();
+        return 0;
     }
 
-    return 0;
 }
\ No newline at end of file
diff --git a/src/Network/NeuralNetworkSum.h b/src/Network/NeuralNetworkSum.h
index e8d883bc002a4a8d7f62c30e4c6947f9da0611d3..86391a1ebc2a9590458cbaf0d1ff5200d9baaf63 100644
--- a/src/Network/NeuralNetworkSum.h
+++ b/src/Network/NeuralNetworkSum.h
@@ -13,56 +13,61 @@
 
 #include "NeuralNetwork.h"
 
-class NeuralNetworkSum : public NeuralNetwork {
-private:
-    std::vector<NeuralNetwork*> * summand;
-    std::vector<ExprtkWrapper*> * summand_coefficient;
-
-
-public:
-    /**
-     * Struct used to access private properties from
-     * the serialization function
-     */
-    struct access;
-
-    LIB4NEURO_API NeuralNetworkSum( );
-    LIB4NEURO_API virtual ~NeuralNetworkSum( );
-
-    LIB4NEURO_API void add_network( NeuralNetwork *net, std::string expression_string );
-
-    LIB4NEURO_API virtual void eval_single(std::vector<double> &input, std::vector<double> &output, std::vector<double> *custom_weights_and_biases = nullptr);
-
-    /**
-     *
-     * @return
-     */
-    LIB4NEURO_API virtual size_t get_n_inputs() override;
-
-    /**
-     *
-     * @return
-     */
-    LIB4NEURO_API size_t get_n_outputs() override;
-
-    /**
-     *
-     * @return
-     */
-    LIB4NEURO_API virtual size_t get_n_weights() override;
-
-    /**
-     *
-     * @return
-     */
-    LIB4NEURO_API virtual size_t get_n_biases() override;
-
-    /**
-     *
-     * @return
-     */
-    LIB4NEURO_API virtual size_t get_n_neurons() override;
-};
-
+namespace lib4neuro {
+
+    class NeuralNetworkSum : public NeuralNetwork {
+    private:
+        std::vector<NeuralNetwork *> *summand;
+        std::vector<ExprtkWrapper *> *summand_coefficient;
+
+
+    public:
+        /**
+         * Struct used to access private properties from
+         * the serialization function
+         */
+        struct access;
+
+        LIB4NEURO_API NeuralNetworkSum();
+
+        LIB4NEURO_API virtual ~NeuralNetworkSum();
+
+        LIB4NEURO_API void add_network(NeuralNetwork *net, std::string expression_string);
+
+        LIB4NEURO_API virtual void eval_single(std::vector<double> &input, std::vector<double> &output,
+                                               std::vector<double> *custom_weights_and_biases = nullptr);
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API virtual size_t get_n_inputs() override;
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API size_t get_n_outputs() override;
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API virtual size_t get_n_weights() override;
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API virtual size_t get_n_biases() override;
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API virtual size_t get_n_neurons() override;
+    };
+
+}
 
 #endif //INC_4NEURO_NEURALNETWORKSUM_H
diff --git a/src/Network/NeuralNetworkSumSerialization.h b/src/Network/NeuralNetworkSumSerialization.h
index 9b8aab0807107837194c7e6c14d5c51ef8a029e3..a70adad5d7531b2f2e6e89cfa97797715c233601 100644
--- a/src/Network/NeuralNetworkSumSerialization.h
+++ b/src/Network/NeuralNetworkSumSerialization.h
@@ -13,16 +13,18 @@
 #include "NeuralNetworkSum.h"
 #include "NeuralNetworkSerialization.h"
 
-BOOST_CLASS_EXPORT_KEY(NeuralNetworkSum);
+BOOST_CLASS_EXPORT_KEY(lib4neuro::NeuralNetworkSum);
 
-struct NeuralNetworkSum :: access {
-    template <class Archive>
-    static void serialize(Archive &ar, NeuralNetworkSum& n, const unsigned int version) {
-        ar & boost::serialization::base_object<NeuralNetwork>(n);
-        ar & n.summand;
-        ar & n.summand_coefficient;
-    }
-};
+namespace lib4neuro {
+    struct NeuralNetworkSum::access {
+        template<class Archive>
+        static void serialize(Archive &ar, NeuralNetworkSum &n, const unsigned int version) {
+            ar & boost::serialization::base_object<NeuralNetwork>(n);
+            ar & n.summand;
+            ar & n.summand_coefficient;
+        }
+    };
+}
 
 namespace boost {
     namespace serialization {
@@ -35,9 +37,9 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, NeuralNetworkSum& n, const unsigned int version)
+        void serialize(Archive & ar, lib4neuro::NeuralNetworkSum& n, const unsigned int version)
         {
-            NeuralNetworkSum::access::serialize(ar, n, version);
+            lib4neuro::NeuralNetworkSum::access::serialize(ar, n, version);
         }
 
     } // namespace serialization
diff --git a/src/Neuron/Neuron.cpp b/src/Neuron/Neuron.cpp
index dc4c4a3844e9fc5c6703deabe17cbe993eb024ca..014d1e993c82277d5d9e144ef5ca633fbeec3022 100644
--- a/src/Neuron/Neuron.cpp
+++ b/src/Neuron/Neuron.cpp
@@ -1,8 +1,11 @@
 #include "NeuronSerialization.h"
 
-BOOST_CLASS_EXPORT_IMPLEMENT(Neuron);
+BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::Neuron);
 
+namespace lib4neuro {
 
-Neuron::~Neuron() {
+    Neuron::~Neuron() {
+
+    }
 
 }
diff --git a/src/Neuron/Neuron.h b/src/Neuron/Neuron.h
index f0d8fd1fbb29b8a853c9d31e3c30f1202ab6dd1b..d35a56dae08658b88eabd0279faad59e41eb2629 100644
--- a/src/Neuron/Neuron.h
+++ b/src/Neuron/Neuron.h
@@ -13,33 +13,34 @@
 #include "../settings.h"
 #include <vector>
 
+namespace lib4neuro {
 
 /**
   * Abstract class representing a general neuron
   */
-class Neuron {
+    class Neuron {
 
-public:
+    public:
 
-    /**
-     * Struct used to access private properties from
-     * the serialization function
-     */
-    struct access;
+        /**
+         * Struct used to access private properties from
+         * the serialization function
+         */
+        struct access;
 
-    /**
-     * Destructor of the Neuron object
-     * this level deallocates the array 'activation_function_parameters'
-     * also deallocates the OUTGOING connections
-     */
-    LIB4NEURO_API virtual ~Neuron();
+        /**
+         * Destructor of the Neuron object
+         * this level deallocates the array 'activation_function_parameters'
+         * also deallocates the OUTGOING connections
+         */
+        LIB4NEURO_API virtual ~Neuron();
 
-    /**
-     * Performs the activation function and returns the result
-     */
-    LIB4NEURO_API virtual double activate( double x, double b ) = 0;
+        /**
+         * Performs the activation function and returns the result
+         */
+        LIB4NEURO_API virtual double activate(double x, double b) = 0;
 
-}; /* end of Neuron class */
+    }; /* end of Neuron class */
 
 
 /**
@@ -47,28 +48,30 @@ public:
  * 'activation_function_eval_derivative',  'get_partial_derivative' and
  * 'get_derivative' methods.
  */
-class IDifferentiable {
-
-    /**
-     * Calculates the derivative with respect to the argument, ie the 'potential'
-     * @return f'(x), where 'f(x)' is the activation function and 'x' = 'potential'
-     */
-    virtual double activation_function_eval_derivative( double x, double b ) = 0;
-
-    /**
-     * Calculates the derivative with respect to the bias
-     * @return d/db f'(x), where 'f(x)' is the activation function, 'x' is the 'potential'
-     * and 'b' is the bias
-     */
-    virtual double activation_function_eval_derivative_bias( double x, double b ) = 0;
-
-    /**
-     * Returns a Neuron pointer object with activation function being the partial derivative of
-     * the activation function of this Neuron object with respect to the argument, i.e. 'potential'
-     * @return
-     */
-    virtual Neuron* get_derivative( ) = 0;
-
-}; /* end of IDifferentiable class */
-
- #endif /* NEURON_H_ */
\ No newline at end of file
+    class IDifferentiable {
+
+        /**
+         * Calculates the derivative with respect to the argument, ie the 'potential'
+         * @return f'(x), where 'f(x)' is the activation function and 'x' = 'potential'
+         */
+        virtual double activation_function_eval_derivative(double x, double b) = 0;
+
+        /**
+         * Calculates the derivative with respect to the bias
+         * @return d/db f'(x), where 'f(x)' is the activation function, 'x' is the 'potential'
+         * and 'b' is the bias
+         */
+        virtual double activation_function_eval_derivative_bias(double x, double b) = 0;
+
+        /**
+         * Returns a Neuron pointer object with activation function being the partial derivative of
+         * the activation function of this Neuron object with respect to the argument, i.e. 'potential'
+         * @return
+         */
+        virtual Neuron *get_derivative() = 0;
+
+    }; /* end of IDifferentiable class */
+
+}
+
+#endif /* NEURON_H_ */
\ No newline at end of file
diff --git a/src/Neuron/NeuronBinary.cpp b/src/Neuron/NeuronBinary.cpp
index b53c9462c5f7bde13b3a3802195ee19a2f807ec4..2b90fa3a0a285c74ae513c7dab5c54ad5c94b651 100644
--- a/src/Neuron/NeuronBinary.cpp
+++ b/src/Neuron/NeuronBinary.cpp
@@ -7,16 +7,18 @@
 #include "NeuronSerialization.h"
 #include "NeuronBinarySerialization.h"
 
-BOOST_CLASS_EXPORT_IMPLEMENT(NeuronBinary);
+BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::NeuronBinary);
 
-NeuronBinary::NeuronBinary( ) {}
+namespace lib4neuro {
+    NeuronBinary::NeuronBinary() {}
 
-double NeuronBinary::activate( double x, double b ) {
+    double NeuronBinary::activate(double x, double b) {
 
-    if(x >= b){
-        return 1.0;
+        if (x >= b) {
+            return 1.0;
+        } else {
+            return 0.0;
+        }
     }
-    else{
-        return 0.0;
-    }
-}
+
+}
\ No newline at end of file
diff --git a/src/Neuron/NeuronBinary.h b/src/Neuron/NeuronBinary.h
index ae52d75aab5fb5bcbbbdef349b6c316fcb02f777..1992b03fa3495787b9da990ebe54139d1b59c6f6 100644
--- a/src/Neuron/NeuronBinary.h
+++ b/src/Neuron/NeuronBinary.h
@@ -10,35 +10,37 @@
 #ifndef INC_4NEURO_NEURONBINARY_H
 #define INC_4NEURO_NEURONBINARY_H
 
-//#include "../settings.h"
-
 #include "Neuron.h"
 
+namespace lib4neuro {
+
 /**
  *  Binary neuron class - uses unit-step as the activation function
  */
-class NeuronBinary:public Neuron {
+    class NeuronBinary : public Neuron {
+
+    public:
 
-public:
+        /**
+         * Struct used to access private properties from
+         * the serialization function
+         */
+        struct access;
 
-    /**
-     * Struct used to access private properties from
-     * the serialization function
-     */
-    struct access;
+        /**
+         * Default constructor for the binary Neuron
+         * The neuron acts as a unit step: activate() returns 1.0 when the
+         * potential 'x' reaches the bias 'b', and 0.0 otherwise
+         */
+        LIB4NEURO_API explicit NeuronBinary();
 
-    /**
-     * Default constructor for the binary Neuron
-     * @param[in] threshold Denotes when the neuron is activated
-     * When neuron potential exceeds 'threshold' value it becomes excited
-     */
-    LIB4NEURO_API explicit NeuronBinary( );
+        /**
+         * Performs the activation function and stores the result into the 'state' property
+         */
+        LIB4NEURO_API double activate(double x, double b) override;
 
-    /**
-     * Performs the activation function and stores the result into the 'state' property
-     */
-    LIB4NEURO_API double activate( double x, double b ) override;
+    };
 
-};
+}
 
 #endif //INC_4NEURO_NEURONBINARY_H
diff --git a/src/Neuron/NeuronBinarySerialization.h b/src/Neuron/NeuronBinarySerialization.h
index e227c45d8c7c5e8c02c9366b43deda12d34e5c78..a09f0b23cfe28a518f0e81aff45f54e73f1e6417 100644
--- a/src/Neuron/NeuronBinarySerialization.h
+++ b/src/Neuron/NeuronBinarySerialization.h
@@ -13,12 +13,12 @@
 #include "NeuronSerialization.h"
 #include "NeuronBinary.h"
 
-BOOST_CLASS_EXPORT_KEY(NeuronBinary);
+BOOST_CLASS_EXPORT_KEY(lib4neuro::NeuronBinary);
 
-struct NeuronBinary :: access {
+struct lib4neuro::NeuronBinary::access {
     template <class Archive>
-    static void serialize(Archive &ar, NeuronBinary& n, const unsigned int version) {
-        ar & boost::serialization::base_object<Neuron>(n);
+    static void serialize(Archive &ar, lib4neuro::NeuronBinary& n, const unsigned int version) {
+        ar & boost::serialization::base_object<lib4neuro::Neuron>(n);
     }
 };
 
@@ -33,9 +33,9 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, NeuronBinary& n, const unsigned int version)
+        void serialize(Archive & ar, lib4neuro::NeuronBinary& n, const unsigned int version)
         {
-            NeuronBinary::access::serialize(ar, n, version);
+            lib4neuro::NeuronBinary::access::serialize(ar, n, version);
         }
 
     } // namespace serialization
diff --git a/src/Neuron/NeuronConstant.cpp b/src/Neuron/NeuronConstant.cpp
index bc04b60dfe2461494097de6cb8a4961eb0cd4e23..f660e49d68f77074767b097296c7fe1e23c5e1fd 100644
--- a/src/Neuron/NeuronConstant.cpp
+++ b/src/Neuron/NeuronConstant.cpp
@@ -12,26 +12,29 @@
 #include "NeuronConstantSerialization.h"
 
 
-BOOST_CLASS_EXPORT_IMPLEMENT(NeuronConstant);
+BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::NeuronConstant);
 
-NeuronConstant::NeuronConstant( double c ) {
-    this->p = c;
-}
+namespace lib4neuro {
 
-double NeuronConstant::activate( double x, double b ) {
-    return  this->p;
-}
+    NeuronConstant::NeuronConstant(double c) {
+        this->p = c;
+    }
 
-double NeuronConstant::activation_function_eval_derivative_bias( double x, double b ) {
-    return 0.0;
-}
+    double NeuronConstant::activate(double x, double b) {
+        return this->p;
+    }
 
-double NeuronConstant::activation_function_eval_derivative( double x, double b ) {
-    return 0.0;
-}
+    double NeuronConstant::activation_function_eval_derivative_bias(double x, double b) {
+        return 0.0;
+    }
 
-Neuron* NeuronConstant::get_derivative() {
-    NeuronConstant* output = new NeuronConstant( );
-    return output;
-}
+    double NeuronConstant::activation_function_eval_derivative(double x, double b) {
+        return 0.0;
+    }
 
+    Neuron *NeuronConstant::get_derivative() {
+        NeuronConstant *output = new NeuronConstant();
+        return output;
+    }
+
+}
\ No newline at end of file
diff --git a/src/Neuron/NeuronConstant.h b/src/Neuron/NeuronConstant.h
index 1cab7cb1fbf5e78a1488e59a8a06e27155e2487f..7a5c3c8eaca8af73498109279391ce44fd5996cb 100644
--- a/src/Neuron/NeuronConstant.h
+++ b/src/Neuron/NeuronConstant.h
@@ -8,54 +8,55 @@
 #ifndef INC_4NEURO_NEURONCONSTANT_H
 #define INC_4NEURO_NEURONCONSTANT_H
 
-//#include "../settings.h"
-
 #include "Neuron.h"
-//#include "NeuronSerialization.h"
-
-class NeuronConstant: public Neuron, public IDifferentiable {
-private:
-    double p = 0.0;
-
-public:
-
-    /**
-     * Struct used to access private properties from
-     * the serialization function
-     */
-    struct access;
-
-    /**
-     * Constructs the object of the Linear neuron with activation function
-     * f(x) = c
-     * @param[in] c Constant value
-     */
-    LIB4NEURO_API explicit NeuronConstant( double c = 0.0 );
-
-    /**
-     * Evaluates and returns 'c'
-     */
-    LIB4NEURO_API double activate( double x, double b ) override;
-
-    /**
-     * Calculates the partial derivative of the activation function
-     * f(x) = c at point x
-     * @return Partial derivative of the activation function according to the
-     * 'bias' parameter. Returns 0.0
-     */
-    LIB4NEURO_API double activation_function_eval_derivative_bias( double x, double b ) override;
-
-    /**
-     * Calculates d/dx of (c) at point x
-     * @return 0.0
-     */
-    LIB4NEURO_API double activation_function_eval_derivative( double x, double b ) override;
-
-    /**
-     * Returns a pointer to a Neuron with derivative as its activation function
-     * @return
-     */
-    LIB4NEURO_API Neuron* get_derivative( ) override;
-};
+
+namespace lib4neuro {
+
+    class NeuronConstant : public Neuron, public IDifferentiable {
+    private:
+        double p = 0.0;
+
+    public:
+
+        /**
+         * Struct used to access private properties from
+         * the serialization function
+         */
+        struct access;
+
+        /**
+         * Constructs the object of the Linear neuron with activation function
+         * f(x) = c
+         * @param[in] c Constant value
+         */
+        LIB4NEURO_API explicit NeuronConstant(double c = 0.0);
+
+        /**
+         * Evaluates and returns 'c'
+         */
+        LIB4NEURO_API double activate(double x, double b) override;
+
+        /**
+         * Calculates the partial derivative of the activation function
+         * f(x) = c at point x
+         * @return Partial derivative of the activation function according to the
+         * 'bias' parameter. Returns 0.0
+         */
+        LIB4NEURO_API double activation_function_eval_derivative_bias(double x, double b) override;
+
+        /**
+         * Calculates d/dx of (c) at point x
+         * @return 0.0
+         */
+        LIB4NEURO_API double activation_function_eval_derivative(double x, double b) override;
+
+        /**
+         * Returns a pointer to a Neuron with derivative as its activation function
+         * @return
+         */
+        LIB4NEURO_API Neuron *get_derivative() override;
+    };
+
+}
 
 #endif //INC_4NEURO_NEURONCONSTANT_H
diff --git a/src/Neuron/NeuronConstantSerialization.h b/src/Neuron/NeuronConstantSerialization.h
index 2fd8ef406a8ac0ee6703c05accc3953c24d357d4..2880cd4f08ca2c40e538ba74415bd1012203342d 100644
--- a/src/Neuron/NeuronConstantSerialization.h
+++ b/src/Neuron/NeuronConstantSerialization.h
@@ -13,15 +13,17 @@
 #include "NeuronConstant.h"
 #include "NeuronSerialization.h"
 
-BOOST_CLASS_EXPORT_KEY(NeuronConstant);
+BOOST_CLASS_EXPORT_KEY(lib4neuro::NeuronConstant);
 
-struct NeuronConstant :: access {
-    template <class Archive>
-    static void serialize(Archive &ar, NeuronConstant& n, const unsigned int version) {
-        ar & boost::serialization::base_object<Neuron>(n);
-        ar & n.p;
-    }
-};
+namespace lib4neuro {
+    struct NeuronConstant::access {
+        template<class Archive>
+        static void serialize(Archive &ar, NeuronConstant &n, const unsigned int version) {
+            ar & boost::serialization::base_object<Neuron>(n);
+            ar & n.p;
+        }
+    };
+}
 
 namespace boost {
     namespace serialization {
@@ -34,9 +36,9 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, NeuronConstant& n, const unsigned int version)
+        void serialize(Archive & ar, lib4neuro::NeuronConstant& n, const unsigned int version)
         {
-            NeuronConstant::access::serialize(ar, n, version);
+            lib4neuro::NeuronConstant::access::serialize(ar, n, version);
         }
 
     } // namespace serialization
diff --git a/src/Neuron/NeuronLinear.cpp b/src/Neuron/NeuronLinear.cpp
index 395b7e08dcac3735c8b315eed6bc1e6c930b0089..b095ea870d96bb798193115be8543c2d36932985 100644
--- a/src/Neuron/NeuronLinear.cpp
+++ b/src/Neuron/NeuronLinear.cpp
@@ -9,25 +9,27 @@
 #include "NeuronSerialization.h"
 #include "NeuronLinearSerialization.h"
 
-BOOST_CLASS_EXPORT_IMPLEMENT(NeuronLinear);
+BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::NeuronLinear);
 
+namespace lib4neuro {
+    NeuronLinear::NeuronLinear() {}
 
-NeuronLinear::NeuronLinear( ) {}
+    double NeuronLinear::activate(double x, double b) {
 
-double NeuronLinear::activate( double x, double b ) {
+        return x + b;
+    }
 
-    return  x + b;
-}
+    double NeuronLinear::activation_function_eval_derivative_bias(double x, double b) {
+        return 1.0;
+    }
 
-double NeuronLinear::activation_function_eval_derivative_bias( double x, double b ) {
-    return 1.0;
-}
+    double NeuronLinear::activation_function_eval_derivative(double x, double b) {
+        return 1.0;
+    }
 
-double NeuronLinear::activation_function_eval_derivative( double x, double b ) {
-    return 1.0;
-}
+    Neuron *NeuronLinear::get_derivative() {
+        NeuronConstant *output = new NeuronConstant(1.0);
+        return output;
+    }
 
-Neuron* NeuronLinear::get_derivative() {
-    NeuronConstant* output = new NeuronConstant( 1.0 );
-    return output;
-}
+}
\ No newline at end of file
diff --git a/src/Neuron/NeuronLinear.h b/src/Neuron/NeuronLinear.h
index 8d40deff4b3654a9cd724684f17e34d22d1364ec..fc6d56239414071a84ee8bc1ae90533598368014 100644
--- a/src/Neuron/NeuronLinear.h
+++ b/src/Neuron/NeuronLinear.h
@@ -11,57 +11,58 @@
 #define INC_4NEURO_NEURONLINEAR_H
 
 #include "../settings.h"
-
 #include "Neuron.h"
-//#include "NeuronConstant.h"
+
+namespace lib4neuro {
 
 /**
  * Linear neuron class - uses activation function in the form f(x)=a*x + b,
  * 'x' being the neuron's potential
  */
-class NeuronLinear:public Neuron, public IDifferentiable {
+    class NeuronLinear : public Neuron, public IDifferentiable {
 
-public:
+    public:
 
-    /**
-     * Struct used to access private properties from
-     * the serialization function
-     */
-    struct access;
+        /**
+         * Struct used to access private properties from
+         * the serialization function
+         */
+        struct access;
 
-    /**
-     * Constructs the object of the Linear neuron with activation function
-     * f(x) = x + b
-     * @param[in] b Bias
-     */
-    LIB4NEURO_API explicit NeuronLinear( );
+        /**
+         * Constructs the object of the Linear neuron with activation function
+         * f(x) = x + b
+         * (the bias 'b' is supplied as an argument to activate())
+         */
+        LIB4NEURO_API explicit NeuronLinear();
 
-    /**
-     * Evaluates 'x + b' and stores the result into the 'state' property
-     */
-    LIB4NEURO_API double activate( double x, double b ) override;
+        /**
+         * Evaluates 'x + b' and stores the result into the 'state' property
+         */
+        LIB4NEURO_API double activate(double x, double b) override;
 
-    /**
-     * Calculates the partial derivative of the activation function
-     * f(x) = x + b at point x
-     * @return Partial derivative of the activation function according to the
-     * 'bias' parameter. Returns 1.0
-     */
-    LIB4NEURO_API double activation_function_eval_derivative_bias( double x, double b ) override;
+        /**
+         * Calculates the partial derivative of the activation function
+         * f(x) = x + b at point x
+         * @return Partial derivative of the activation function according to the
+         * 'bias' parameter. Returns 1.0
+         */
+        LIB4NEURO_API double activation_function_eval_derivative_bias(double x, double b) override;
 
-    /**
-     * Calculates d/dx of (x + b) at point x
-     * @return 1.0
-     */
-    LIB4NEURO_API double activation_function_eval_derivative( double x, double b ) override;
+        /**
+         * Calculates d/dx of (x + b) at point x
+         * @return 1.0
+         */
+        LIB4NEURO_API double activation_function_eval_derivative(double x, double b) override;
 
-    /**
-     * Returns a pointer to a Neuron with derivative as its activation function
-     * @return
-     */
-    LIB4NEURO_API Neuron* get_derivative( ) override;
+        /**
+         * Returns a pointer to a Neuron with derivative as its activation function
+         * @return
+         */
+        LIB4NEURO_API Neuron *get_derivative() override;
 
-};
+    };
 
+}
 
 #endif //INC_4NEURO_NEURONLINEAR_H
diff --git a/src/Neuron/NeuronLinearSerialization.h b/src/Neuron/NeuronLinearSerialization.h
index fdbbff5c3ccb8554cdb3f27f2afb2b29971e943f..38cab83b631720fc76697f6de439b85c739af094 100644
--- a/src/Neuron/NeuronLinearSerialization.h
+++ b/src/Neuron/NeuronLinearSerialization.h
@@ -13,15 +13,16 @@
 #include "NeuronLinear.h"
 #include "NeuronSerialization.h"
 
-BOOST_CLASS_EXPORT_KEY(NeuronLinear);
+BOOST_CLASS_EXPORT_KEY(lib4neuro::NeuronLinear);
 
-
-struct NeuronLinear :: access {
-    template <class Archive>
-    static void serialize(Archive &ar, NeuronLinear& n, const unsigned int version) {
-        ar & boost::serialization::base_object<Neuron>(n);
-    }
-};
+namespace lib4neuro {
+    struct NeuronLinear::access {
+        template<class Archive>
+        static void serialize(Archive &ar, NeuronLinear &n, const unsigned int version) {
+            ar & boost::serialization::base_object<Neuron>(n);
+        }
+    };
+}
 
 namespace boost {
     namespace serialization {
@@ -34,9 +35,9 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, NeuronLinear& n, const unsigned int version)
+        void serialize(Archive & ar, lib4neuro::NeuronLinear& n, const unsigned int version)
         {
-            NeuronLinear::access::serialize(ar, n, version);
+            lib4neuro::NeuronLinear::access::serialize(ar, n, version);
         }
 
     } // namespace serialization
diff --git a/src/Neuron/NeuronLogistic.cpp b/src/Neuron/NeuronLogistic.cpp
index 4e52c629a17516d2a8865da3baeeffc482cdff24..1af9fde526e7821d78305582b712f0fe691cb61e 100644
--- a/src/Neuron/NeuronLogistic.cpp
+++ b/src/Neuron/NeuronLogistic.cpp
@@ -8,110 +8,113 @@
 #include "NeuronSerialization.h"
 #include "NeuronLogisticSerialization.h"
 
-BOOST_CLASS_EXPORT_IMPLEMENT(NeuronLogistic);
-BOOST_CLASS_EXPORT_IMPLEMENT(NeuronLogistic_d1);
-BOOST_CLASS_EXPORT_IMPLEMENT(NeuronLogistic_d2);
+BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::NeuronLogistic);
+BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::NeuronLogistic_d1);
+BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::NeuronLogistic_d2);
 
-NeuronLogistic_d2::NeuronLogistic_d2( ) {}
+namespace lib4neuro {
 
-double NeuronLogistic_d2::activate( double x, double b ) {
-    //(e^(b + x) (e^b - e^x))/(e^b + e^x)^3
+    NeuronLogistic_d2::NeuronLogistic_d2() {}
 
-    double ex = std::pow(E, x);
-    double eb = std::pow(E, b);
-    double denom = (eb + ex);
+    double NeuronLogistic_d2::activate(double x, double b) {
+        //(e^(b + x) (e^b - e^x))/(e^b + e^x)^3
 
-    return (eb*ex*(eb - ex))/(denom*denom*denom);
-}
+        double ex = std::pow(E, x);
+        double eb = std::pow(E, b);
+        double denom = (eb + ex);
 
-double NeuronLogistic_d2::activation_function_eval_derivative_bias( double x, double b ) {
-    //-(e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
+        return (eb * ex * (eb - ex)) / (denom * denom * denom);
+    }
 
-    double eb = std::pow(E, b);
-    double ex = std::pow(E, x);
-    double ebex = eb * ex;
-    double denom = (eb + ex);
+    double NeuronLogistic_d2::activation_function_eval_derivative_bias(double x, double b) {
+        //-(e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
 
-    return  -(ebex*(-4*ebex + eb*eb +ex*ex))/(denom*denom*denom*denom);
-}
+        double eb = std::pow(E, b);
+        double ex = std::pow(E, x);
+        double ebex = eb * ex;
+        double denom = (eb + ex);
 
-double NeuronLogistic_d2::activation_function_eval_derivative( double x, double b ) {
-    //(e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
-    return -this->activation_function_eval_derivative_bias( x, b );
-}
+        return -(ebex * (-4 * ebex + eb * eb + ex * ex)) / (denom * denom * denom * denom);
+    }
 
-NeuronLogistic* NeuronLogistic_d2::get_derivative() {
-    //TODO maybe not the best way
-    return nullptr;
-}
+    double NeuronLogistic_d2::activation_function_eval_derivative(double x, double b) {
+        //(e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
+        return -this->activation_function_eval_derivative_bias(x, b);
+    }
 
-NeuronLogistic_d1::NeuronLogistic_d1( ) {}
+    NeuronLogistic *NeuronLogistic_d2::get_derivative() {
+        //TODO maybe not the best way
+        return nullptr;
+    }
 
+    NeuronLogistic_d1::NeuronLogistic_d1() {}
 
-double NeuronLogistic_d1::activate( double x, double b ) {
-    //e^(b - x)/(e^(b - x) + 1)^2
 
-    double ex = std::pow(E, x);
-    double eb = std::pow(E, b);
-    double d = (eb/ex);
-    double denom = (d + 1);
+    double NeuronLogistic_d1::activate(double x, double b) {
+        //e^(b - x)/(e^(b - x) + 1)^2
 
-    return d/(denom*denom);
-}
+        double ex = std::pow(E, x);
+        double eb = std::pow(E, b);
+        double d = (eb / ex);
+        double denom = (d + 1);
 
-double NeuronLogistic_d1::activation_function_eval_derivative_bias( double x, double b ) {
-    //(e^(b + x) (e^x - e^b))/(e^b + e^x)^3
+        return d / (denom * denom);
+    }
 
-    double ex = std::pow(E, x);
-    double eb = std::pow(E, b);
-    double denom = (eb + ex);
+    double NeuronLogistic_d1::activation_function_eval_derivative_bias(double x, double b) {
+        //(e^(b + x) (e^x - e^b))/(e^b + e^x)^3
 
-    return (eb*ex* (ex - eb))/(denom*denom*denom);
-}
+        double ex = std::pow(E, x);
+        double eb = std::pow(E, b);
+        double denom = (eb + ex);
 
-double NeuronLogistic_d1::activation_function_eval_derivative( double x, double b ) {
-    //(e^(b + x) (e^b - e^x))/(e^b + e^x)^3
-    return -this->activation_function_eval_derivative_bias( x, b );
-}
+        return (eb * ex * (ex - eb)) / (denom * denom * denom);
+    }
 
-NeuronLogistic* NeuronLogistic_d1::get_derivative( ) {
-    //(e^(b + x) (e^b - e^x))/(e^b + e^x)^3
-    NeuronLogistic_d2* output = nullptr;
+    double NeuronLogistic_d1::activation_function_eval_derivative(double x, double b) {
+        //(e^(b + x) (e^b - e^x))/(e^b + e^x)^3
+        return -this->activation_function_eval_derivative_bias(x, b);
+    }
 
-    output = new NeuronLogistic_d2( );
+    NeuronLogistic *NeuronLogistic_d1::get_derivative() {
+        //(e^(b + x) (e^b - e^x))/(e^b + e^x)^3
+        NeuronLogistic_d2 *output = nullptr;
 
-    return output;
-}
+        output = new NeuronLogistic_d2();
 
-NeuronLogistic::NeuronLogistic( ) {}
+        return output;
+    }
 
-double NeuronLogistic::activate( double x, double b ) {
-    //(1 + e^(-x + b))^(-1)
+    NeuronLogistic::NeuronLogistic() {}
 
-    double ex = std::pow(E, b - x);
-    return 1.0 / (1.0 + ex);
-}
+    double NeuronLogistic::activate(double x, double b) {
+        //(1 + e^(-x + b))^(-1)
 
-double NeuronLogistic::activation_function_eval_derivative_bias( double x, double b ) {
-    //-e^(b - x)/(e^(b - x) + 1)^2
-    double ex = std::pow(E, b - x);
-    double denom = (ex + 1);
+        double ex = std::pow(E, b - x);
+        return 1.0 / (1.0 + ex);
+    }
 
-    return -ex/(denom*denom);
-}
+    double NeuronLogistic::activation_function_eval_derivative_bias(double x, double b) {
+        //-e^(b - x)/(e^(b - x) + 1)^2
+        double ex = std::pow(E, b - x);
+        double denom = (ex + 1);
 
+        return -ex / (denom * denom);
+    }
 
-double NeuronLogistic::activation_function_eval_derivative( double x, double b ) {
-    //e^(b - x)/(e^(b - x) + 1)^2
-    return -this->activation_function_eval_derivative_bias( x, b );
 
-}
+    double NeuronLogistic::activation_function_eval_derivative(double x, double b) {
+        //e^(b - x)/(e^(b - x) + 1)^2
+        return -this->activation_function_eval_derivative_bias(x, b);
 
-NeuronLogistic* NeuronLogistic::get_derivative( ) {
+    }
 
-    NeuronLogistic_d1 *output = nullptr;
-    output = new NeuronLogistic_d1( );
+    NeuronLogistic *NeuronLogistic::get_derivative() {
 
-    return output;
+        NeuronLogistic_d1 *output = nullptr;
+        output = new NeuronLogistic_d1();
+
+        return output;
+    }
 
 }
\ No newline at end of file
diff --git a/src/Neuron/NeuronLogistic.h b/src/Neuron/NeuronLogistic.h
index 0cb3cc1c7eac0a45cf4967e6b6e085261be6759d..a2ce6e80e573bcbd2ab69d817137e3032fbf2532 100644
--- a/src/Neuron/NeuronLogistic.h
+++ b/src/Neuron/NeuronLogistic.h
@@ -10,142 +10,142 @@
 #ifndef INC_4NEURO_NEURONLOGISTIC_H
 #define INC_4NEURO_NEURONLOGISTIC_H
 
-
-#include "../settings.h"
-
 #include <cmath>
 
+#include "../settings.h"
 #include "Neuron.h"
 
-class NeuronLogistic:public Neuron, public IDifferentiable {
-
-public:
-
-    /**
-     * Struct used to access private properties from
-     * the serialization function
-     */
-    struct access;
-
-    /**
-     * Constructs the object of the Logistic neuron with activation function
-     * f(x) = (1 + e^(-x + b))^(-1)
-     */
-    LIB4NEURO_API explicit NeuronLogistic( );
-
-    /**
-     * Evaluates '(1 + e^(-x + b))^(-1)' and stores the result into the 'state' property
-     */
-    LIB4NEURO_API virtual double activate( double x, double b ) override;
-
-    /**
-     * Calculates the partial derivative of the activation function
-     * f(x) = (1 + e^(-x + b))^(-1)
-     * @return Partial derivative of the activation function according to the
-     * bias, returns: -e^(b - x)/(e^(b - x) + 1)^2
-     */
-    LIB4NEURO_API virtual double activation_function_eval_derivative_bias( double x, double b ) override;
-    /**
-     * Calculates d/dx of (1 + e^(-x + b))^(-1)
-     * @return e^(b - x)/(e^(b - x) + 1)^2
-     */
-    LIB4NEURO_API virtual double activation_function_eval_derivative( double x, double b ) override;
-
-    /**
-     * Returns a pointer to a Neuron with derivative as its activation function
-     * @return
-     */
-    LIB4NEURO_API virtual NeuronLogistic* get_derivative( ) override;
-};
-
-
-class NeuronLogistic_d1:public NeuronLogistic {
-
-public:
-
-    /**
-     * Struct used to access private properties from
-     * the serialization function
-     */
-    struct access;
-
-    /**
-     * Constructs the object of the Logistic neuron with activation function
-     * f(x) = e^(b - x)/(e^(b - x) + 1)^2
-     * @param[in] b Bias
-     */
-    LIB4NEURO_API explicit NeuronLogistic_d1( );
-
-    /**
-     * Evaluates 'e^(b - x)/(e^(b - x) + 1)^2' and returns the result
-     */
-    LIB4NEURO_API virtual double activate( double x, double b ) override;
-
-    /**
-     * Calculates the partial derivative of the activation function
-     * f(x) = e^(b - x)/(e^(b - x) + 1)^2
-     * @return Partial derivative of the activation function according to the
-     * bias, returns: (e^(b + x) (e^x - e^b))/(e^b + e^x)^3
-     */
-    LIB4NEURO_API virtual double activation_function_eval_derivative_bias( double x, double b ) override;
-
-    /**
-     * Calculates d/dx of  e^(b - x)*(1 + e^(b - x))^(-2)
-     * @return  (e^(b + x) (e^b - e^x))/(e^b + e^x)^3
-     */
-    LIB4NEURO_API virtual double activation_function_eval_derivative( double x, double b ) override;
-
-    /**
-     * Returns a pointer to a Neuron with derivative as its activation function
-     * @return
-     */
-    LIB4NEURO_API virtual NeuronLogistic* get_derivative( ) override;
-};
-
-
-class NeuronLogistic_d2:public NeuronLogistic_d1 {
-
-public:
-
-    /**
-     * Struct used to access private properties from
-     * the serialization function
-     */
-    struct access;
-
-    /**
-     * Constructs the object of the Logistic neuron with activation function
-     * f(x) = (e^(b + x) (e^b - e^x))/(e^b + e^x)^3
-     */
-    LIB4NEURO_API explicit NeuronLogistic_d2( );
-
-    /**
-     * Evaluates '(e^(b + x) (e^b - e^x))/(e^b + e^x)^3' and returns the result
-     */
-    LIB4NEURO_API virtual double activate( double x, double b ) override;
-
-    /**
-     * Calculates the partial derivative of the activation function
-     * f(x) = (e^(b + x) (e^b - e^x))/(e^b + e^x)^3
-     * @return Partial derivative of the activation function according to the
-     * bias, returns: -(e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
-     */
-    LIB4NEURO_API virtual double activation_function_eval_derivative_bias( double x, double b ) override;
-
-    /**
-     * Calculates d/dx of  (e^(b + x) (e^b - e^x))/(e^b + e^x)^3
-     * @return (e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
-     */
-    LIB4NEURO_API virtual double activation_function_eval_derivative( double x, double b ) override;
-
-    /**
-     *
-     * @return
-     */
-    LIB4NEURO_API virtual NeuronLogistic* get_derivative( ) override;
-
-};
-
-
+namespace lib4neuro {
+
+    class NeuronLogistic : public Neuron, public IDifferentiable {
+
+    public:
+
+        /**
+         * Struct used to access private properties from
+         * the serialization function
+         */
+        struct access;
+
+        /**
+         * Constructs the object of the Logistic neuron with activation function
+         * f(x) = (1 + e^(-x + b))^(-1)
+         */
+        LIB4NEURO_API explicit NeuronLogistic();
+
+        /**
+         * Evaluates '(1 + e^(-x + b))^(-1)' and stores the result into the 'state' property
+         */
+        LIB4NEURO_API virtual double activate(double x, double b) override;
+
+        /**
+         * Calculates the partial derivative of the activation function
+         * f(x) = (1 + e^(-x + b))^(-1)
+         * @return Partial derivative of the activation function according to the
+         * bias, returns: -e^(b - x)/(e^(b - x) + 1)^2
+         */
+        LIB4NEURO_API virtual double activation_function_eval_derivative_bias(double x, double b) override;
+        /**
+         * Calculates d/dx of (1 + e^(-x + b))^(-1)
+         * @return e^(b - x)/(e^(b - x) + 1)^2
+         */
+        LIB4NEURO_API virtual double activation_function_eval_derivative(double x, double b) override;
+
+        /**
+         * Returns a pointer to a Neuron with derivative as its activation function
+         * @return
+         */
+        LIB4NEURO_API virtual NeuronLogistic *get_derivative() override;
+    };
+
+
+    class NeuronLogistic_d1 : public NeuronLogistic {
+
+    public:
+
+        /**
+         * Struct used to access private properties from
+         * the serialization function
+         */
+        struct access;
+
+        /**
+         * Constructs the object of the Logistic neuron with activation function
+         * f(x) = e^(b - x)/(e^(b - x) + 1)^2
+         * (the bias 'b' is supplied as an argument to activate())
+         */
+        LIB4NEURO_API explicit NeuronLogistic_d1();
+
+        /**
+         * Evaluates 'e^(b - x)/(e^(b - x) + 1)^2' and returns the result
+         */
+        LIB4NEURO_API virtual double activate(double x, double b) override;
+
+        /**
+         * Calculates the partial derivative of the activation function
+         * f(x) = e^(b - x)/(e^(b - x) + 1)^2
+         * @return Partial derivative of the activation function according to the
+         * bias, returns: (e^(b + x) (e^x - e^b))/(e^b + e^x)^3
+         */
+        LIB4NEURO_API virtual double activation_function_eval_derivative_bias(double x, double b) override;
+
+        /**
+         * Calculates d/dx of  e^(b - x)*(1 + e^(b - x))^(-2)
+         * @return  (e^(b + x) (e^b - e^x))/(e^b + e^x)^3
+         */
+        LIB4NEURO_API virtual double activation_function_eval_derivative(double x, double b) override;
+
+        /**
+         * Returns a pointer to a Neuron with derivative as its activation function
+         * @return
+         */
+        LIB4NEURO_API virtual NeuronLogistic *get_derivative() override;
+    };
+
+
+    class NeuronLogistic_d2 : public NeuronLogistic_d1 {
+
+    public:
+
+        /**
+         * Struct used to access private properties from
+         * the serialization function
+         */
+        struct access;
+
+        /**
+         * Constructs the object of the Logistic neuron with activation function
+         * f(x) = (e^(b + x) (e^b - e^x))/(e^b + e^x)^3
+         */
+        LIB4NEURO_API explicit NeuronLogistic_d2();
+
+        /**
+         * Evaluates '(e^(b + x) (e^b - e^x))/(e^b + e^x)^3' and returns the result
+         */
+        LIB4NEURO_API virtual double activate(double x, double b) override;
+
+        /**
+         * Calculates the partial derivative of the activation function
+         * f(x) = (e^(b + x) (e^b - e^x))/(e^b + e^x)^3
+         * @return Partial derivative of the activation function according to the
+         * bias, returns: -(e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
+         */
+        LIB4NEURO_API virtual double activation_function_eval_derivative_bias(double x, double b) override;
+
+        /**
+         * Calculates d/dx of  (e^(b + x) (e^b - e^x))/(e^b + e^x)^3
+         * @return (e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
+         */
+        LIB4NEURO_API virtual double activation_function_eval_derivative(double x, double b) override;
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API virtual NeuronLogistic *get_derivative() override;
+
+    };
+
+}
 
 #endif //INC_4NEURO_NEURONLOGISTIC_H
diff --git a/src/Neuron/NeuronLogisticSerialization.h b/src/Neuron/NeuronLogisticSerialization.h
index 0ca37ce9afd31f9f7469c8e74a5738589858d8ec..1d65fcba360ff47d9fcc4baeec740c1746f430de 100644
--- a/src/Neuron/NeuronLogisticSerialization.h
+++ b/src/Neuron/NeuronLogisticSerialization.h
@@ -14,30 +14,33 @@
 #include "NeuronLogistic.h"
 #include "NeuronSerialization.h"
 
-BOOST_CLASS_EXPORT_KEY(NeuronLogistic);
-BOOST_CLASS_EXPORT_KEY(NeuronLogistic_d1);
-BOOST_CLASS_EXPORT_KEY(NeuronLogistic_d2);
+BOOST_CLASS_EXPORT_KEY(lib4neuro::NeuronLogistic);
+BOOST_CLASS_EXPORT_KEY(lib4neuro::NeuronLogistic_d1);
+BOOST_CLASS_EXPORT_KEY(lib4neuro::NeuronLogistic_d2);
 
-struct NeuronLogistic :: access {
-    template <class Archive>
-    static void serialize(Archive &ar, NeuronLogistic& n, const unsigned int version) {
-        ar & boost::serialization::base_object<Neuron>(n);
-    }
-};
+namespace lib4neuro {
+    struct NeuronLogistic::access {
+        template<class Archive>
+        static void serialize(Archive &ar, NeuronLogistic &n, const unsigned int version) {
+            ar & boost::serialization::base_object<Neuron>(n);
+        }
+    };
 
-struct NeuronLogistic_d1 :: access {
-    template <class Archive>
-    static void serialize(Archive &ar, NeuronLogistic_d1& n, const unsigned int version) {
-        ar & boost::serialization::base_object<NeuronLogistic>(n);
-    }
-};
+    struct NeuronLogistic_d1::access {
+        template<class Archive>
+        static void serialize(Archive &ar, NeuronLogistic_d1 &n, const unsigned int version) {
+            ar & boost::serialization::base_object<NeuronLogistic>(n);
+        }
+    };
+
+    struct NeuronLogistic_d2::access {
+        template<class Archive>
+        static void serialize(Archive &ar, NeuronLogistic_d2 &n, const unsigned int version) {
+            ar & boost::serialization::base_object<NeuronLogistic_d1>(n);
+        }
+    };
 
-struct NeuronLogistic_d2 :: access {
-    template <class Archive>
-    static void serialize(Archive &ar, NeuronLogistic_d2& n, const unsigned int version) {
-        ar & boost::serialization::base_object<NeuronLogistic_d1>(n);
-    }
-};
+}
 
 namespace boost {
     namespace serialization {
@@ -50,9 +53,8 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, NeuronLogistic& n, const unsigned int version)
-        {
-            NeuronLogistic::access::serialize(ar, n, version);
+        void serialize(Archive &ar, lib4neuro::NeuronLogistic &n, const unsigned int version) {
+            lib4neuro::NeuronLogistic::access::serialize(ar, n, version);
         }
 
         /**
@@ -63,9 +65,8 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, NeuronLogistic_d1& n, const unsigned int version)
-        {
-            NeuronLogistic_d1::access::serialize(ar, n, version);
+        void serialize(Archive &ar, lib4neuro::NeuronLogistic_d1 &n, const unsigned int version) {
+            lib4neuro::NeuronLogistic_d1::access::serialize(ar, n, version);
         }
 
         /**
@@ -76,9 +77,8 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, NeuronLogistic_d2& n, const unsigned int version)
-        {
-            NeuronLogistic_d2::access::serialize(ar, n, version);
+        void serialize(Archive &ar, lib4neuro::NeuronLogistic_d2 &n, const unsigned int version) {
+            lib4neuro::NeuronLogistic_d2::access::serialize(ar, n, version);
         }
 
     } // namespace serialization
diff --git a/src/Neuron/NeuronSerialization.h b/src/Neuron/NeuronSerialization.h
index 13be0c148acf5358c02424922e61f7c68f00c429..c1fd0330e9c857a8efd88ac35eef9d36f618edb1 100644
--- a/src/Neuron/NeuronSerialization.h
+++ b/src/Neuron/NeuronSerialization.h
@@ -12,15 +12,16 @@
 
 #include "Neuron.h"
 
-BOOST_SERIALIZATION_ASSUME_ABSTRACT(Neuron);
+BOOST_SERIALIZATION_ASSUME_ABSTRACT(lib4neuro::Neuron);
 
-BOOST_CLASS_EXPORT_KEY(Neuron);
+BOOST_CLASS_EXPORT_KEY(lib4neuro::Neuron);
 
-struct Neuron :: access {
-    template <class Archive>
-    static void serialize(Archive &ar, Neuron& n, const unsigned int version) {}
+struct lib4neuro::Neuron::access {
+    template<class Archive>
+    static void serialize(Archive &ar, lib4neuro::Neuron &n, const unsigned int version) {}
 };
 
+
 namespace boost {
     namespace serialization {
 
@@ -32,9 +33,9 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, Neuron& n, const unsigned int version)
+        void serialize(Archive & ar, lib4neuro::Neuron& n, const unsigned int version)
         {
-            Neuron::access::serialize(ar, n, version);
+            lib4neuro::Neuron::access::serialize(ar, n, version);
         }
 
     } // namespace serialization
diff --git a/src/Solvers/DESolver.cpp b/src/Solvers/DESolver.cpp
index 83dc750b44d80e52603728952fef83dd159ddd54..029dbd5592190bdb0bb81609ffa9541d3f05b960 100644
--- a/src/Solvers/DESolver.cpp
+++ b/src/Solvers/DESolver.cpp
@@ -5,379 +5,407 @@
  * @date 22.7.18 -
  */
 
+#include "../message.h"
 #include "DESolver.h"
 
 //TODO add support for multiple unknown functions
 
-MultiIndex::MultiIndex(size_t dimension) {
-    this->dim = dimension;
-    this->partial_derivatives_degrees.resize( this->dim );
-    std::fill( this->partial_derivatives_degrees.begin(), this->partial_derivatives_degrees.end(), 0 );
-}
+namespace lib4neuro {
+    MultiIndex::MultiIndex(size_t dimension) {
+        this->dim = dimension;
+        this->partial_derivatives_degrees.resize(this->dim);
+        std::fill(this->partial_derivatives_degrees.begin(), this->partial_derivatives_degrees.end(), 0);
+    }
 
-void MultiIndex::set_partial_derivative(size_t index, size_t value) {
-    this->partial_derivatives_degrees.at( index ) = value;
-}
+    void MultiIndex::set_partial_derivative(size_t index, size_t value) {
+        this->partial_derivatives_degrees.at(index) = value;
+    }
 
-std::vector<size_t>* MultiIndex::get_partial_derivatives_degrees() {
-    return &this->partial_derivatives_degrees;
-}
+    std::vector<size_t> *MultiIndex::get_partial_derivatives_degrees() {
+        return &this->partial_derivatives_degrees;
+    }
 
-bool MultiIndex::operator<(const MultiIndex &rhs) const {
-    if(dim < rhs.dim){ return true; }
-	else if(dim > rhs.dim){ return false; }
+    bool MultiIndex::operator<(const MultiIndex &rhs) const {
+        if (dim < rhs.dim) { return true; }
+        else if (dim > rhs.dim) { return false; }
 
-    for(size_t i = 0; i < dim; ++i){
-        if(partial_derivatives_degrees[i] < rhs.partial_derivatives_degrees[i]){
-            return true;
-        }
-		else if(partial_derivatives_degrees[i] > rhs.partial_derivatives_degrees[i]){
-            return false;
+        for (size_t i = 0; i < dim; ++i) {
+            if (partial_derivatives_degrees[i] < rhs.partial_derivatives_degrees[i]) {
+                return true;
+            } else if (partial_derivatives_degrees[i] > rhs.partial_derivatives_degrees[i]) {
+                return false;
+            }
         }
+        return false;
     }
-    return false;
-}
 
-std::string MultiIndex::to_string( )const {
-    std::string output;
-    char buff[ 255 ];
+    std::string MultiIndex::to_string() const {
+        std::string output;
+        char buff[255];
 
-    for( size_t i = 0; i < this->dim - 1; ++i){
-        sprintf(buff, "%d, ", (int)this->partial_derivatives_degrees[i]);
-        output.append( buff );
+        for (size_t i = 0; i < this->dim - 1; ++i) {
+            sprintf(buff, "%d, ", (int) this->partial_derivatives_degrees[i]);
+            output.append(buff);
+        }
+        sprintf(buff, "%d", (int) this->partial_derivatives_degrees[this->dim - 1]);
+        output.append(buff);
+
+        return output;
     }
-    sprintf(buff, "%d", (int)this->partial_derivatives_degrees[this->dim - 1]);
-    output.append( buff );
 
-    return output;
-}
+    size_t MultiIndex::get_degree() const {
+        size_t output = 0;
 
-size_t MultiIndex::get_degree() const{
-    size_t output = 0;
+        for (auto i: this->partial_derivatives_degrees) {
+            output += i;
+        }
 
-    for( auto i: this->partial_derivatives_degrees ){
-        output += i;
+        return output;
     }
 
-    return output;
-}
 
+    DESolver::DESolver(size_t n_equations, size_t n_inputs, size_t m) {
 
-DESolver::DESolver( size_t n_equations, size_t n_inputs, size_t m ) {
-
-    if( m <= 0 || n_inputs <= 0 || n_equations <= 0 ){
-        throw std::invalid_argument("Parameters 'm', 'n_equations', 'n_inputs' and 'n_outputs' must be greater than zero!");
-    }
-    printf("Differential Equation Solver with %d equations\n--------------------------------------------------------------------------\n", (int)n_equations);
+        if (m <= 0 || n_inputs <= 0 || n_equations <= 0) {
+            throw std::invalid_argument(
+                    "Parameters 'm', 'n_equations', 'n_inputs' and 'n_outputs' must be greater than zero!");
+        }
+        MSG_INFO("Differential Equation Solver with " << n_equations << " equations" << std::endl
+                                                      << "--------------------------------------------------------------------------");
 
-    printf("Constructing NN structure representing the solution [%d input neurons][%d inner neurons]...\n", (int)n_inputs, (int)m);
+        MSG_INFO("Constructing NN structure representing the solution [" << n_inputs << " input neurons][" << m
                                                                          << " inner neurons]...");
 
-    this->dim_i = n_inputs;
-    this->dim_inn= m;
-    this->n_equations = n_equations;
+        this->dim_i = n_inputs;
+        this->dim_inn = m;
+        this->n_equations = n_equations;
 
-    this->solution = new NeuralNetwork( );
+        this->solution = new NeuralNetwork();
 
-    this->solution_inner_neurons = new std::vector<NeuronLogistic*>(0);
-    this->solution_inner_neurons->reserve( m );
+        this->solution_inner_neurons = new std::vector<NeuronLogistic *>(0);
+        this->solution_inner_neurons->reserve(m);
 
-    /* input neurons */
-    std::vector<size_t> input_set( this->dim_i );
-    size_t idx;
-    for( size_t i = 0; i < this->dim_i; ++i ){
-        NeuronLinear *input_i = new NeuronLinear( );  //f(x) = x
-        idx = this->solution->add_neuron( input_i, BIAS_TYPE::NO_BIAS );
-        input_set[i] = idx;
-    }
-    this->solution->specify_input_neurons( input_set );
-    size_t first_input_neuron = input_set[0];
-
-    /* output neuron */
-    std::vector<size_t> output_set( 1 );
-    idx = this->solution->add_neuron( new NeuronLinear( ), BIAS_TYPE::NO_BIAS );//f(x) = x
-    output_set[0] = idx;
-    this->solution->specify_output_neurons( output_set );
-    size_t first_output_neuron = idx;
-
-    /* inner neurons */
-    size_t first_inner_neuron = 0;
-    for(size_t i = 0; i < this->dim_inn; ++i){
-        NeuronLogistic *inner_i = new NeuronLogistic( ); //f(x) = 1.0 / (1.0 + e^(-x))
-        this->solution_inner_neurons->push_back( inner_i );
-        idx = this->solution->add_neuron( inner_i, BIAS_TYPE::NEXT_BIAS );
-
-        if(i == 0){
-            first_inner_neuron = idx;
+        /* input neurons */
+        std::vector<size_t> input_set(this->dim_i);
+        size_t idx;
+        for (size_t i = 0; i < this->dim_i; ++i) {
+            NeuronLinear *input_i = new NeuronLinear();  //f(x) = x
+            idx = this->solution->add_neuron(input_i, BIAS_TYPE::NO_BIAS);
+            input_set[i] = idx;
+        }
+        this->solution->specify_input_neurons(input_set);
+        size_t first_input_neuron = input_set[0];
+
+        /* output neuron */
+        std::vector<size_t> output_set(1);
+        idx = this->solution->add_neuron(new NeuronLinear(), BIAS_TYPE::NO_BIAS);//f(x) = x
+        output_set[0] = idx;
+        this->solution->specify_output_neurons(output_set);
+        size_t first_output_neuron = idx;
+
+        /* inner neurons */
+        size_t first_inner_neuron = 0;
+        for (size_t i = 0; i < this->dim_inn; ++i) {
+            NeuronLogistic *inner_i = new NeuronLogistic(); //f(x) = 1.0 / (1.0 + e^(-x))
+            this->solution_inner_neurons->push_back(inner_i);
+            idx = this->solution->add_neuron(inner_i, BIAS_TYPE::NEXT_BIAS);
+
+            if (i == 0) {
+                first_inner_neuron = idx;
+            }
         }
-    }
 
 
-    /* connections between input neurons and inner neurons */
-    size_t weight_idx;
-    for(size_t i = 0; i < this->dim_i; ++i){
-        for(size_t j = 0; j < this->dim_inn; ++j){
-            weight_idx = this->solution->add_connection_simple(first_input_neuron + i, first_inner_neuron + j, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT );
-            printf("  adding a connection between input neuron %2d[%2d] and inner neuron  %2d[%2d], weight index %3d\n", (int)i, (int)(first_input_neuron + i), (int)j, (int)(first_inner_neuron + j), (int)weight_idx);
+        /* connections between input neurons and inner neurons */
+        size_t weight_idx;
+        for (size_t i = 0; i < this->dim_i; ++i) {
+            for (size_t j = 0; j < this->dim_inn; ++j) {
+                weight_idx = this->solution->add_connection_simple(first_input_neuron + i, first_inner_neuron + j,
+                                                                   SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
+                printf("  adding a connection between input neuron %2d[%2d] and inner neuron  %2d[%2d], weight index %3d\n",
+                       (int) i, (int) (first_input_neuron + i), (int) j, (int) (first_inner_neuron + j),
+                       (int) weight_idx);
+            }
         }
-    }
 
-    /* connections between inner neurons and output neurons */
-    for(size_t i = 0; i < this->dim_inn; ++i){
-        weight_idx = this->solution->add_connection_simple(first_inner_neuron + i, first_output_neuron, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT );
-        printf("  adding a connection between inner neuron %2d[%2d] and output neuron %2d[%2d], weight index %3d\n", (int)i, (int)(first_inner_neuron + i), 0, (int)(first_output_neuron ), (int)weight_idx);
-    }
+        /* connections between inner neurons and output neurons */
+        for (size_t i = 0; i < this->dim_inn; ++i) {
+            weight_idx = this->solution->add_connection_simple(first_inner_neuron + i, first_output_neuron,
+                                                               SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
+            printf("  adding a connection between inner neuron %2d[%2d] and output neuron %2d[%2d], weight index %3d\n",
+                   (int) i, (int) (first_inner_neuron + i), 0, (int) (first_output_neuron), (int) weight_idx);
+        }
 
-    MultiIndex initial_mi(this->dim_i);
+        MultiIndex initial_mi(this->dim_i);
 
-    this->map_multiindices2nn[initial_mi] = this->solution;
+        this->map_multiindices2nn[initial_mi] = this->solution;
 
-    this->differential_equations = new std::vector<NeuralNetworkSum*>(0);
-    this->differential_equations->reserve(this->n_equations);
+        this->differential_equations = new std::vector<NeuralNetworkSum *>(0);
+        this->differential_equations->reserve(this->n_equations);
 
-    for( unsigned int i = 0; i < this->n_equations; ++i ){
-        NeuralNetworkSum *new_sum = new NeuralNetworkSum();
-        this->differential_equations->push_back(new_sum);
-    }
+        for (unsigned int i = 0; i < this->n_equations; ++i) {
+            NeuralNetworkSum *new_sum = new NeuralNetworkSum();
+            this->differential_equations->push_back(new_sum);
+        }
 
-    this->errors_functions_types = new std::vector<ErrorFunctionType >(this->n_equations);
-    this->errors_functions_data_sets = new std::vector<DataSet*>(this->n_equations);
+        this->errors_functions_types = new std::vector<ErrorFunctionType>(this->n_equations);
+        this->errors_functions_data_sets = new std::vector<DataSet *>(this->n_equations);
 
-    printf("done\n");
+        MSG_INFO("Solver was successfully constructed.\n");
 
-}
+    }
 
-DESolver::~DESolver() {
+    DESolver::~DESolver() {
 
-    if( this->solution_inner_neurons ){
-        delete this->solution_inner_neurons;
-        this->solution_inner_neurons = nullptr;
-    }
+        if (this->solution_inner_neurons) {
+            delete this->solution_inner_neurons;
+            this->solution_inner_neurons = nullptr;
+        }
 
-    if( this->errors_functions_types ){
-        delete this->errors_functions_types;
-        this->errors_functions_types = nullptr;
-    }
+        if (this->errors_functions_types) {
+            delete this->errors_functions_types;
+            this->errors_functions_types = nullptr;
+        }
 
-    if( this->errors_functions_data_sets ){
-        delete this->errors_functions_data_sets;
-        this->errors_functions_data_sets = nullptr;
-    }
+        if (this->errors_functions_data_sets) {
+            delete this->errors_functions_data_sets;
+            this->errors_functions_data_sets = nullptr;
+        }
 
-    if(this->differential_equations){
-        for(auto nns: *this->differential_equations){
-            delete nns;
+        if (this->differential_equations) {
+            for (auto nns: *this->differential_equations) {
+                delete nns;
+            }
+            delete this->differential_equations;
+            this->differential_equations = nullptr;
         }
-        delete this->differential_equations;
-        this->differential_equations = nullptr;
-    }
 
+        for (auto nn: this->map_multiindices2nn) {
+            NeuralNetwork *n_to_delete = nn.second;
+            delete n_to_delete;
+        }
 
-    for(auto nn: this->map_multiindices2nn){
-        NeuralNetwork * n_to_delete = nn.second;
-        delete n_to_delete;
     }
 
-}
-
 //TODO more efficient representation of the functions (large portion of the structure is the same for all partial derivatives)
-void DESolver::add_to_differential_equation( size_t equation_idx, MultiIndex &alpha, std::string expression_string ) {
+    void DESolver::add_to_differential_equation(size_t equation_idx, MultiIndex &alpha, std::string expression_string) {
 
-    if( equation_idx >= this->n_equations ){
-        throw std::invalid_argument( "The provided equation index is too large!" );
-    }
+        if (equation_idx >= this->n_equations) {
+            throw std::invalid_argument("The provided equation index is too large!");
+        }
 
-    size_t derivative_degree = alpha.get_degree( );
+        size_t derivative_degree = alpha.get_degree();
 
-    if( derivative_degree > 2 ){
-        throw std::invalid_argument("The supplied multi-index represents partial derivative of order higher than 2! (Valid degree is at most 2)\n");
-    }
+        if (derivative_degree > 2) {
+            throw std::invalid_argument(
+                    "The supplied multi-index represents partial derivative of order higher than 2! (Valid degree is at most 2)\n");
+        }
 
-    /* retrieve indices of the variables according to which we perform the derivations ( applicable to any order, not just 2 or less )*/
-    std::vector<size_t> partial_derivative_indices;
-    partial_derivative_indices.reserve(derivative_degree);
-    for( size_t i = 0; i < alpha.get_partial_derivatives_degrees()->size( ); ++i ){
-        size_t degree = alpha.get_partial_derivatives_degrees()->at( i );
+        /* retrieve indices of the variables according to which we perform the derivations ( applicable to any order, not just 2 or less )*/
+        std::vector<size_t> partial_derivative_indices;
+        partial_derivative_indices.reserve(derivative_degree);
+        for (size_t i = 0; i < alpha.get_partial_derivatives_degrees()->size(); ++i) {
+            size_t degree = alpha.get_partial_derivatives_degrees()->at(i);
 
-        while( degree > 0 ){
+            while (degree > 0) {
 
-            partial_derivative_indices.push_back( i );
-            degree--;
+                partial_derivative_indices.push_back(i);
+                degree--;
 
+            }
         }
-    }
 
-    NeuralNetwork *new_net = nullptr;
-    /* we check whether the new multi-index is already present */
-    if(map_multiindices2nn.find( alpha ) != map_multiindices2nn.end()){
-        new_net = map_multiindices2nn[ alpha ];
-        this->differential_equations->at( equation_idx )->add_network( new_net, expression_string );
-        printf("\nAdding an existing partial derivative (multi-index: %s) to equation %d with coefficient %s\n", alpha.to_string().c_str(), (int)equation_idx, expression_string.c_str());
-        return;
-    }
-    printf("\nAdding a new partial derivative (multi-index: %s) to equation %d with coefficient %s\n", alpha.to_string().c_str(), (int)equation_idx, expression_string.c_str());
-
-    /* we need to construct a new neural network */
-    new_net = new NeuralNetwork( );
-    new_net->set_parameter_space_pointers( *this->solution );
-
-    /* input neurons */
-    std::vector<size_t> input_set( this->dim_i );
-    size_t idx;
-    for( size_t i = 0; i < this->dim_i; ++i ){
-        NeuronLinear *input_i = new NeuronLinear( );  //f(x) = x
-        idx = new_net->add_neuron( input_i, BIAS_TYPE::NO_BIAS );
-        input_set[i] = idx;
-    }
-    new_net->specify_input_neurons( input_set );
-    size_t first_input_neuron = input_set[0];
+        NeuralNetwork *new_net = nullptr;
+        /* we check whether the new multi-index is already present */
+        if (map_multiindices2nn.find(alpha) != map_multiindices2nn.end()) {
+            new_net = map_multiindices2nn[alpha];
+            this->differential_equations->at(equation_idx)->add_network(new_net, expression_string);
+            printf("\nAdding an existing partial derivative (multi-index: %s) to equation %d with coefficient %s\n",
+                   alpha.to_string().c_str(), (int) equation_idx, expression_string.c_str());
+            return;
+        }
+        printf("\nAdding a new partial derivative (multi-index: %s) to equation %d with coefficient %s\n",
+               alpha.to_string().c_str(), (int) equation_idx, expression_string.c_str());
+
+        /* we need to construct a new neural network */
+        new_net = new NeuralNetwork();
+        new_net->set_parameter_space_pointers(*this->solution);
+
+        /* input neurons */
+        std::vector<size_t> input_set(this->dim_i);
+        size_t idx;
+        for (size_t i = 0; i < this->dim_i; ++i) {
+            NeuronLinear *input_i = new NeuronLinear();  //f(x) = x
+            idx = new_net->add_neuron(input_i, BIAS_TYPE::NO_BIAS);
+            input_set[i] = idx;
+        }
+        new_net->specify_input_neurons(input_set);
+        size_t first_input_neuron = input_set[0];
+
 
+        /* output neurons */
+        std::vector<size_t> output_set(1);
+        idx = new_net->add_neuron(new NeuronLinear(), BIAS_TYPE::NO_BIAS);//f(x) = x
+        output_set[0] = idx;
+        new_net->specify_output_neurons(output_set);
+        size_t first_output_neuron = idx;
 
-    /* output neurons */
-    std::vector<size_t> output_set( 1 );
-    idx = new_net->add_neuron( new NeuronLinear( ), BIAS_TYPE::NO_BIAS );//f(x) = x
-    output_set[0] = idx;
-    new_net->specify_output_neurons( output_set );
-    size_t first_output_neuron = idx;
+        /* the new partial derivative has degree of at least one */
+        size_t first_inner_neuron = 0;
+        NeuronLogistic *n_ptr = nullptr, *n_ptr2 = nullptr;
+        for (size_t i = 0; i < this->dim_inn; ++i) {
+            n_ptr = this->solution_inner_neurons->at(i);
 
-    /* the new partial derivative has degree of at least one */
-    size_t first_inner_neuron = 0;
-    NeuronLogistic *n_ptr = nullptr, *n_ptr2 = nullptr;
-    for( size_t i = 0; i < this->dim_inn; ++i ){
-        n_ptr = this->solution_inner_neurons->at( i );
+            for (size_t j = 0; j < derivative_degree; ++j) {
+                n_ptr2 = n_ptr;
 
-        for( size_t j = 0; j < derivative_degree; ++j){
-            n_ptr2 = n_ptr;
+                n_ptr = n_ptr->get_derivative();
 
-            n_ptr = n_ptr->get_derivative( );
+                if (j > 0) {
+                    delete n_ptr2;
+                    n_ptr2 = nullptr;
+                }
 
-            if(j > 0){
-                delete n_ptr2;
-                n_ptr2 = nullptr;
             }
+            idx = new_net->add_neuron(n_ptr, BIAS_TYPE::EXISTING_BIAS,
+                                      this->solution->get_neuron_bias_index(i + this->dim_i + 1));
 
+            if (i == 0) {
+                first_inner_neuron = idx;
+            }
         }
-        idx = new_net->add_neuron( n_ptr, BIAS_TYPE::EXISTING_BIAS, this->solution->get_neuron_bias_index( i + this->dim_i + 1 ) );
 
-        if(i == 0){
-            first_inner_neuron = idx;
+        /* identity neurons serving as a 'glue'*/
+        size_t first_glue_neuron = idx + 1;
+        for (size_t i = 0; i < derivative_degree * this->dim_inn; ++i) {
+            idx = new_net->add_neuron(new NeuronLinear(), BIAS_TYPE::NO_BIAS); //f(x) = x
         }
-    }
 
-    /* identity neurons serving as a 'glue'*/
-    size_t first_glue_neuron = idx + 1;
-    for(size_t i = 0; i < derivative_degree * this->dim_inn; ++i){
-        idx = new_net->add_neuron( new NeuronLinear( ), BIAS_TYPE::NO_BIAS ); //f(x) = x
-    }
-
-    /* connections between input neurons and inner neurons */
-    size_t connection_idx = 0;
-    for(size_t i = 0; i < this->dim_i; ++i){
-        for(size_t j = 0; j < this->dim_inn; ++j){
-            printf("  adding a connection between input neuron %2d[%2d] and inner neuron  %2d[%2d], connection index: %3d\n", (int)i, (int)(first_input_neuron + i), (int)j, (int)(first_inner_neuron + j), (int)connection_idx);
-            new_net->add_existing_connection(first_input_neuron + i, first_inner_neuron + j, connection_idx, *this->solution );
+        /* connections between input neurons and inner neurons */
+        size_t connection_idx = 0;
+        for (size_t i = 0; i < this->dim_i; ++i) {
+            for (size_t j = 0; j < this->dim_inn; ++j) {
+                printf("  adding a connection between input neuron %2d[%2d] and inner neuron  %2d[%2d], connection index: %3d\n",
+                       (int) i, (int) (first_input_neuron + i), (int) j, (int) (first_inner_neuron + j),
+                       (int) connection_idx);
+                new_net->add_existing_connection(first_input_neuron + i, first_inner_neuron + j, connection_idx,
+                                                 *this->solution);
+                connection_idx++;
+            }
+        }
+        printf("----------------------------------------------------------------------------------------------------\n");
+
+        /* connections between inner neurons and the first set of 'glueing' neurons */
+        for (size_t i = 0; i < this->dim_inn; ++i) {
+            printf("  adding a connection between inner neuron %2d[%2d] and glue neuron   %2d[%2d], connection index: %3d\n",
+                   (int) i, (int) (first_inner_neuron + i), (int) i, (int) (first_glue_neuron + i),
+                   (int) connection_idx);
+            new_net->add_existing_connection(first_inner_neuron + i, first_glue_neuron + i, connection_idx,
+                                             *this->solution);
             connection_idx++;
         }
-    }
-    printf("----------------------------------------------------------------------------------------------------\n");
-
-    /* connections between inner neurons and the first set of 'glueing' neurons */
-    for(size_t i = 0; i < this->dim_inn; ++i){
-        printf("  adding a connection between inner neuron %2d[%2d] and glue neuron   %2d[%2d], connection index: %3d\n", (int)i, (int)(first_inner_neuron + i), (int)i, (int)(first_glue_neuron + i), (int)connection_idx);
-        new_net->add_existing_connection(first_inner_neuron + i, first_glue_neuron + i, connection_idx, *this->solution );
-        connection_idx++;
-    }
-    printf("----------------------------------------------------------------------------------------------------\n");
+        printf("----------------------------------------------------------------------------------------------------\n");
+
+        size_t pd_idx;
+        /* connections between glueing neurons */
+        for (size_t di = 0; di < derivative_degree - 1; ++di) {
+            pd_idx = partial_derivative_indices[di];/* partial derivative index */
+            for (size_t i = 0; i < this->dim_inn; ++i) {
+                connection_idx = pd_idx * this->dim_inn + i;
+                printf("  adding a connection between glue neuron  %2d[%2d] and glue neuron   %2d[%2d], connection index: %3d\n",
+                       (int) (i + (di) * this->dim_inn), (int) (first_glue_neuron + i + (di) * this->dim_inn),
+                       (int) (i + (di + 1) * this->dim_inn), (int) (first_glue_neuron + i + (di + 1) * this->dim_inn),
+                       (int) connection_idx);
+                new_net->add_existing_connection(first_glue_neuron + i + (di) * this->dim_inn,
+                                                 first_glue_neuron + i + (di + 1) * this->dim_inn, connection_idx,
+                                                 *this->solution);
+            }
+        }
+        printf("----------------------------------------------------------------------------------------------------\n");
 
-    size_t pd_idx;
-    /* connections between glueing neurons */
-    for(size_t di = 0; di < derivative_degree - 1; ++di){
-        pd_idx = partial_derivative_indices[di];/* partial derivative index */
-        for(size_t i = 0; i < this->dim_inn; ++i){
+        /* connection between the layer of glueing neurons toward the output neuron */
+        pd_idx = partial_derivative_indices[derivative_degree - 1];/* partial derivative index */
+        for (size_t i = 0; i < this->dim_inn; ++i) {
             connection_idx = pd_idx * this->dim_inn + i;
-            printf("  adding a connection between glue neuron  %2d[%2d] and glue neuron   %2d[%2d], connection index: %3d\n", (int)(i + (di)*this->dim_inn), (int)(first_glue_neuron + i + (di)*this->dim_inn), (int)(i + (di + 1)*this->dim_inn), (int)(first_glue_neuron + i + (di + 1)*this->dim_inn), (int)connection_idx);
-            new_net->add_existing_connection(first_glue_neuron + i + (di)*this->dim_inn, first_glue_neuron + i + (di + 1)*this->dim_inn, connection_idx, *this->solution );
+            printf("  adding a connection between glue neuron %2d[%2d] and output neuron  %2d[%2d], connection index: %3d\n",
+                   (int) (i + (derivative_degree - 1) * this->dim_inn),
+                   (int) (first_glue_neuron + i + (derivative_degree - 1) * this->dim_inn), 0,
+                   (int) (first_output_neuron), (int) connection_idx);
+            new_net->add_existing_connection(first_glue_neuron + i + (derivative_degree - 1) * this->dim_inn,
+                                             first_output_neuron, connection_idx, *this->solution);
         }
-    }
-    printf("----------------------------------------------------------------------------------------------------\n");
-
-    /* connection between the layer of glueing neurons toward the output neuron */
-    pd_idx = partial_derivative_indices[derivative_degree - 1];/* partial derivative index */
-    for(size_t i = 0; i < this->dim_inn; ++i){
-        connection_idx = pd_idx * this->dim_inn + i;
-        printf("  adding a connection between glue neuron %2d[%2d] and output neuron  %2d[%2d], connection index: %3d\n", (int)(i + (derivative_degree - 1)*this->dim_inn), (int)(first_glue_neuron + i + (derivative_degree - 1)*this->dim_inn), 0, (int)(first_output_neuron), (int)connection_idx);
-        new_net->add_existing_connection(first_glue_neuron + i + (derivative_degree - 1)*this->dim_inn, first_output_neuron, connection_idx, *this->solution );
-    }
-
-    map_multiindices2nn[ alpha ] = new_net;
 
-    this->differential_equations->at( equation_idx )->add_network( new_net, expression_string );
-}
+        map_multiindices2nn[alpha] = new_net;
 
+        this->differential_equations->at(equation_idx)->add_network(new_net, expression_string);
+    }
 
-void DESolver::add_to_differential_equation( size_t equation_idx, std::string expression_string ) {
-
-    printf("Adding a known function '%s' to equation %d\n", expression_string.c_str( ), (int)equation_idx );
-    this->differential_equations->at( equation_idx )->add_network( nullptr, expression_string );
 
-}
+    void DESolver::add_to_differential_equation(size_t equation_idx, std::string expression_string) {
 
+        printf("Adding a known function '%s' to equation %d\n", expression_string.c_str(), (int) equation_idx);
+        this->differential_equations->at(equation_idx)->add_network(nullptr, expression_string);
 
-void DESolver::set_error_function(size_t equation_idx, ErrorFunctionType F, DataSet *conditions) {
-    if( equation_idx >= this->n_equations ){
-        throw std::invalid_argument( "The parameter 'equation_idx' is too large! It exceeds the number of differential equations." );
     }
 
-    this->errors_functions_types->at( equation_idx ) = F;
-    this->errors_functions_data_sets->at( equation_idx ) = conditions;
-}
 
-//TODO instead use general method with Optimizer as its argument (create hierarchy of optimizers)
-void DESolver::solve_via_particle_swarm(std::vector<double> *domain_bounds, double c1, double c2, double w,
-                                          size_t n_particles, size_t max_iters, double gamma,
-                                          double epsilon, double delta) {
-
-    NeuralNetwork *nn;
-    DataSet *ds;
+    void DESolver::set_error_function(size_t equation_idx, ErrorFunctionType F, DataSet *conditions) {
+        if (equation_idx >= this->n_equations) {
+            throw std::invalid_argument(
+                    "The parameter 'equation_idx' is too large! It exceeds the number of differential equations.");
+        }
 
-    /* DEFINITION OF THE PARTIAL ERROR FUNCTIONS */
-    std::vector<ErrorFunction*> error_functions( this->n_equations );
-    for(size_t i = 0; i < this->n_equations; ++i ){
-        nn = this->differential_equations->at( i );
-        ds = this->errors_functions_data_sets->at( i );
+        this->errors_functions_types->at(equation_idx) = F;
+        this->errors_functions_data_sets->at(equation_idx) = conditions;
+    }
 
-        if( this->errors_functions_types->at( i ) == ErrorFunctionType::ErrorFuncMSE ){
-            error_functions[i] = new MSE( nn, ds );
-        }
-        else{
-            //default
-            error_functions[i] = new MSE( nn, ds );
+//TODO instead use general method with Optimizer as its argument (create hierarchy of optimizers)
+    void DESolver::solve_via_particle_swarm(std::vector<double> *domain_bounds, double c1, double c2, double w,
+                                            size_t n_particles, size_t max_iters, double gamma,
+                                            double epsilon, double delta) {
+
+        NeuralNetwork *nn;
+        DataSet *ds;
+
+        /* DEFINITION OF THE PARTIAL ERROR FUNCTIONS */
+        std::vector<ErrorFunction *> error_functions(this->n_equations);
+        for (size_t i = 0; i < this->n_equations; ++i) {
+            nn = this->differential_equations->at(i);
+            ds = this->errors_functions_data_sets->at(i);
+
+            if (this->errors_functions_types->at(i) == ErrorFunctionType::ErrorFuncMSE) {
+                error_functions[i] = new MSE(nn, ds);
+            } else {
+                //default
+                error_functions[i] = new MSE(nn, ds);
+            }
         }
-    }
 
-    /* DEFINITION OF THE GLOBAL ERROR FUNCTION */
-    ErrorSum total_error;
-    for(size_t i = 0; i < this->n_equations; ++i ) {
-        total_error.add_error_function( error_functions[i], 1.0 );
-    }
+        /* DEFINITION OF THE GLOBAL ERROR FUNCTION */
+        ErrorSum total_error;
+        for (size_t i = 0; i < this->n_equations; ++i) {
+            total_error.add_error_function(error_functions[i], 1.0);
+        }
 
-    ParticleSwarm swarm_01(&total_error, domain_bounds, c1, c2, w, n_particles, max_iters);
+        ParticleSwarm swarm_01(&total_error, domain_bounds, c1, c2, w, n_particles, max_iters);
 
-    this->solution->randomize_weights();
-    this->solution->randomize_biases();
+        this->solution->randomize_weights();
+        this->solution->randomize_biases();
 
-    swarm_01.optimize(gamma, epsilon, delta);
+        swarm_01.optimize(gamma, epsilon, delta);
 
-    this->solution->copy_parameter_space(swarm_01.get_solution());
-}
+        this->solution->copy_parameter_space(swarm_01.get_solution());
+    }
 
-NeuralNetwork* DESolver::get_solution( MultiIndex &alpha ) {
-    return this->map_multiindices2nn[ alpha ];
-}
+    NeuralNetwork *DESolver::get_solution(MultiIndex &alpha) {
+        return this->map_multiindices2nn[alpha];
+    }
 
-double DESolver::eval_equation( size_t equation_idx, std::vector<double> *weight_and_biases, std::vector<double> &input ) {
-    std::vector<double> output(1);
+    double
+    DESolver::eval_equation(size_t equation_idx, std::vector<double> *weight_and_biases, std::vector<double> &input) {
+        std::vector<double> output(1);
 
-    this->differential_equations->at( equation_idx )->eval_single( input, output, weight_and_biases );
+        this->differential_equations->at(equation_idx)->eval_single(input, output, weight_and_biases);
 
 //    printf("Input: ");
 //    for( auto e: input ){
@@ -389,34 +417,35 @@ double DESolver::eval_equation( size_t equation_idx, std::vector<double> *weight
 //    }
 //    printf("\n");
 
-    return output[0];
-}
+        return output[0];
+    }
 
-double DESolver::eval_total_error(std::vector<double> &weights_and_biases) {
+    double DESolver::eval_total_error(std::vector<double> &weights_and_biases) {
 
-    NeuralNetwork *nn;
-    DataSet *ds;
+        NeuralNetwork *nn;
+        DataSet *ds;
 
-    /* DEFINITION OF THE PARTIAL ERROR FUNCTIONS */
-    std::vector<ErrorFunction*> error_functions( this->n_equations );
-    for(size_t i = 0; i < this->n_equations; ++i ){
-        nn = this->differential_equations->at( i );
-        ds = this->errors_functions_data_sets->at( i );
+        /* DEFINITION OF THE PARTIAL ERROR FUNCTIONS */
+        std::vector<ErrorFunction *> error_functions(this->n_equations);
+        for (size_t i = 0; i < this->n_equations; ++i) {
+            nn = this->differential_equations->at(i);
+            ds = this->errors_functions_data_sets->at(i);
 
-        if( this->errors_functions_types->at( i ) == ErrorFunctionType::ErrorFuncMSE ){
-            error_functions[i] = new MSE( nn, ds );
+            if (this->errors_functions_types->at(i) == ErrorFunctionType::ErrorFuncMSE) {
+                error_functions[i] = new MSE(nn, ds);
+            } else {
+                //default
+                error_functions[i] = new MSE(nn, ds);
+            }
         }
-        else{
-            //default
-            error_functions[i] = new MSE( nn, ds );
+
+        /* DEFINITION OF THE GLOBAL ERROR FUNCTION */
+        ErrorSum total_error;
+        for (size_t i = 0; i < this->n_equations; ++i) {
+            total_error.add_error_function(error_functions[i], 1.0);
         }
-    }
 
-    /* DEFINITION OF THE GLOBAL ERROR FUNCTION */
-    ErrorSum total_error;
-    for(size_t i = 0; i < this->n_equations; ++i ) {
-        total_error.add_error_function( error_functions[i], 1.0 );
+        return total_error.eval(&weights_and_biases);
     }
 
-    return total_error.eval( &weights_and_biases );
 }
\ No newline at end of file
diff --git a/src/Solvers/DESolver.h b/src/Solvers/DESolver.h
index ecc0ad194e01b95987f43ab6f5724b501129ea86..f1a966a459b6a50e264802828ddd7e071346511b 100644
--- a/src/Solvers/DESolver.h
+++ b/src/Solvers/DESolver.h
@@ -13,9 +13,9 @@
 #ifndef INC_4NEURO_PDESOLVER_H
 #define INC_4NEURO_PDESOLVER_H
 
-#include "../settings.h"
-
 #include <map>
+
+#include "../settings.h"
 #include "../DataSet/DataSet.h"
 #include "../Network/NeuralNetwork.h"
 #include "../Network/NeuralNetworkSum.h"
@@ -24,155 +24,158 @@
 #include "../Neuron/NeuronLogistic.h"
 #include "../LearningMethods/ParticleSwarm.h"
 
-/**
- * class representing a multi-index of partial derivatives
- */
-class MultiIndex{
-private:
-    /**
-     * total number of variables
-     */
-    size_t dim;
-
-    /**
-     * a vector containing degrees of partial derivatives with respect to each variable
-     */
-    std::vector<size_t> partial_derivatives_degrees;
-
-public:
-    /**
-     *
-     * @param dimension
-     */
-    LIB4NEURO_API MultiIndex(size_t dimension);
-
-
-    /**
-     *
-     * @param index
-     * @param value
-     */
-    LIB4NEURO_API void set_partial_derivative(size_t index, size_t value);
-
-    /**
-     *
-     * @return
-     */
-    LIB4NEURO_API std::vector<size_t>* get_partial_derivatives_degrees( ) ;
-
-
-    /**
-     *
-     * @param rhs
-     * @return
-     */
-    LIB4NEURO_API bool operator <(const MultiIndex& rhs) const;
-
-    /**
-     *
-     * @return
-     */
-    LIB4NEURO_API std::string to_string( ) const ;
+namespace lib4neuro {
 
     /**
-     *
-     * @return
+     * class representing a multi-index of partial derivatives
      */
-    LIB4NEURO_API size_t get_degree( ) const ;
-};
-
-
-
-class DESolver {
-private:
-
-    /* Mapping between used multiindices of partial derivatices and already defined NNs (in order to not define
-     * the same NN multiple times )*/
-    std::map<MultiIndex, NeuralNetwork*> map_multiindices2nn;
-
-    /* A list of the differential equations */
-    std::vector<NeuralNetworkSum*> * differential_equations = nullptr;
-
-    /* Error functions for differential equations */
-    std::vector<ErrorFunctionType> * errors_functions_types = nullptr;
-    std::vector<DataSet*> * errors_functions_data_sets = nullptr;
-
-    /* NN as the unknown function */
-    NeuralNetwork * solution = nullptr;
-
-    /* auxilliary variables */
-    std::vector<NeuronLogistic*> *solution_inner_neurons = nullptr;
-    size_t dim_i = 0, dim_inn = 0, n_equations = 0;
-
-public:
-    /**
-     * The attempted solution will contain 'm' inner neurons between the input neurons and the output neuron
-     * @param n_equations
-     * @param n_inputs
-     * @param m
-     */
-    LIB4NEURO_API DESolver( size_t n_equations, size_t n_inputs, size_t m );
-
-    /**
-     * default destructor
-     */
-    LIB4NEURO_API ~DESolver( );
-
-    /**
-     * Adds a new summand multiplied by 'beta' into the 'equation_idx'-th differential equation
-     * @param equation_idx
-     * @param alpha
-     * @param beta
-     */
-    LIB4NEURO_API void add_to_differential_equation( size_t equation_idx, MultiIndex &alpha, std::string expression_string );
-
-
-    /**
-     * Adds a known function to the equation
-     * @param equation_idx
-     * @param expression_string
-     */
-    LIB4NEURO_API void add_to_differential_equation( size_t equation_idx, std::string expression_string );
-
-    /**
-     * Sets the error function for the differential equation with the corresponding index
-     * @param equation_idx
-     * @param F
-     * @param conditions
-     */
-    LIB4NEURO_API void set_error_function(size_t equation_idx, ErrorFunctionType F, DataSet *conditions);
-
-
-
-    LIB4NEURO_API void solve_via_particle_swarm(
-            std::vector<double> *domain_bounds,
-            double c1,
-            double c2,
-            double w,
-            size_t n_particles,
-            size_t max_iters,
-            double gamma,
-            double epsilon,
-            double delta
-            );
-
-    /**
-     * returns the pointer to the object representing the given partial derivative of the solution
-     * @return
-     */
-    LIB4NEURO_API NeuralNetwork* get_solution( MultiIndex &alpha );
-
-    /**
-     * For testing purposes only
-     */
-     LIB4NEURO_API double eval_equation( size_t equation_idx, std::vector<double> *weights_and_biases, std::vector<double> &input );
-
-     /**
-      * For testing purposes only
-      * @return
-      */
-     LIB4NEURO_API double eval_total_error( std::vector<double> &weights_and_biases );
-};
-
+    class MultiIndex {
+    private:
+        /**
+         * total number of variables
+         */
+        size_t dim;
+
+        /**
+         * a vector containing degrees of partial derivatives with respect to each variable
+         */
+        std::vector<size_t> partial_derivatives_degrees;
+
+    public:
+        /**
+         *
+         * @param dimension
+         */
+        LIB4NEURO_API MultiIndex(size_t dimension);
+
+
+        /**
+         *
+         * @param index
+         * @param value
+         */
+        LIB4NEURO_API void set_partial_derivative(size_t index, size_t value);
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API std::vector<size_t> *get_partial_derivatives_degrees();
+
+
+        /**
+         *
+         * @param rhs
+         * @return
+         */
+        LIB4NEURO_API bool operator<(const MultiIndex &rhs) const;
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API std::string to_string() const;
+
+        /**
+         *
+         * @return
+         */
+        LIB4NEURO_API size_t get_degree() const;
+    };
+
+
+    class DESolver {
+    private:
+
+        /* Mapping between used multiindices of partial derivatices and already defined NNs (in order to not define
+         * the same NN multiple times )*/
+        std::map<MultiIndex, NeuralNetwork *> map_multiindices2nn;
+
+        /* A list of the differential equations */
+        std::vector<NeuralNetworkSum *> *differential_equations = nullptr;
+
+        /* Error functions for differential equations */
+        std::vector<ErrorFunctionType> *errors_functions_types = nullptr;
+        std::vector<DataSet *> *errors_functions_data_sets = nullptr;
+
+        /* NN as the unknown function */
+        NeuralNetwork *solution = nullptr;
+
+        /* auxilliary variables */
+        std::vector<NeuronLogistic *> *solution_inner_neurons = nullptr;
+        size_t dim_i = 0, dim_inn = 0, n_equations = 0;
+
+    public:
+        /**
+         * The attempted solution will contain 'm' inner neurons between the input neurons and the output neuron
+         * @param n_equations
+         * @param n_inputs
+         * @param m
+         */
+        LIB4NEURO_API DESolver(size_t n_equations, size_t n_inputs, size_t m);
+
+        /**
+         * default destructor
+         */
+        LIB4NEURO_API ~DESolver();
+
+        /**
+         * Adds a new summand multiplied by 'beta' into the 'equation_idx'-th differential equation
+         * @param equation_idx
+         * @param alpha
+         * @param beta
+         */
+        LIB4NEURO_API void
+        add_to_differential_equation(size_t equation_idx, MultiIndex &alpha, std::string expression_string);
+
+
+        /**
+         * Adds a known function to the equation
+         * @param equation_idx
+         * @param expression_string
+         */
+        LIB4NEURO_API void add_to_differential_equation(size_t equation_idx, std::string expression_string);
+
+        /**
+         * Sets the error function for the differential equation with the corresponding index
+         * @param equation_idx
+         * @param F
+         * @param conditions
+         */
+        LIB4NEURO_API void set_error_function(size_t equation_idx, ErrorFunctionType F, DataSet *conditions);
+
+
+        LIB4NEURO_API void solve_via_particle_swarm(
+                std::vector<double> *domain_bounds,
+                double c1,
+                double c2,
+                double w,
+                size_t n_particles,
+                size_t max_iters,
+                double gamma,
+                double epsilon,
+                double delta
+        );
+
+        /**
+         * returns the pointer to the object representing the given partial derivative of the solution
+         * @return
+         */
+        LIB4NEURO_API NeuralNetwork *get_solution(MultiIndex &alpha);
+
+        /**
+         * For testing purposes only
+         */
+        LIB4NEURO_API double
+        eval_equation(size_t equation_idx, std::vector<double> *weights_and_biases, std::vector<double> &input);
+
+        /**
+         * For testing purposes only
+         * @return
+         */
+        LIB4NEURO_API double eval_total_error(std::vector<double> &weights_and_biases);
+    };
+
+}
 
 #endif //INC_4NEURO_PDESOLVER_H
diff --git a/src/constants.h b/src/constants.h
index 8c2674a51a351abbe3a8d2a8b52e46ca213f7a96..e31a0a3a02f797be84ec32d34ee5acbde2a9ac5a 100644
--- a/src/constants.h
+++ b/src/constants.h
@@ -5,7 +5,9 @@
 #ifndef INC_4NEURO_CONSTANTS_H
 #define INC_4NEURO_CONSTANTS_H
 
-#define E 2.7182818284590
-#define PI 3.14159265358979323846
+namespace lib4neuro {
+    const double E = 2.7182818284590;
+    const double PI = 3.14159265358979323846;
+}
 
 #endif //INC_4NEURO_CONSTANTS_H
diff --git a/src/examples/main.cpp b/src/examples/main.cpp
index c2d3a38cf22ae01b7795437630e918a7fa605c10..6010c1443279d9a7a89507df1423cbbefdb548a5 100644
--- a/src/examples/main.cpp
+++ b/src/examples/main.cpp
@@ -16,9 +16,6 @@
 
 
 int main(int argc, char** argv){
-    MSG_INFO("INFO MESSAGE!");
-
-    MSG_DEBUG("DEBUG MESSAGE");
 
     return 0;
 }
diff --git a/src/examples/net_test_1.cpp b/src/examples/net_test_1.cpp
index cce48453df61831b97fd79b024360f76421cae81..0e6fb421407a11bd3991648c822d54eb264430f9 100644
--- a/src/examples/net_test_1.cpp
+++ b/src/examples/net_test_1.cpp
@@ -31,29 +31,29 @@ int main() {
     out = {0.75};
     data_vec.emplace_back(std::make_pair(inp, out));
 
-    DataSet ds(&data_vec);
+    l4n::DataSet ds(&data_vec);
 
     /* NETWORK DEFINITION */
-    NeuralNetwork net;
+    l4n::NeuralNetwork net;
 
     /* Input neurons */
-    NeuronLinear *i1 = new NeuronLinear( );  //f(x) = x
-    NeuronLinear *i2 = new NeuronLinear( );  //f(x) = x
+    l4n::NeuronLinear *i1 = new l4n::NeuronLinear( );  //f(x) = x
+    l4n::NeuronLinear *i2 = new l4n::NeuronLinear( );  //f(x) = x
 
     /* Output neuron */
-    NeuronLinear *o1 = new NeuronLinear( );  //f(x) = x
+    l4n::NeuronLinear *o1 = new l4n::NeuronLinear( );  //f(x) = x
 
 
 
     /* Adding neurons to the net */
-    size_t idx1 = net.add_neuron(i1, BIAS_TYPE::NO_BIAS);
-    size_t idx2 = net.add_neuron(i2, BIAS_TYPE::NO_BIAS);
-    size_t idx3 = net.add_neuron(o1, BIAS_TYPE::NO_BIAS);
+    size_t idx1 = net.add_neuron(i1, l4n::BIAS_TYPE::NO_BIAS);
+    size_t idx2 = net.add_neuron(i2, l4n::BIAS_TYPE::NO_BIAS);
+    size_t idx3 = net.add_neuron(o1, l4n::BIAS_TYPE::NO_BIAS);
 //
 
     /* Adding connections */
-    net.add_connection_simple(idx1, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
-    net.add_connection_simple(idx2, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
+    net.add_connection_simple(idx1, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
+    net.add_connection_simple(idx2, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
 
     //net.randomize_weights();
 
@@ -68,7 +68,7 @@ int main() {
     net.specify_input_neurons(net_input_neurons_indices);
     net.specify_output_neurons(net_output_neurons_indices);
     /* ERROR FUNCTION SPECIFICATION */
-    MSE mse(&net, &ds);
+    l4n::MSE mse(&net, &ds);
 
     /* TRAINING METHOD SETUP */
     std::vector<double> domain_bounds = {-10.0, 10.0, -10.0, 10.0, -10.0, 10.0};
diff --git a/src/examples/net_test_2.cpp b/src/examples/net_test_2.cpp
index 9f8562e0ec0a0f494498965da3fb4bee94f21889..e0be02e2b26e9497e839f73df117fb4bdfac1603 100644
--- a/src/examples/net_test_2.cpp
+++ b/src/examples/net_test_2.cpp
@@ -34,35 +34,35 @@ int main() {
     inp = {0, 0, 1.25};
     out = {0, 0.63};
     data_vec.emplace_back(std::make_pair(inp, out));
-    DataSet ds(&data_vec);
+    l4n::DataSet ds(&data_vec);
 
     /* NETWORK DEFINITION */
-    NeuralNetwork net;
+    l4n::NeuralNetwork net;
 
     /* Input neurons */
-    NeuronLinear *i1 = new NeuronLinear( );  //f(x) = x
-    NeuronLinear *i2 = new NeuronLinear( );  //f(x) = x
+    l4n::NeuronLinear *i1 = new l4n::NeuronLinear( );  //f(x) = x
+    l4n::NeuronLinear *i2 = new l4n::NeuronLinear( );  //f(x) = x
 
     double b = 1;//bias
-    NeuronLinear *i3 = new NeuronLinear( );  //f(x) = x
+    l4n::NeuronLinear *i3 = new l4n::NeuronLinear( );  //f(x) = x
 
     /* Output neurons */
-    NeuronLinear *o1 = new NeuronLinear( );  //f(x) = x
-    NeuronLinear *o2 = new NeuronLinear( );  //f(x) = x
+    l4n::NeuronLinear *o1 = new l4n::NeuronLinear( );  //f(x) = x
+    l4n::NeuronLinear *o2 = new l4n::NeuronLinear( );  //f(x) = x
 
 
 
     /* Adding neurons to the nets */
-    size_t idx1 = net.add_neuron(i1, BIAS_TYPE::NO_BIAS);
-    size_t idx2 = net.add_neuron(i2, BIAS_TYPE::NO_BIAS);
-    size_t idx3 = net.add_neuron(o1, BIAS_TYPE::NEXT_BIAS);
-    size_t idx4 = net.add_neuron(i3, BIAS_TYPE::NEXT_BIAS);
-    size_t idx5 = net.add_neuron(o2, BIAS_TYPE::NEXT_BIAS);
+    size_t idx1 = net.add_neuron(i1, l4n::BIAS_TYPE::NO_BIAS);
+    size_t idx2 = net.add_neuron(i2, l4n::BIAS_TYPE::NO_BIAS);
+    size_t idx3 = net.add_neuron(o1, l4n::BIAS_TYPE::NEXT_BIAS);
+    size_t idx4 = net.add_neuron(i3, l4n::BIAS_TYPE::NEXT_BIAS);
+    size_t idx5 = net.add_neuron(o2, l4n::BIAS_TYPE::NEXT_BIAS);
 
     /* Adding connections */
-    net.add_connection_simple(idx1, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 0
-    net.add_connection_simple(idx2, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 1
-    net.add_connection_simple(idx4, idx5, SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT, 0); // AGAIN weight index 0 - same weight!
+    net.add_connection_simple(idx1, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 0
+    net.add_connection_simple(idx2, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 1
+    net.add_connection_simple(idx4, idx5, l4n::SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT, 0); // AGAIN weight index 0 - same weight!
 
     net.randomize_weights();
 
@@ -78,12 +78,10 @@ int main() {
 
     net.specify_input_neurons(net_input_neurons_indices);
     net.specify_output_neurons(net_output_neurons_indices);
-
-
-
+    
 
     /* COMPLEX ERROR FUNCTION SPECIFICATION */
-    MSE mse(&net, &ds);
+    l4n::MSE mse(&net, &ds);
 
 //    double weights[2] = {-0.18012411, -0.17793740};
 //    double weights[2] = {1, 1};
diff --git a/src/examples/net_test_3.cpp b/src/examples/net_test_3.cpp
index 50bb9950c1b8446ecea7dd8f1a5967e2b5fb960c..63f1a6f90f432e474a040596c22542b57b922816 100644
--- a/src/examples/net_test_3.cpp
+++ b/src/examples/net_test_3.cpp
@@ -29,40 +29,40 @@ int main() {
     out = {0.75};
     data_vec_01.emplace_back(std::make_pair(inp, out));
 
-    DataSet ds_01(&data_vec_01);
+    l4n::DataSet ds_01(&data_vec_01);
 
 
     inp = {1.25};
     out = {0.63};
     data_vec_02.emplace_back(std::make_pair(inp, out));
-    DataSet ds_02(&data_vec_02);
+    l4n::DataSet ds_02(&data_vec_02);
 
     /* NETWORK DEFINITION */
-    NeuralNetwork net;
+    l4n::NeuralNetwork net;
 
     /* Input neurons */
-    NeuronLinear *i1 = new NeuronLinear();  //f(x) = x
-    NeuronLinear *i2 = new NeuronLinear();  //f(x) = x
+    l4n::NeuronLinear *i1 = new l4n::NeuronLinear();  //f(x) = x
+    l4n::NeuronLinear *i2 = new l4n::NeuronLinear();  //f(x) = x
 
-    NeuronLinear *i3 = new NeuronLinear( ); //f(x) = x
+    l4n::NeuronLinear *i3 = new l4n::NeuronLinear( ); //f(x) = x
 
     /* Output neurons */
-    NeuronLinear *o1 = new NeuronLinear( );  //f(x) = x
-    NeuronLinear *o2 = new NeuronLinear( );  //f(x) = x
+    l4n::NeuronLinear *o1 = new l4n::NeuronLinear( );  //f(x) = x
+    l4n::NeuronLinear *o2 = new l4n::NeuronLinear( );  //f(x) = x
 
 
 
     /* Adding neurons to the nets */
-    size_t idx1 = net.add_neuron(i1, BIAS_TYPE::NO_BIAS);
-    size_t idx2 = net.add_neuron(i2, BIAS_TYPE::NO_BIAS);
-    size_t idx3 = net.add_neuron(o1, BIAS_TYPE::NEXT_BIAS);
-    size_t idx4 = net.add_neuron(i3, BIAS_TYPE::NEXT_BIAS);
-    size_t idx5 = net.add_neuron(o2, BIAS_TYPE::NEXT_BIAS);
+    size_t idx1 = net.add_neuron(i1, l4n::BIAS_TYPE::NO_BIAS);
+    size_t idx2 = net.add_neuron(i2, l4n::BIAS_TYPE::NO_BIAS);
+    size_t idx3 = net.add_neuron(o1, l4n::BIAS_TYPE::NEXT_BIAS);
+    size_t idx4 = net.add_neuron(i3, l4n::BIAS_TYPE::NEXT_BIAS);
+    size_t idx5 = net.add_neuron(o2, l4n::BIAS_TYPE::NEXT_BIAS);
 
     /* Adding connections */
-    net.add_connection_simple(idx1, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 0
-    net.add_connection_simple(idx2, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 1
-    net.add_connection_simple(idx4, idx5, SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT, 0); // AGAIN weight index 0 - same weight!
+    net.add_connection_simple(idx1, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 0
+    net.add_connection_simple(idx2, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 1
+    net.add_connection_simple(idx4, idx5, l4n::SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT, 0); // AGAIN weight index 0 - same weight!
 
     net.randomize_weights();
 
@@ -88,18 +88,18 @@ int main() {
     subnet_01_input_neurons.push_back(idx1);
     subnet_01_input_neurons.push_back(idx2);
     subnet_01_output_neurons.push_back(idx3);
-    NeuralNetwork *subnet_01 = net.get_subnet( subnet_01_input_neurons, subnet_01_output_neurons );
+    l4n::NeuralNetwork *subnet_01 = net.get_subnet( subnet_01_input_neurons, subnet_01_output_neurons );
 
     subnet_02_input_neurons.push_back(idx4);
     subnet_02_output_neurons.push_back(idx5);
-    NeuralNetwork *subnet_02 = net.get_subnet( subnet_02_input_neurons, subnet_02_output_neurons );
+    l4n::NeuralNetwork *subnet_02 = net.get_subnet( subnet_02_input_neurons, subnet_02_output_neurons );
 
     if(subnet_01 && subnet_02){
         /* COMPLEX ERROR FUNCTION SPECIFICATION */
-        MSE mse_01(subnet_01, &ds_01);
-        MSE mse_02(subnet_02, &ds_02);
+        l4n::MSE mse_01(subnet_01, &ds_01);
+        l4n::MSE mse_02(subnet_02, &ds_02);
 
-        ErrorSum mse_sum;
+        l4n::ErrorSum mse_sum;
         mse_sum.add_error_function( &mse_01 );
         mse_sum.add_error_function( &mse_02 );
 
diff --git a/src/examples/net_test_harmonic_oscilator.cpp b/src/examples/net_test_harmonic_oscilator.cpp
index 17fddd014d734cd2e8d419b34ee781817e06f8cb..12286a2c42a87768296907fbd5e39101bed4934a 100644
--- a/src/examples/net_test_harmonic_oscilator.cpp
+++ b/src/examples/net_test_harmonic_oscilator.cpp
@@ -21,11 +21,11 @@ void test_harmonic_oscilator_fixed_E(double EE, double accuracy, size_t n_inner_
     /* SOLVER SETUP */
     size_t n_inputs = 1;
     size_t n_equations = 1;
-    DESolver solver( n_equations, n_inputs, n_inner_neurons );
+    l4n::DESolver solver( n_equations, n_inputs, n_inner_neurons );
 
     /* SETUP OF THE EQUATIONS */
-    MultiIndex alpha_0( n_inputs );
-    MultiIndex alpha_2( n_inputs );
+    l4n::MultiIndex alpha_0( n_inputs );
+    l4n::MultiIndex alpha_2( n_inputs );
     alpha_2.set_partial_derivative(0, 2);
 
     /* the governing differential equation */
@@ -56,10 +56,10 @@ void test_harmonic_oscilator_fixed_E(double EE, double accuracy, size_t n_inner_
 //    out = {1.0};
 //    data_vec_g.emplace_back(std::make_pair(inp, out));
 
-    DataSet ds_00(&data_vec_g);
+    l4n::DataSet ds_00(&data_vec_g);
 
     /* Placing the conditions into the solver */
-    solver.set_error_function( 0, ErrorFunctionType::ErrorFuncMSE, &ds_00 );
+    solver.set_error_function( 0, l4n::ErrorFunctionType::ErrorFuncMSE, &ds_00 );
 
     /* PARTICLE SWARM TRAINING METHOD SETUP */
     size_t total_dim = (2 + n_inputs) * n_inner_neurons;
@@ -89,7 +89,7 @@ void test_harmonic_oscilator_fixed_E(double EE, double accuracy, size_t n_inner_
     double delta = 0.9;
     solver.solve_via_particle_swarm( &domain_bounds, c1, c2, w, n_particles, max_iters, gamma, epsilon, delta );
 
-    NeuralNetwork *solution = solver.get_solution( alpha_0 );
+    l4n::NeuralNetwork *solution = solver.get_solution( alpha_0 );
     std::vector<double> parameters(total_dim);//w1, a1, b1, w2, a2, b2, ... , wm, am, bm
     std::vector<double> *weight_params = solution->get_parameter_ptr_weights();
     std::vector<double> *biases_params = solution->get_parameter_ptr_biases();
diff --git a/src/examples/net_test_ode_1.cpp b/src/examples/net_test_ode_1.cpp
index 8f78c49750ca8a6b6edf6f9757813db8dbd4a982..7c5ccb2ebf967022973d502c0ea13cb80d6b4edc 100644
--- a/src/examples/net_test_ode_1.cpp
+++ b/src/examples/net_test_ode_1.cpp
@@ -22,15 +22,15 @@
 #include "4neuro.h"
 
 double eval_f(double x){
-    return std::pow(E, -2.0 * x) * (3.0 * x + 1.0);
+    return std::pow(l4n::E, -2.0 * x) * (3.0 * x + 1.0);
 }
 
 double eval_df(double x){
-    return std::pow(E, -2.0 * x) * (1.0 - 6.0 * x);
+    return std::pow(l4n::E, -2.0 * x) * (1.0 - 6.0 * x);
 }
 
 double eval_ddf(double x){
-    return 4.0 * std::pow(E, -2.0 * x) * (3.0 * x - 2.0);
+    return 4.0 * std::pow(l4n::E, -2.0 * x) * (3.0 * x - 2.0);
 }
 
 double eval_approx_f(double x, size_t n_inner_neurons, std::vector<double> &parameters){
@@ -41,7 +41,7 @@ double eval_approx_f(double x, size_t n_inner_neurons, std::vector<double> &para
         ai = parameters[3 * i + 1];
         bi = parameters[3 * i + 2];
 
-        ei = std::pow(E, bi - wi * x);
+        ei = std::pow(l4n::E, bi - wi * x);
         ei1 = ei + 1.0;
 
         value += ai / (ei1);
@@ -57,7 +57,7 @@ double eval_approx_df(double x, size_t n_inner_neurons, std::vector<double> &par
         ai = parameters[3 * i + 1];
         bi = parameters[3 * i + 2];
 
-        ei = std::pow(E, bi - wi * x);
+        ei = std::pow(l4n::E, bi - wi * x);
         ei1 = ei + 1.0;
 
         value += ai * wi * ei / (ei1 * ei1);
@@ -76,8 +76,8 @@ double eval_approx_ddf(double x, size_t n_inner_neurons, std::vector<double> &pa
         bi = parameters[3 * i + 2];
 
 
-        eb = std::pow(E, bi);
-        ewx = std::pow(E, wi * x);
+        eb = std::pow(l4n::E, bi);
+        ewx = std::pow(l4n::E, wi * x);
 
         value += -(ai*wi*wi*eb*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
     }
@@ -92,7 +92,7 @@ double eval_approx_dw_f(double x, size_t neuron_idx, std::vector<double> &parame
     ai = parameters[3 * neuron_idx + 1];
     bi = parameters[3 * neuron_idx + 2];
 
-    ei = std::pow(E, bi - wi * x);
+    ei = std::pow(l4n::E, bi - wi * x);
     ei1 = ei + 1.0;
 
     return (ai * x * ei) / (ei1 * ei1);
@@ -107,7 +107,7 @@ double eval_approx_dw_df(double x, size_t neuron_idx, std::vector<double> &param
     ai = parameters[3 * neuron_idx + 1];
     bi = parameters[3 * neuron_idx + 2];
 
-    ei = std::pow(E, bi - wi * x);
+    ei = std::pow(l4n::E, bi - wi * x);
     ei1 = ei + 1.0;
 
     return -(ai * wi * x * ei)/(ei1 * ei1) + (2.0*ai*wi*x*ei*ei)/(ei1 * ei1 * ei1) + (ai* ei)/(ei1 * ei1);
@@ -121,8 +121,8 @@ double eval_approx_dw_ddf(double x, size_t neuron_idx, std::vector<double> &para
     ai = parameters[3 * neuron_idx + 1];
     bi = parameters[3 * neuron_idx + 2];
 
-    eb = std::pow(E, bi);
-    ewx = std::pow(E, wi * x);
+    eb = std::pow(l4n::E, bi);
+    ewx = std::pow(l4n::E, wi * x);
 
     return  -(ai*wi*wi* x * eb*ewx*ewx)/((eb + ewx)*(eb + ewx)*(eb + ewx)) - (ai*wi*wi*x*eb*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx)) + (3*ai*wi*wi*x*eb*ewx*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx)*(eb + ewx)) - (2*ai*wi*eb*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
 }
@@ -134,7 +134,7 @@ double eval_approx_da_f(double x, size_t neuron_idx, std::vector<double> &parame
     wi = parameters[3 * neuron_idx];
     bi = parameters[3 * neuron_idx + 2];
 
-    ei = std::pow(E, bi - wi * x);
+    ei = std::pow(l4n::E, bi - wi * x);
     ei1 = ei + 1.0;
 
     return 1.0 / ei1;
@@ -147,7 +147,7 @@ double eval_approx_da_df(double x, size_t neuron_idx, std::vector<double> &param
     wi = parameters[3 * neuron_idx];
     bi = parameters[3 * neuron_idx + 2];
 
-    ei = std::pow(E, bi - wi * x);
+    ei = std::pow(l4n::E, bi - wi * x);
     ei1 = ei + 1.0;
 
     return (wi*ei)/(ei1 * ei1);
@@ -160,9 +160,9 @@ double eval_approx_da_ddf(double x, size_t neuron_idx, std::vector<double> &para
     wi = parameters[3 * neuron_idx];
     bi = parameters[3 * neuron_idx + 2];
 
-    eip = std::pow(E, bi + wi * x);
-    eb = std::pow(E, bi);
-    ewx = std::pow(E, wi * x);
+    eip = std::pow(l4n::E, bi + wi * x);
+    eb = std::pow(l4n::E, bi);
+    ewx = std::pow(l4n::E, wi * x);
 
     return -(wi*wi*eip*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
 }
@@ -174,7 +174,7 @@ double eval_approx_db_f(double x, size_t neuron_idx, std::vector<double> &parame
     ai = parameters[3 * neuron_idx + 1];
     bi = parameters[3 * neuron_idx + 2];
 
-    ei = std::pow(E, bi - wi * x);
+    ei = std::pow(l4n::E, bi - wi * x);
     ei1 = ei + 1.0;
 
     return -(ai * ei)/(ei1 * ei1);
@@ -188,8 +188,8 @@ double eval_approx_db_df(double x, size_t neuron_idx, std::vector<double> &param
     ai = parameters[3 * neuron_idx + 1];
     bi = parameters[3 * neuron_idx + 2];
 
-    eb = std::pow(E, bi);
-    ewx = std::pow(E, wi*x);
+    eb = std::pow(l4n::E, bi);
+    ewx = std::pow(l4n::E, wi*x);
 
     return (ai* wi* eb*ewx* (ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
 }
@@ -202,8 +202,8 @@ double eval_approx_db_ddf(double x, size_t neuron_idx, std::vector<double> &para
     ai = parameters[3 * neuron_idx + 1];
     bi = parameters[3 * neuron_idx + 2];
 
-    eb = std::pow(E, bi);
-    ewx = std::pow(E, wi*x);
+    eb = std::pow(l4n::E, bi);
+    ewx = std::pow(l4n::E, wi*x);
 
     return -(ai* wi*wi* eb*ewx* (-4.0* eb*ewx + eb*eb + ewx*ewx))/((eb +ewx)*(eb +ewx)*(eb +ewx)*(eb +ewx));
 }
@@ -345,7 +345,7 @@ void test_analytical_gradient_y(std::vector<double> &guess, double accuracy, siz
 //        data_points[i] = x;
 //    }
 
-//    DataSet ds(0.0, 4.0, train_size, 0.0);
+//    l4n::DataSet ds(0.0, 4.0, train_size, 0.0);
 
     std::vector<double> *gradient_current = new std::vector<double>(3 * n_inner_neurons);
     std::vector<double> *gradient_prev = new std::vector<double>(3 * n_inner_neurons);
@@ -414,7 +414,7 @@ void test_analytical_gradient_y(std::vector<double> &guess, double accuracy, siz
                 sx += (gradient_current->at( i ) * gradient_prev->at( i ));
             }
             sx /= grad_norm * grad_norm_prev;
-            beta = std::sqrt(std::acos( sx ) / PI);
+            beta = std::sqrt(std::acos( sx ) / l4n::PI);
 
 
 //            eval_step_size_simple( gamma, val, prev_val, sk, grad_norm, grad_norm_prev );
@@ -485,12 +485,12 @@ void test_ode(double accuracy, size_t n_inner_neurons, size_t train_size, double
     /* SOLVER SETUP */
     size_t n_inputs = 1;
     size_t n_equations = 3;
-    DESolver solver_01( n_equations, n_inputs, n_inner_neurons );
+    l4n::DESolver solver_01( n_equations, n_inputs, n_inner_neurons );
 
     /* SETUP OF THE EQUATIONS */
-    MultiIndex alpha_0( n_inputs );
-    MultiIndex alpha_1( n_inputs );
-    MultiIndex alpha_2( n_inputs );
+    l4n::MultiIndex alpha_0( n_inputs );
+    l4n::MultiIndex alpha_1( n_inputs );
+    l4n::MultiIndex alpha_2( n_inputs );
     alpha_2.set_partial_derivative(0, 2);
     alpha_1.set_partial_derivative(0, 1);
 
@@ -535,26 +535,26 @@ void test_ode(double accuracy, size_t n_inner_neurons, size_t train_size, double
 //
 //        test_points[i] = inp[0];
 //    }
-    DataSet ds_00(&data_vec_g);
+    l4n::DataSet ds_00(&data_vec_g);
 
     /* TRAIN DATA FOR DIRICHLET BC */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_y;
     inp = {0.0};
     out = {1.0};
     data_vec_y.emplace_back(std::make_pair(inp, out));
-    DataSet ds_01(&data_vec_y);
+    l4n::DataSet ds_01(&data_vec_y);
 
     /* TRAIN DATA FOR NEUMANN BC */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_dy;
     inp = {0.0};
     out = {1.0};
     data_vec_dy.emplace_back(std::make_pair(inp, out));
-    DataSet ds_02(&data_vec_dy);
+    l4n::DataSet ds_02(&data_vec_dy);
 
     /* Placing the conditions into the solver */
-    solver_01.set_error_function( 0, ErrorFunctionType::ErrorFuncMSE, &ds_00 );
-    solver_01.set_error_function( 1, ErrorFunctionType::ErrorFuncMSE, &ds_01 );
-    solver_01.set_error_function( 2, ErrorFunctionType::ErrorFuncMSE, &ds_02 );
+    solver_01.set_error_function( 0, l4n::ErrorFunctionType::ErrorFuncMSE, &ds_00 );
+    solver_01.set_error_function( 1, l4n::ErrorFunctionType::ErrorFuncMSE, &ds_01 );
+    solver_01.set_error_function( 2, l4n::ErrorFunctionType::ErrorFuncMSE, &ds_02 );
 
 
     size_t total_dim = (2 + n_inputs) * n_inner_neurons;
@@ -626,9 +626,9 @@ void test_ode(double accuracy, size_t n_inner_neurons, size_t train_size, double
     double delta = 0.9;
     solver_01.solve_via_particle_swarm( &domain_bounds, c1, c2, w, n_particles, max_iters, gamma, epsilon, delta );
 
-    NeuralNetwork *solution = solver_01.get_solution( alpha_0 );
-    NeuralNetwork *solution_d = solver_01.get_solution( alpha_1 );
-    NeuralNetwork *solution_dd = solver_01.get_solution( alpha_2 );
+    l4n::NeuralNetwork *solution = solver_01.get_solution( alpha_0 );
+    l4n::NeuralNetwork *solution_d = solver_01.get_solution( alpha_1 );
+    l4n::NeuralNetwork *solution_dd = solver_01.get_solution( alpha_2 );
 
     std::vector<double> parameters(total_dim);//w1, a1, b1, w2, a2, b2, ... , wm, am, bm
     std::vector<double> *weight_params = solution->get_parameter_ptr_weights();
@@ -660,7 +660,7 @@ void test_ode(double accuracy, size_t n_inner_neurons, size_t train_size, double
         solution_dd->eval_single( inp, out);
         double DDF = out[0];
 
-        ofs << i + 1 << " " << x << " " << std::pow(E, -2*x) * (3*x + 1)<< " " << F << " " << std::pow(E, -2*x) * (1 - 6*x)<< " " << DF << " " << 4 * std::pow(E, -2*x) * (3*x - 2)<< " " << DDF << std::endl;
+        ofs << i + 1 << " " << x << " " << std::pow(l4n::E, -2*x) * (3*x + 1)<< " " << F << " " << std::pow(l4n::E, -2*x) * (1 - 6*x)<< " " << DF << " " << 4 * std::pow(l4n::E, -2*x) * (3*x - 2)<< " " << DDF << std::endl;
 
         printf("Exporting files 'data_1d_ode1.txt': %7.3f%%\r", (100.0 * i) / (n_test_points - 1));
         std::cout.flush();
@@ -681,7 +681,7 @@ void test_ode(double accuracy, size_t n_inner_neurons, size_t train_size, double
 //
 //        solution->eval_single(input, output);
 //
-//        std::cout << i + 1 << " " << x << " " << std::pow(E, -2*x) * (3*x + 1)<< " " << output[0] << " " << std::pow(E, -2*x) * (1 - 6*x)<< " " << eval_approx_df(x, n_inner_neurons, parameters) << " " << 4 * std::pow(E, -2*x) * (3*x - 2)<< " " << eval_approx_ddf(x, n_inner_neurons, parameters) << std::endl;
+//        std::cout << i + 1 << " " << x << " " << std::pow(l4n::E, -2*x) * (3*x + 1)<< " " << output[0] << " " << std::pow(l4n::E, -2*x) * (1 - 6*x)<< " " << eval_approx_df(x, n_inner_neurons, parameters) << " " << 4 * std::pow(l4n::E, -2*x) * (3*x - 2)<< " " << eval_approx_ddf(x, n_inner_neurons, parameters) << std::endl;
 //    }
 
 }
diff --git a/src/examples/net_test_pde_1.cpp b/src/examples/net_test_pde_1.cpp
index bff0202ab547e1d3e1da75f97b4530d3093628c3..e801e70ee4a5602725b38d4295a12af91f3f758c 100644
--- a/src/examples/net_test_pde_1.cpp
+++ b/src/examples/net_test_pde_1.cpp
@@ -34,7 +34,7 @@ double eval_approx_y(double x, double t, size_t n_inner_neurons, std::vector<dou
         ai  = parameters[4 * i + 2];
         bi  = parameters[4 * i + 3];
 
-        ei = std::pow(E, bi - wxi * x - wti * t);
+        ei = std::pow(l4n::E, bi - wxi * x - wti * t);
         ei1 = ei + 1.0;
 
         value += ai / (ei1);
@@ -52,7 +52,7 @@ double eval_approx_yt(double x, double t, size_t n_inner_neurons, std::vector<do
         ai  = parameters[4 * i + 2];
         bi  = parameters[4 * i + 3];
 
-        ei = std::pow(E, bi - wxi * x - wti * t);
+        ei = std::pow(l4n::E, bi - wxi * x - wti * t);
         ei1 = ei + 1.0;
 
         value += ai * wti * ei / (ei1 * ei1);
@@ -70,7 +70,7 @@ double eval_approx_yx(double x, double t, size_t n_inner_neurons, std::vector<do
         ai  = parameters[4 * i + 2];
         bi  = parameters[4 * i + 3];
 
-        ei = std::pow(E, bi - wxi * x - wti * t);
+        ei = std::pow(l4n::E, bi - wxi * x - wti * t);
         ei1 = ei + 1.0;
 
         value += (ai * wxi * ei1)/(ei1 * ei1);
@@ -87,7 +87,7 @@ double eval_approx_yxx(double x, double t, size_t n_inner_neurons, std::vector<d
         ai  = parameters[4 * i + 2];
         bi  = parameters[4 * i + 3];
 
-        ei = std::pow(E, bi - wxi * x - wti * t);
+        ei = std::pow(l4n::E, bi - wxi * x - wti * t);
         ei1 = ei + 1.0;
 
         value += (2 * ai * wxi * wxi * ei * ei) / (ei1 * ei1 * ei1) - (ai * wxi * wxi * ei) / (ei1 * ei1);
@@ -102,7 +102,7 @@ double eval_approx_da_y(double x, double t, size_t neuron_idx, std::vector<doubl
     wti = parameters[4 * neuron_idx + 1];
     bi =  parameters[4 * neuron_idx + 3];
 
-    ei = std::pow(E, bi - wxi * x - wti * t);
+    ei = std::pow(l4n::E, bi - wxi * x - wti * t);
     ei1 = ei + 1.0;
 
     return 1.0 / ei1;
@@ -115,7 +115,7 @@ double eval_approx_dwx_y(double x, double t, size_t neuron_idx, std::vector<doub
     ai =  parameters[4 * neuron_idx + 2];
     bi =  parameters[4 * neuron_idx + 3];
 
-    ei = std::pow(E, bi - wxi * x - wti * t);
+    ei = std::pow(l4n::E, bi - wxi * x - wti * t);
     ei1 = ei + 1.0;
 
     return  (ai * x * ei)/(ei1 * ei1);
@@ -128,7 +128,7 @@ double eval_approx_dwt_y(double x, double t, size_t neuron_idx, std::vector<doub
     ai =  parameters[4 * neuron_idx + 2];
     bi =  parameters[4 * neuron_idx + 3];
 
-    ei = std::pow(E, bi - wxi * x - wti * t);
+    ei = std::pow(l4n::E, bi - wxi * x - wti * t);
     ei1 = ei + 1.0;
 
     return  (ai * t * ei)/(ei1 * ei1);
@@ -141,7 +141,7 @@ double eval_approx_db_y(double x, double t, size_t neuron_idx, std::vector<doubl
     ai =  parameters[4 * neuron_idx + 2];
     bi =  parameters[4 * neuron_idx + 3];
 
-    ei = std::pow(E, bi - wxi * x - wti * t);
+    ei = std::pow(l4n::E, bi - wxi * x - wti * t);
     ei1 = ei + 1.0;
 
     return -(ai * ei)/(ei1 * ei1);
@@ -154,7 +154,7 @@ double eval_approx_da_yt(double x, double t, size_t neuron_idx, std::vector<doub
     wti = parameters[4 * neuron_idx + 1];
     bi =  parameters[4 * neuron_idx + 3];
 
-    ei = std::pow(E, bi - wxi * x - wti * t);
+    ei = std::pow(l4n::E, bi - wxi * x - wti * t);
     ei1 = ei + 1.0;
 
     return (wti * ei)/(ei1 * ei1);
@@ -167,7 +167,7 @@ double eval_approx_dwx_yt(double x, double t, size_t neuron_idx, std::vector<dou
     ai =  parameters[4 * neuron_idx + 2];
     bi =  parameters[4 * neuron_idx + 3];
 
-    ei = std::pow(E, bi - wxi * x - wti * t);
+    ei = std::pow(l4n::E, bi - wxi * x - wti * t);
     ei1 = ei + 1.0;
 
     return (2 * ai * wti * x * ei * ei)/(ei1 * ei1 * ei1) - (ai * wti * x * ei)/(ei1 * ei1);
@@ -180,7 +180,7 @@ double eval_approx_dwt_yt(double x, double t, size_t neuron_idx, std::vector<dou
     ai =  parameters[4 * neuron_idx + 2];
     bi =  parameters[4 * neuron_idx + 3];
 
-    ei = std::pow(E, bi - wxi * x - wti * t);
+    ei = std::pow(l4n::E, bi - wxi * x - wti * t);
     ei1 = ei + 1.0;
 
     return  -(ai * t * wti * ei) / (ei1 * ei1) + (2 * ai * t * wti * ei * ei)/(ei1 * ei1 * ei1) + (ai * ei)/(ei1 * ei1);
@@ -193,7 +193,7 @@ double eval_approx_db_yt(double x, double t, size_t neuron_idx, std::vector<doub
     ai =  parameters[4 * neuron_idx + 2];
     bi =  parameters[4 * neuron_idx + 3];
 
-    ei = std::pow(E, bi - wxi * x - wti * t);
+    ei = std::pow(l4n::E, bi - wxi * x - wti * t);
     ei1 = ei + 1.0;
 
     return (ai * wti * ei) / (ei1 * ei1) - (2 * ai * wti * ei * ei) / (ei1 * ei1 * ei1);
@@ -206,10 +206,10 @@ double eval_approx_da_yxx(double x, double t, size_t neuron_idx, std::vector<dou
     ai =  parameters[4 * neuron_idx + 2];
     bi =  parameters[4 * neuron_idx + 3];
 
-    ei = std::pow(E, bi - wxi * x - wti * t);
-    ebp= std::pow(E, bi + wxi * x + wti * t);
-    eb = std::pow(E, bi);
-    etx = std::pow(E, wxi * x + wti * t);
+    ei = std::pow(l4n::E, bi - wxi * x - wti * t);
+    ebp= std::pow(l4n::E, bi + wxi * x + wti * t);
+    eb = std::pow(l4n::E, bi);
+    etx = std::pow(l4n::E, wxi * x + wti * t);
     ei1 = eb + etx;
 
     return -(wxi * wxi * ebp * (etx - eb))/(ei1 * ei1 * ei1);
@@ -222,10 +222,10 @@ double eval_approx_dwx_yxx(double x, double t, size_t neuron_idx, std::vector<do
     ai =  parameters[4 * neuron_idx + 2];
     bi =  parameters[4 * neuron_idx + 3];
 
-    ei = std::pow(E, bi - wxi * x - wti * t);
-    ebp= std::pow(E, bi + wxi * x + wti * t);
-    eb = std::pow(E, bi);
-    etx = std::pow(E, wxi * x + wti * t);
+    ei = std::pow(l4n::E, bi - wxi * x - wti * t);
+    ebp= std::pow(l4n::E, bi + wxi * x + wti * t);
+    eb = std::pow(l4n::E, bi);
+    etx = std::pow(l4n::E, wxi * x + wti * t);
     ei1 = eb + etx;
 
     return (ai * wxi * wxi * x * ei) / ((ei + 1) * (ei + 1)) - (6 * ai * wxi * wxi * x * ei * ei) / ((ei + 1) * (ei + 1) * (ei + 1)) + (6 * ai * wxi *wxi * x * ei * ei * ei) / ((ei + 1) * (ei + 1) * (ei + 1) * (ei + 1)) - (2 * ai * wxi * ei) / ((ei + 1) * (ei + 1)) + (4 * ai * wxi * ei * ei)/((ei + 1) * (ei + 1) * (ei + 1));
@@ -238,10 +238,10 @@ double eval_approx_dwt_yxx(double x, double t, size_t neuron_idx, std::vector<do
     ai =  parameters[4 * neuron_idx + 2];
     bi =  parameters[4 * neuron_idx + 3];
 
-    ei = std::pow(E, bi - wxi * x - wti * t);
-    ebp= std::pow(E, bi + wxi * x + wti * t);
-    eb = std::pow(E, bi);
-    etx = std::pow(E, wxi * x + wti * t);
+    ei = std::pow(l4n::E, bi - wxi * x - wti * t);
+    ebp= std::pow(l4n::E, bi + wxi * x + wti * t);
+    eb = std::pow(l4n::E, bi);
+    etx = std::pow(l4n::E, wxi * x + wti * t);
     ei1 = eb + etx;
 
     return (ai * t * wxi * wxi * ei) / ((ei + 1) * (ei + 1)) - (6 * ai * t * wxi * wxi * ei * ei) / ((ei + 1) * (ei + 1) * (ei + 1)) + (6 * ai * t * wxi * wxi * ei * ei * ei) / ((ei + 1) * (ei + 1) * (ei + 1) * (ei + 1));
@@ -254,10 +254,10 @@ double eval_approx_db_yxx(double x, double t, size_t neuron_idx, std::vector<dou
     ai =  parameters[4 * neuron_idx + 2];
     bi =  parameters[4 * neuron_idx + 3];
 
-    ei = std::pow(E, bi - wxi * x - wti * t);
-    ebp= std::pow(E, bi + wxi * x + wti * t);
-    eb = std::pow(E, bi);
-    etx = std::pow(E, wxi * x + wti * t);
+    ei = std::pow(l4n::E, bi - wxi * x - wti * t);
+    ebp= std::pow(l4n::E, bi + wxi * x + wti * t);
+    eb = std::pow(l4n::E, bi);
+    etx = std::pow(l4n::E, wxi * x + wti * t);
     ei1 = eb + etx;
 
     return (ai * wxi * wxi * eb * ebp) / (ei1 * ei1 * ei1) - (ai * wxi * wxi * ebp * (etx - eb)) / (ei1 * ei1 * ei1) + (3 * ai * wxi * wxi * eb * ebp * (etx - eb)) / (ei1 * ei1 * ei1 * ei1);
@@ -329,7 +329,7 @@ double calculate_gradient( std::vector<double> &data_points, size_t n_inner_neur
     for(i = 0; i < train_size; ++i){
 
         x = data_points[i];
-        mem = (std::pow(E, -0.707106781 * x) * std::sin( -0.707106781 * x ) - eval_approx_y(x, 0.0, n_inner_neurons, *parameters));
+        mem = (std::pow(l4n::E, -0.707106781 * x) * std::sin( -0.707106781 * x ) - eval_approx_y(x, 0.0, n_inner_neurons, *parameters));
         derror = 2.0 * mem / train_size;
 
         for(j = 0; j < n_inner_neurons; ++j){
@@ -454,7 +454,7 @@ void solve_example_gradient(std::vector<double> &guess, double accuracy, size_t
                 sx += (gradient_current->at( i ) * gradient_prev->at( i ));
             }
             sx /= grad_norm * grad_norm_prev;
-            beta = std::sqrt(std::acos( sx ) / PI);
+            beta = std::sqrt(std::acos( sx ) / l4n::PI);
 
 
 //            eval_step_size_simple( gamma, val, prev_val, sk, grad_norm, grad_norm_prev );
@@ -606,7 +606,7 @@ void solve_example_gradient(std::vector<double> &guess, double accuracy, size_t
 //        t = frac * i + ts;
 //
 //        double yt = std::sin(t);
-//        double yx = std::pow(E, -0.707106781 * x) * std::sin( -0.707106781 * x );
+//        double yx = std::pow(l4n::E, -0.707106781 * x) * std::sin( -0.707106781 * x );
 //
 //        double evalt = eval_approx_y(0, t, n_inner_neurons, *params_current);
 //        double evalx = eval_approx_y(x, 0, n_inner_neurons, *params_current);
@@ -637,12 +637,12 @@ void solve_example_particle_swarm(double accuracy, size_t n_inner_neurons, size_
     /* do not change below */
     size_t n_inputs = 2;
     size_t n_equations = 3;
-    DESolver solver_01( n_equations, n_inputs, n_inner_neurons );
+    l4n::DESolver solver_01( n_equations, n_inputs, n_inner_neurons );
 
     /* SETUP OF THE EQUATIONS */
-    MultiIndex alpha_00( n_inputs );
-    MultiIndex alpha_01( n_inputs );
-    MultiIndex alpha_20( n_inputs );
+    l4n::MultiIndex alpha_00( n_inputs );
+    l4n::MultiIndex alpha_01( n_inputs );
+    l4n::MultiIndex alpha_20( n_inputs );
 
     alpha_00.set_partial_derivative(0, 0);
     alpha_00.set_partial_derivative(1, 0);
@@ -675,7 +675,7 @@ void solve_example_particle_swarm(double accuracy, size_t n_inner_neurons, size_
         output[0] = 0.0;
         return output;
     };
-    DataSet ds_00(test_bounds_2d, train_size, f1, 1);
+    l4n::DataSet ds_00(test_bounds_2d, train_size, f1, 1);
 
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_t;
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_x;
@@ -687,20 +687,20 @@ void solve_example_particle_swarm(double accuracy, size_t n_inner_neurons, size_
         data_vec_t.emplace_back(std::make_pair(inp, out));
 
         inp = {frac * i, 0.0};
-        out = {std::pow(E, -0.707106781 * inp[0]) * std::sin( -0.707106781 * inp[0] )};
+        out = {std::pow(l4n::E, -0.707106781 * inp[0]) * std::sin( -0.707106781 * inp[0] )};
         data_vec_x.emplace_back(std::make_pair(inp, out));
 
     }
-    DataSet ds_t(&data_vec_t);
-    DataSet ds_x(&data_vec_x);
+    l4n::DataSet ds_t(&data_vec_t);
+    l4n::DataSet ds_x(&data_vec_x);
 
 
 
 
     /* Placing the conditions into the solver */
-    solver_01.set_error_function( 0, ErrorFunctionType::ErrorFuncMSE, &ds_00 );
-    solver_01.set_error_function( 1, ErrorFunctionType::ErrorFuncMSE, &ds_t );
-    solver_01.set_error_function( 2, ErrorFunctionType::ErrorFuncMSE, &ds_x );
+    solver_01.set_error_function( 0, l4n::ErrorFunctionType::ErrorFuncMSE, &ds_00 );
+    solver_01.set_error_function( 1, l4n::ErrorFunctionType::ErrorFuncMSE, &ds_t );
+    solver_01.set_error_function( 2, l4n::ErrorFunctionType::ErrorFuncMSE, &ds_x );
 
     size_t total_dim = (2 + n_inputs) * n_inner_neurons;
 
@@ -786,7 +786,7 @@ void solve_example_particle_swarm(double accuracy, size_t n_inner_neurons, size_
 //        t = frac * i + ts;
 //
 //        double yt = std::sin(t);
-//        double yx = std::pow(E, -0.707106781 * x) * std::sin( -0.707106781 * x );
+//        double yx = std::pow(l4n::E, -0.707106781 * x) * std::sin( -0.707106781 * x );
 //
 //        double evalt = eval_approx_y(0, t, n_inner_neurons, export_params);
 //        double evalx = eval_approx_y(x, 0, n_inner_neurons, export_params);
diff --git a/src/examples/network_serialization.cpp b/src/examples/network_serialization.cpp
index 92f821c0a9337c6236d931060dd818e48b055959..5ef8e75d5e6207f6d5468874fc855e784bdc890f 100644
--- a/src/examples/network_serialization.cpp
+++ b/src/examples/network_serialization.cpp
@@ -33,24 +33,24 @@ int main() {
     out = {0.75};
     data_vec.emplace_back(std::make_pair(inp, out));
 
-    DataSet ds(&data_vec);
+    l4n::DataSet ds(&data_vec);
 
     /* NETWORK DEFINITION */
-    NeuralNetwork net;
+    l4n::NeuralNetwork net;
 
     /* Input neurons */
-    NeuronLinear *i1 = new NeuronLinear( );  //f(x) = x
-    NeuronLinear *i2 = new NeuronLinear( );  //f(x) = x
+    l4n::NeuronLinear *i1 = new l4n::NeuronLinear( );  //f(x) = x
+    l4n::NeuronLinear *i2 = new l4n::NeuronLinear( );  //f(x) = x
 
     /* Output neuron */
-    NeuronLinear *o1 = new NeuronLinear( );  //f(x) = x
+    l4n::NeuronLinear *o1 = new l4n::NeuronLinear( );  //f(x) = x
 
 
 
     /* Adding neurons to the net */
-    size_t idx1 = net.add_neuron(i1, BIAS_TYPE::NO_BIAS);
-    size_t idx2 = net.add_neuron(i2, BIAS_TYPE::NO_BIAS);
-    size_t idx3 = net.add_neuron(o1, BIAS_TYPE::NEXT_BIAS);
+    size_t idx1 = net.add_neuron(i1, l4n::BIAS_TYPE::NO_BIAS);
+    size_t idx2 = net.add_neuron(i2, l4n::BIAS_TYPE::NO_BIAS);
+    size_t idx3 = net.add_neuron(o1, l4n::BIAS_TYPE::NEXT_BIAS);
 
     std::vector<double> *bv = net.get_parameter_ptr_biases();
     for(size_t i = 0; i < 1; ++i){
@@ -58,8 +58,8 @@ int main() {
     }
 
     /* Adding connections */
-    net.add_connection_simple(idx1, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
-    net.add_connection_simple(idx2, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
+    net.add_connection_simple(idx1, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
+    net.add_connection_simple(idx2, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
 
     //net.randomize_weights();
 
@@ -75,7 +75,7 @@ int main() {
     net.specify_output_neurons(net_output_neurons_indices);
 
     /* ERROR FUNCTION SPECIFICATION */
-    MSE mse(&net, &ds);
+    l4n::MSE mse(&net, &ds);
 
     /* TRAINING METHOD SETUP */
     std::vector<double> domain_bounds = {-10.0, 10.0, -10.0, 10.0, -10.0, 10.0};
@@ -118,7 +118,7 @@ int main() {
     std::cout << "********************************************************************************************************************************************" <<std::endl;
 
     std::cout << "Network loaded from a file" << std::endl;
-    NeuralNetwork net2("saved_network.4nt");
+    l4n::NeuralNetwork net2("saved_network.4nt");
     net2.print_stats();
     std::cout << "--------------------------------------------------------------------------------------------------------------------------------------------" <<std::endl;
     error = 0.0;
diff --git a/src/exceptions.h b/src/exceptions.h
deleted file mode 100644
index 0beaf7b4c0f70cf6b1d2b0dc322bce4ec61df1b3..0000000000000000000000000000000000000000
--- a/src/exceptions.h
+++ /dev/null
@@ -1,16 +0,0 @@
-//
-// Created by martin on 9/17/18.
-//
-
-#ifndef LIB4NEURO_EXCEPTIONS_H
-#define LIB4NEURO_EXCEPTIONS_H
-
-//TODO implement NotImplementedException according to C++17
-//class NotImplementedException : public std::logic_error {
-//public:
-//    const char* what() {
-//        return "Function not yet implemented!";
-//    }
-//};
-
-#endif //LIB4NEURO_EXCEPTIONS_H
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt
index cd7d4ad4c8ab865dba0d047e6ead66b756e624bf..eea78435adbedd5baf35f2a988155181fe7781ad 100644
--- a/src/tests/CMakeLists.txt
+++ b/src/tests/CMakeLists.txt
@@ -4,55 +4,55 @@
 
 add_executable(linear_neuron_test NeuronLinear_test.cpp)
 target_link_libraries(linear_neuron_test lib4neuro boost_unit_test)
-target_include_directories(linear_neuron_test PRIVATE ${Boost_INCLUDE_DIRS})
+target_include_directories(linear_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
 add_executable(constant_neuron_test NeuronConstant_test.cpp)
 target_link_libraries(constant_neuron_test lib4neuro boost_unit_test)
-target_include_directories(constant_neuron_test PRIVATE ${Boost_INCLUDE_DIRS})
+target_include_directories(constant_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
 add_executable(binary_neuron_test NeuronBinary_test.cpp)
 target_link_libraries(binary_neuron_test lib4neuro boost_unit_test)
-target_include_directories(binary_neuron_test PRIVATE ${Boost_INCLUDE_DIRS})
+target_include_directories(binary_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
 add_executable(logistic_neuron_test NeuronLogistic_test.cpp)
 target_link_libraries(logistic_neuron_test lib4neuro boost_unit_test)
-target_include_directories(logistic_neuron_test PRIVATE ${Boost_INCLUDE_DIRS})
+target_include_directories(logistic_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
 add_executable(connectionFunctionGeneral_test ConnectionFunctionGeneral_test.cpp)
 target_link_libraries(connectionFunctionGeneral_test lib4neuro boost_unit_test)
-target_include_directories(connectionFunctionGeneral_test PRIVATE ${Boost_INCLUDE_DIRS})
+target_include_directories(connectionFunctionGeneral_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
 add_executable(neural_network_test NeuralNetwork_test.cpp)
 target_link_libraries(neural_network_test lib4neuro boost_unit_test)
-target_include_directories(neural_network_test PRIVATE ${Boost_INCLUDE_DIRS})
+target_include_directories(neural_network_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
 add_executable(connection_Function_identity_test ConnectionFunctionIdentity_test.cpp)
 target_link_libraries(connection_Function_identity_test lib4neuro boost_unit_test)
-target_include_directories(connection_Function_identity_test PRIVATE ${Boost_INCLUDE_DIRS})
+target_include_directories(connection_Function_identity_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
 add_executable(dataset_test DataSet_test.cpp)
 target_link_libraries(dataset_test lib4neuro boost_unit_test)
-target_include_directories(dataset_test PRIVATE ${Boost_INCLUDE_DIRS})
+target_include_directories(dataset_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
 add_executable(errorfunction_test ErrorFunctions_test.cpp)
 target_link_libraries(errorfunction_test lib4neuro boost_unit_test)
-target_include_directories(errorfunction_test PRIVATE ${Boost_INCLUDE_DIRS})
+target_include_directories(errorfunction_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
 add_executable(particle_swarm_test ParticleSwarm_test.cpp)
 target_link_libraries(particle_swarm_test lib4neuro boost_unit_test)
-target_include_directories(particle_swarm_test PRIVATE ${Boost_INCLUDE_DIRS})
+target_include_directories(particle_swarm_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
 add_executable(particle_test Particle_test.cpp)
 target_link_libraries(particle_test lib4neuro boost_unit_test)
-target_include_directories(particle_test PRIVATE ${Boost_INCLUDE_DIRS})
+target_include_directories(particle_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
 add_executable(NeuralNetworkSum_test NeuralNetworkSum_test.cpp)
 target_link_libraries(NeuralNetworkSum_test lib4neuro boost_unit_test)
-target_include_directories(NeuralNetworkSum_test PRIVATE ${Boost_INCLUDE_DIRS})
+target_include_directories(NeuralNetworkSum_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
 add_executable(DESolver_test DESolver_test.cpp)
 target_link_libraries(DESolver_test lib4neuro boost_unit_test)
-target_include_directories(DESolver_test PRIVATE ${Boost_INCLUDE_DIRS})
+target_include_directories(DESolver_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
 set_target_properties(
     linear_neuron_test
diff --git a/src/tests/ConnectionFunctionGeneral_test.cpp b/src/tests/ConnectionFunctionGeneral_test.cpp
index b8c4e30e4c9b317a522e67f1edab451e4f9adde4..51430571ec1b8fd22151007ce70b41ab573bcbd5 100644
--- a/src/tests/ConnectionFunctionGeneral_test.cpp
+++ b/src/tests/ConnectionFunctionGeneral_test.cpp
@@ -26,10 +26,14 @@
 #include "../NetConnection/ConnectionFunctionGeneral.h"
 #include "../Neuron/NeuronLinear.h"
 #include <iostream>
+#include "../../external_dependencies/turtle/include/turtle/mock.hpp"
 /**
  * Boost testing suite for testing ConnectionFunctionGeneral.h
  */
-
+MOCK_BASE_CLASS( mock_connection, ConnectionFunctionGeneral ) // declare a 'mock_connection' mock class deriving from 'ConnectionFunctionGeneral'
+{
+    MOCK_METHOD( eval, 1 )      // mock the 'eval' method of 'ConnectionFunctionGeneral' (taking 1 argument)
+};
 BOOST_AUTO_TEST_SUITE(Connection_test)
 
 /**
@@ -37,9 +41,12 @@ BOOST_AUTO_TEST_SUITE(Connection_test)
  */
     BOOST_AUTO_TEST_CASE(Connection_construction__test) {
 
+        mock_connection v;
+
+
         BOOST_CHECK_NO_THROW(ConnectionFunctionGeneral *functionGeneral = new ConnectionFunctionGeneral());
 
-        std::vector<size_t> param_indices;
+        std::vector<size_t> param_indices;
         param_indices.push_back(0);
         std::string paramToFunction = "this do nothing! Why is it here?";
         BOOST_CHECK_NO_THROW(ConnectionFunctionGeneral *functionGeneral = new ConnectionFunctionGeneral(param_indices,
diff --git a/src/tests/DESolver_test.cpp b/src/tests/DESolver_test.cpp
index 6f48aef53ac30dfc1da0ded3ddf3355b8405d4fc..07fa1be6037721070d655c58b12c4c10b61635c7 100644
--- a/src/tests/DESolver_test.cpp
+++ b/src/tests/DESolver_test.cpp
@@ -8,7 +8,7 @@
 #define BOOST_TEST_MODULE DESolver_test
 
 #ifdef _WINDOWS
-	#include <boost/test/included/unit_test.hpp>
+	#include <boost/test/included/unit_test.hpp> //TODO can be included under the conditionally-compiled block
 #else
 
 	#ifndef BOOST_TEST_DYN_LINK
@@ -26,6 +26,8 @@
 #include <iostream>
 #include "../Solvers/DESolver.h"
 
+using namespace lib4neuro;
+
 /**
  * Boost testing suite for testing DESolver.h
  *
diff --git a/src/tests/DataSet_test.cpp b/src/tests/DataSet_test.cpp
index 86b47b0f3bb7ca3ac2bb1404d14969c56dd19c7d..547035847123c261184733639a394225d4125299 100644
--- a/src/tests/DataSet_test.cpp
+++ b/src/tests/DataSet_test.cpp
@@ -28,6 +28,7 @@
 #include <iostream>
 //#include <boost/filesystem.hpp>
 
+using namespace lib4neuro;
 
 /**
  * Boost testing suite for testing DataSet.h
@@ -101,7 +102,7 @@ BOOST_AUTO_TEST_SUITE(DataSet_test)
 /**
  * Test of add_data_pair method
  */
-    BOOST_AUTO_TEST_CASE(DataSet_add_daata_pair_test) {
+    BOOST_AUTO_TEST_CASE(DataSet_add_data_pair_test) {
         std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
         std::vector<double> inp, out;
 
diff --git a/src/tests/ErrorFunctions_test.cpp b/src/tests/ErrorFunctions_test.cpp
index 23aed3ac5b9e5b8fddac32e714bf2487513f457e..8d04ecf602ebd6f18bca8d8a11825764c819a2f7 100644
--- a/src/tests/ErrorFunctions_test.cpp
+++ b/src/tests/ErrorFunctions_test.cpp
@@ -24,11 +24,13 @@
 
 #include "../ErrorFunction/ErrorFunctions.h"
 
+using namespace lib4neuro;
+
 /**
  * Boost testing suite for testing ErrorFunction.h
  * doesn't test inherited methods
  */
-BOOST_AUTO_TEST_SUITE(ErrorFunctions_test)
+BOOST_AUTO_TEST_SUITE(ErrorFunctions_test);
 
     BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_Construction_Test) {
         NeuralNetwork network;
diff --git a/src/tests/NeuralNetworkSum_test.cpp b/src/tests/NeuralNetworkSum_test.cpp
index b2ede50b5240e452b62c2fe99ea759b164d18ced..b51f6aa6d4783d788656351ea9522c9a243afa15 100644
--- a/src/tests/NeuralNetworkSum_test.cpp
+++ b/src/tests/NeuralNetworkSum_test.cpp
@@ -25,6 +25,8 @@
 
 #include "../Network/NeuralNetworkSum.h"
 
+using namespace lib4neuro;
+
 /**
  * Boost testing suite for testing NeuralNetworkSum.h
  */
diff --git a/src/tests/NeuralNetwork_test.cpp b/src/tests/NeuralNetwork_test.cpp
index 4980fb3d46c0bef7e5779bcb1e66918c292f7724..82f779f04113a7e330181cd0b17b880879e6e530 100644
--- a/src/tests/NeuralNetwork_test.cpp
+++ b/src/tests/NeuralNetwork_test.cpp
@@ -25,6 +25,8 @@
 
 #include "../Network/NeuralNetwork.h"
 
+using namespace lib4neuro;
+
 struct cerr_redirect {
     cerr_redirect(std::streambuf *new_buffer)
             : old(std::cerr.rdbuf(new_buffer)
diff --git a/src/tests/NeuronBinary_test.cpp b/src/tests/NeuronBinary_test.cpp
index 832ef9ae5107b168b10d5295654d13f8d3c0f6d8..384e07fba850fcc2549fe240d75f96852d6bbdbc 100644
--- a/src/tests/NeuronBinary_test.cpp
+++ b/src/tests/NeuronBinary_test.cpp
@@ -25,6 +25,8 @@
 
 #include "../Neuron/NeuronBinary.h"
 
+using namespace lib4neuro;
+
 /**
  * Boost testing suite for testing NeuronBinary.h
  * doesn't test inherited methods
diff --git a/src/tests/NeuronConstant_test.cpp b/src/tests/NeuronConstant_test.cpp
index 16f97d45a08e2a9e9580d2f6d643bee0951543dd..a6fe0f5d3c28c657cd1609ec420120b59de02223 100644
--- a/src/tests/NeuronConstant_test.cpp
+++ b/src/tests/NeuronConstant_test.cpp
@@ -25,6 +25,8 @@
 
 #include "../Neuron/NeuronConstant.h"
 
+using namespace lib4neuro;
+
 /**
  * Boost testing suite for testing NeuronConstant.h
  * doesn't test inherited methods
diff --git a/src/tests/NeuronLinear_test.cpp b/src/tests/NeuronLinear_test.cpp
index 74ee617056a0c120503e609ab4f27b4782d4732c..d8126ecaa70d8fcc5ef9e3f5a8377925998d18f0 100644
--- a/src/tests/NeuronLinear_test.cpp
+++ b/src/tests/NeuronLinear_test.cpp
@@ -25,6 +25,8 @@
 
 #include "../Neuron/NeuronLinear.h"
 
+using namespace lib4neuro;
+
 /**
  * Boost testing suite for testing NeuronLinear.h
  * doesn't test inherited methods
diff --git a/src/tests/NeuronLogistic_test.cpp b/src/tests/NeuronLogistic_test.cpp
index 3baabd92349bfc4f9173f99d880614a969f386d4..1c43798cb5e6700c72d8f83d620d533171db5fee 100644
--- a/src/tests/NeuronLogistic_test.cpp
+++ b/src/tests/NeuronLogistic_test.cpp
@@ -25,6 +25,8 @@
 
 #include "../Neuron/NeuronLogistic.h"
 
+using namespace lib4neuro;
+
 /**
  * Boost testing suite for testing NeuronLogistic.h
  * doesn't test inherited methods
diff --git a/src/tests/ParticleSwarm_test.cpp b/src/tests/ParticleSwarm_test.cpp
index f61294f4b175a224f98c49c79844cea87266a202..1c16b9c14e5356efeff562e729e73f89556858a1 100644
--- a/src/tests/ParticleSwarm_test.cpp
+++ b/src/tests/ParticleSwarm_test.cpp
@@ -24,6 +24,9 @@
 #endif
 
 #include "../LearningMethods/ParticleSwarm.h"
+
+using namespace lib4neuro;
+
 /**
  * Boost testing suite for testing ParticleSwarm.h
  */
diff --git a/src/tests/Particle_test.cpp b/src/tests/Particle_test.cpp
index 590b7b1b1586d9f4307b18b3f18598bc5e31a9c4..4c8b8fa04ea07e150f22df1458e1be270fca429d 100644
--- a/src/tests/Particle_test.cpp
+++ b/src/tests/Particle_test.cpp
@@ -24,6 +24,9 @@
 #endif
 
 #include "../LearningMethods/ParticleSwarm.h"
+
+using namespace lib4neuro;
+
 /**
  * Boost testing suite for testing ParticleSwarm.h
  * TODO