diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index cf5b03cbf4aa3560756d11867fd3a66e82501e76..d02459514e9b091cf04b279955ecc7d0f4d55347 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -73,27 +73,27 @@ if ("${BUILD_LIB}" STREQUAL "yes")
 
     # GFortran linking
     set(GFORT "")
-    if(OpenBLAS_FOUND)
+    if (OpenBLAS_FOUND)
         message("Linking GFortran because of OpenBLAS...")
         set(GFORT gfortran)
-    endif()
+    endif ()
 
-    if(NOT OpenBLAS_LIBRARIES)
+    if (NOT OpenBLAS_LIBRARIES)
         set(OpenBLAS_LIBRARIES "")
-    endif()
+    endif ()
 
-    if(NOT BLAS_LIBRARIES)
+    if (NOT BLAS_LIBRARIES)
         set(BLAS_LIBRARIES "")
-    endif()
+    endif ()
 
-    if(NOT LAPACK_LIBRARIES)
+    if (NOT LAPACK_LIBRARIES)
         set(LAPACK_LIBRARIES "")
-    endif()
+    endif ()
 
     target_link_libraries(
-        lib4neuro
+            lib4neuro
 
-        PRIVATE
+            PRIVATE
             exprtk_wrap
             Threads::Threads
             ${Boost_LIBRARIES}
@@ -105,25 +105,25 @@ if ("${BUILD_LIB}" STREQUAL "yes")
     )
 
     target_include_directories(
-        lib4neuro
+            lib4neuro
 
-        PUBLIC
-        ${ROOT_DIR}/include
+            PUBLIC
+            ${ROOT_DIR}/include
 
-        PRIVATE
-        ${EXPRTK_INCLUDE_DIR}
-        ${SRC_DIR}
-        ${Boost_INCLUDE_DIRS}
-        ${ARMADILLO_INCLUDE_DIR}
+            PRIVATE
+            ${EXPRTK_INCLUDE_DIR}
+            ${SRC_DIR}
+            ${Boost_INCLUDE_DIRS}
+            ${ARMADILLO_INCLUDE_DIR}
     )
 
     set_target_properties(
-        lib4neuro
+            lib4neuro
 
-        PROPERTIES
-        ARCHIVE_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib"
-        LIBRARY_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib"
-        RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/bin"
+            PROPERTIES
+            ARCHIVE_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib"
+            LIBRARY_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib"
+            RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/bin"
     )
 
     set(PREFIX "")
@@ -139,10 +139,10 @@ if ("${BUILD_LIB}" STREQUAL "yes")
     endif ()
 
     target_include_directories(
-        ${PREFIX}boost_unit_test
+            ${PREFIX}boost_unit_test
 
-        PRIVATE
-        ${Boost_INCLUDE_DIRS}
+            PRIVATE
+            ${Boost_INCLUDE_DIRS}
     )
 
 endif ()
diff --git a/src/CSVReader/CSVReader.cpp b/src/CSVReader/CSVReader.cpp
index 50fcc4dd78a3170a6061aa82da5d635b631715c9..dc0031e51dd5b64758f5bceb5a2356a6e633d7b4 100644
--- a/src/CSVReader/CSVReader.cpp
+++ b/src/CSVReader/CSVReader.cpp
@@ -14,8 +14,10 @@
 
 
 namespace lib4neuro {
-    CSVReader::CSVReader(std::string file_path, std::string delimiter, bool ignore_first_line) {
-        if(!std::experimental::filesystem::exists(file_path)) {
+    CSVReader::CSVReader(std::string file_path,
+                         std::string delimiter,
+                         bool ignore_first_line) {
+        if (!std::experimental::filesystem::exists(file_path)) {
             THROW_RUNTIME_ERROR("The file path \'" + file_path + "\' specified in CSVReader does not exist!");
         }
 
@@ -29,15 +31,17 @@ namespace lib4neuro {
         std::ifstream ifs(this->file_path);
         std::string line;
 
-        if(this->ignore_first_line) {
-            std::getline(ifs, line);
+        if (this->ignore_first_line) {
+            std::getline(ifs,
+                         line);
         }
 
         /* Read single line from the file */
-        while(std::getline(ifs, line)) {
+        while (std::getline(ifs,
+                            line)) {
 
             /* Ignore empty line */
-            if(line == "") {
+            if (line == "") {
                 continue;
             }
 
@@ -45,8 +49,10 @@ namespace lib4neuro {
             size_t last = 0;
             size_t next = 0;
             std::vector<std::string> separated_line;
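+            /* Split the line into tokens on the delimiter; the tail after the last match is appended below */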
-            while ((next = line.find(this->delimiter, last)) != std::string::npos) {
-                separated_line.emplace_back(line.substr(last, next - last));
+            while ((next = line.find(this->delimiter,
+                                     last)) != std::string::npos) {
+                separated_line.emplace_back(line.substr(last,
+                                                        next - last));
                 last = next + this->delimiter.size();
             }
             separated_line.emplace_back(line.substr(last));
@@ -63,8 +69,8 @@ namespace lib4neuro {
     }
 
     void CSVReader::print_data() {
-        for(auto line : this->data) {
-            for(auto e : line) {
+        for (auto line : this->data) {
+            for (auto e : line) {
                 std::cout << e << " ";
             }
             std::cout << std::endl;
@@ -76,7 +82,7 @@ namespace lib4neuro {
 
         std::vector<std::pair<std::vector<double>, std::vector<double>>> data_set_contents;
 
-        if(this->data.empty()) {
+        if (this->data.empty()) {
             THROW_LOGIC_ERROR("DataSet can not be created as there were no data read beforehand! Did you forget to call "
                               "the method 'read()'?");
         }
@@ -90,12 +96,15 @@ namespace lib4neuro {
                 try {
                     /* Remove remaining spaces */
                     s = line.at(ind);
-                    boost::algorithm::erase_all(s, " ");
+                    boost::algorithm::erase_all(s,
+                                                " ");
 
                     /* Strip BOM */
                     // TODO solve in another way - work properly with different encodings!
-                    boost::algorithm::erase_all(s, "\uEFBBBF");  // UTF-8
-                    boost::algorithm::erase_all(s, "\uFEFF");  // UTF-16
+                    boost::algorithm::erase_all(s,
+                                                "\xEF\xBB\xBF");  // UTF-8 BOM bytes ("\uEFBBBF" denotes U+EFBB 'B' 'F' and never matches)
+                    boost::algorithm::erase_all(s,
+                                                "\uFEFF");  // BOM code point (U+FEFF)
 
                     /* Check, if the string is a number */
                     auto tmp = boost::lexical_cast<double>(s);
@@ -117,7 +126,8 @@ namespace lib4neuro {
                 output.emplace_back(std::stod(line.at(ind)));
             }
 
-            data_set_contents.emplace_back(std::make_pair(input, output));
+            data_set_contents.emplace_back(std::make_pair(input,
+                                                          output));
         }
 
         return std::make_shared<DataSet>(DataSet(&data_set_contents));
diff --git a/src/CSVReader/CSVReader.h b/src/CSVReader/CSVReader.h
index 01661ad78bfbb924ea4f5bfd95b7fed4259895c0..69877fe888d97d8020fc612b0eac27cc3c98cb79 100644
--- a/src/CSVReader/CSVReader.h
+++ b/src/CSVReader/CSVReader.h
@@ -13,7 +13,7 @@ namespace lib4neuro {
      *
      */
     class CSVReader {
-    //TODO make more efficient - possibly with external library?
+        //TODO make more efficient - possibly with external library?
 
     private:
 
@@ -50,7 +50,9 @@ namespace lib4neuro {
          * @param delimiter
          * @param ignore_first_line
          */
-        LIB4NEURO_API CSVReader(std::string file_path, std::string delimiter=",", bool ignore_first_line=false);
+        LIB4NEURO_API CSVReader(std::string file_path,
+                                std::string delimiter = ",",
+                                bool ignore_first_line = false);
 
         /**
          *
@@ -70,7 +72,7 @@ namespace lib4neuro {
          * @return
          */
         LIB4NEURO_API std::shared_ptr<DataSet> get_data_set(std::vector<unsigned int>* input_col_indices,
-                std::vector<unsigned int>* output_col_indices);
+                                                            std::vector<unsigned int>* output_col_indices);
 
         /**
          *
diff --git a/src/CrossValidator/CrossValidator.cpp b/src/CrossValidator/CrossValidator.cpp
index 7af7faa2d4da174c8a5dce6686494139ef626eaf..0e82db52316712a22c08a9429aefd42da2acae09 100644
--- a/src/CrossValidator/CrossValidator.cpp
+++ b/src/CrossValidator/CrossValidator.cpp
@@ -3,68 +3,76 @@
 #include "message.h"
 
 namespace lib4neuro {
-    LIB4NEURO_API CrossValidator::CrossValidator(LearningMethod* optimizer, ErrorFunction* ef) {
+    LIB4NEURO_API CrossValidator::CrossValidator(LearningMethod* optimizer,
+                                                 ErrorFunction* ef) {
         this->optimizer = optimizer;
         this->ef = ef;
     }
 
-    LIB4NEURO_API void CrossValidator::run_k_fold_test(unsigned int k, unsigned int tests_number, std::ofstream* results_file_path) {
+    LIB4NEURO_API void CrossValidator::run_k_fold_test(unsigned int k,
+                                                       unsigned int tests_number,
+                                                       std::ofstream* results_file_path) {
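+        /* Note: divide_data_train_test() draws a fresh random test split on each run
+         * (repeated random sub-sampling) rather than iterating over k fixed, disjoint folds. */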
         //TODO do not duplicate code - write in a more elegant way
-        NeuralNetwork *net = this->ef->get_network_instance();
+        NeuralNetwork* net = this->ef->get_network_instance();
 
         double cv_err_sum = 0;
 
-        for(unsigned int i = 0; i < tests_number; i++) {
-            COUT_INFO("Cross-validation run " << i+1 << std::endl);
-            *results_file_path << "Cross-validation run " << i+1 << std::endl;
+        for (unsigned int i = 0; i < tests_number; i++) {
+            COUT_INFO("Cross-validation run " << i + 1 << std::endl);
+            *results_file_path << "Cross-validation run " << i + 1 << std::endl;
 
-            this->ef->divide_data_train_test(1.0/k);
-            *results_file_path << "Number of train data points: " << this->ef->get_dataset()->get_n_elements() << std::endl;
-            *results_file_path << "Number of test data points: " << this->ef->get_test_dataset()->get_n_elements() << std::endl;
+            this->ef->divide_data_train_test(1.0 / k);
+            *results_file_path << "Number of train data points: " << this->ef->get_dataset()->get_n_elements()
+                               << std::endl;
+            *results_file_path << "Number of test data points: " << this->ef->get_test_dataset()->get_n_elements()
+                               << std::endl;
             net->randomize_parameters();
-            net->scale_parameters( 1.0 / (net->get_n_weights() + net->get_n_biases()));
-            this->optimizer->optimize(*this->ef, results_file_path);
+            net->scale_parameters(1.0 / (net->get_n_weights() + net->get_n_biases()));
+            this->optimizer->optimize(*this->ef,
+                                      results_file_path);
 
             /* Error evaluation and writing */
             double err = this->ef->eval_on_test_data(results_file_path);
             cv_err_sum += err;
-            COUT_INFO("CV error (run " << i+1 << "): " << err << std::endl << std::endl);
+            COUT_INFO("CV error (run " << i + 1 << "): " << err << std::endl << std::endl);
 
             this->ef->return_full_data_set_for_training();
         }
 
-        COUT_INFO("CV error mean: " << cv_err_sum/tests_number << std::endl);
-        *results_file_path << "CV error mean: " << cv_err_sum/tests_number << std::endl;
+        COUT_INFO("CV error mean: " << cv_err_sum / tests_number << std::endl);
+        *results_file_path << "CV error mean: " << cv_err_sum / tests_number << std::endl;
     }
 
-    LIB4NEURO_API void CrossValidator::run_k_fold_test(unsigned int k, unsigned int tests_number, std::string results_file_path) {
-        NeuralNetwork *net = this->ef->get_network_instance();
+    LIB4NEURO_API void CrossValidator::run_k_fold_test(unsigned int k,
+                                                       unsigned int tests_number,
+                                                       std::string results_file_path) {
+        NeuralNetwork* net = this->ef->get_network_instance();
 
         double cv_err_sum = 0;
 
-        for(unsigned int i = 0; i < tests_number; i++) {
-            COUT_INFO("Cross-validation run " << i+1 << std::endl);
+        for (unsigned int i = 0; i < tests_number; i++) {
+            COUT_INFO("Cross-validation run " << i + 1 << std::endl);
 
-            this->ef->divide_data_train_test(1.0/k);
+            this->ef->divide_data_train_test(1.0 / k);
             COUT_DEBUG("Number of train data points: " << this->ef->get_dataset()->get_n_elements() << std::endl);
             COUT_DEBUG("Number of test data points: " << this->ef->get_test_dataset()->get_n_elements() << std::endl);
             net->randomize_parameters();
-            net->scale_parameters( 1.0 / (net->get_n_weights() + net->get_n_biases()));
+            net->scale_parameters(1.0 / (net->get_n_weights() + net->get_n_biases()));
             this->optimizer->optimize(*this->ef);
 
             /* Error evaluation and writing */
             double err;
-            if(results_file_path == "") {
+            if (results_file_path == "") {
                 err = this->ef->eval_on_test_data();
             } else {
                 err = this->ef->eval_on_test_data(results_file_path + "_cv" + std::to_string(i) + ".dat");
             }
             cv_err_sum += err;
-            COUT_INFO("CV error (run " << i+1 << "): " << err << std::endl << std::endl);
+            COUT_INFO("CV error (run " << i + 1 << "): " << err << std::endl << std::endl);
 
             this->ef->return_full_data_set_for_training();
         }
 
-        COUT_INFO("CV error mean: " << cv_err_sum/tests_number << std::endl);
+        COUT_INFO("CV error mean: " << cv_err_sum / tests_number << std::endl);
     }
 }
diff --git a/src/CrossValidator/CrossValidator.h b/src/CrossValidator/CrossValidator.h
index 1dc1da6b30f73d747f28cca01e8d1ae9b8aec698..13b34650cc074e18df05bf42ae95aacc2a40a287 100644
--- a/src/CrossValidator/CrossValidator.h
+++ b/src/CrossValidator/CrossValidator.h
@@ -31,7 +31,8 @@ namespace lib4neuro {
          * @param optimizer
          * @param data_set
          */
-        LIB4NEURO_API CrossValidator(LearningMethod* optimizer, ErrorFunction* ef);
+        LIB4NEURO_API CrossValidator(LearningMethod* optimizer,
+                                     ErrorFunction* ef);
 
         /**
          *
@@ -40,7 +41,9 @@ namespace lib4neuro {
          * @param results_file_path
          */
         LIB4NEURO_API void
-        run_k_fold_test(unsigned int k, unsigned int test_number, std::string results_file_path = "");
+        run_k_fold_test(unsigned int k,
+                        unsigned int test_number,
+                        std::string results_file_path = "");
 
         /**
          *
@@ -48,7 +51,9 @@ namespace lib4neuro {
          * @param tests_number
          * @param results_file_path
          */
-        LIB4NEURO_API void run_k_fold_test(unsigned int k, unsigned int tests_number, std::ofstream* results_file_path);
+        LIB4NEURO_API void run_k_fold_test(unsigned int k,
+                                           unsigned int tests_number,
+                                           std::ofstream* results_file_path);
     };
 }
 
diff --git a/src/DataSet/DataSet.cpp b/src/DataSet/DataSet.cpp
index b2903a38103f6b5f240269d7503e4766d50a7bb3..83d72b4952374817bc20764c15265950b2b0b276 100644
--- a/src/DataSet/DataSet.cpp
+++ b/src/DataSet/DataSet.cpp
@@ -19,13 +19,14 @@ namespace lib4neuro {
 
     DataSet::DataSet(std::string file_path) {
         std::ifstream ifs(file_path);
-        if(ifs.is_open()) {
+        if (ifs.is_open()) {
             try {
                 boost::archive::text_iarchive ia(ifs);
                 ia >> *this;
-            }catch(boost::archive::archive_exception& e) {
-                THROW_RUNTIME_ERROR("Serialized archive error: '" + e.what() + "'! Please, check if your file is really "
-                                                                               "the serialized DataSet.");
+            } catch (boost::archive::archive_exception& e) {
+                THROW_RUNTIME_ERROR(
+                        "Serialized archive error: '" + std::string(e.what()) + "'! Please, check if your file is "
+                                                                                "really the serialized DataSet.");
             }
             ifs.close();
         } else {
@@ -43,7 +44,7 @@ namespace lib4neuro {
         this->input_dim = this->data[0].first.size();
         this->output_dim = this->data[0].second.size();
 
-        if(ns) {
+        if (ns) {
             std::shared_ptr<NormalizationStrategy> ns_tmp;
             ns_tmp.reset(ns);
             this->normalization_strategy = ns_tmp;
@@ -63,17 +64,20 @@ namespace lib4neuro {
         this->input_dim = 1;
         this->output_dim = 1;
 
-        if(ns) {
+        if (ns) {
             std::shared_ptr<NormalizationStrategy> ns_tmp(ns);
             this->normalization_strategy = ns_tmp;
         }
 
-        this->add_isotropic_data(lower_bound, upper_bound, size, output);
+        this->add_isotropic_data(lower_bound,
+                                 upper_bound,
+                                 size,
+                                 output);
     }
 
-    DataSet::DataSet(std::vector<double> &bounds,
+    DataSet::DataSet(std::vector<double>& bounds,
                      unsigned int no_elems_in_one_dim,
-                     std::vector<double> (*output_func)(std::vector<double> &),
+                     std::vector<double> (* output_func)(std::vector<double>&),
                      unsigned int output_dim,
                      NormalizationStrategy* ns) {
         std::vector<std::pair<std::vector<double>, std::vector<double>>> new_data_vec;
@@ -82,17 +86,20 @@ namespace lib4neuro {
         this->output_dim = output_dim;
         this->n_elements = 0;
 
-        if(ns) {
+        if (ns) {
             std::shared_ptr<NormalizationStrategy> ns_tmp;
             ns_tmp.reset(ns);
             this->normalization_strategy = ns_tmp;
         }
 
-        this->add_isotropic_data(bounds, no_elems_in_one_dim, output_func);
+        this->add_isotropic_data(bounds,
+                                 no_elems_in_one_dim,
+                                 output_func);
     }
 
-    void DataSet::add_data_pair(std::vector<double> &inputs, std::vector<double> &outputs) {
-        if(this->n_elements == 0 && this->input_dim == 0 && this->output_dim == 0) {
+    void DataSet::add_data_pair(std::vector<double>& inputs,
+                                std::vector<double>& outputs) {
+        if (this->n_elements == 0 && this->input_dim == 0 && this->output_dim == 0) {
             this->input_dim = inputs.size();
             this->output_dim = outputs.size();
         }
@@ -104,10 +111,14 @@ namespace lib4neuro {
         }
 
         this->n_elements++;
-        this->data.emplace_back(std::make_pair(inputs, outputs));
+        this->data.emplace_back(std::make_pair(inputs,
+                                               outputs));
     }
 
-    void DataSet::add_isotropic_data(double lower_bound, double upper_bound, unsigned int size, double output) {
+    void DataSet::add_isotropic_data(double lower_bound,
+                                     double upper_bound,
+                                     unsigned int size,
+                                     double output) {
 
         if (this->input_dim != 1 || this->output_dim != 1) {
             THROW_RUNTIME_ERROR("Cannot add data with dimensionality 1:1 when the data set "
@@ -115,7 +126,7 @@ namespace lib4neuro {
         }
 
         double frac;
-        if(size < 1) {
+        if (size < 1) {
             THROW_INVALID_ARGUMENT_ERROR("Size of added data has to be >=1 !");
         } else if (size == 1) {
             frac = 1;
@@ -129,20 +140,22 @@ namespace lib4neuro {
 
         for (unsigned int i = 0; i < size; ++i) {
             inp = {frac * i};
-            this->data.emplace_back(std::make_pair(inp, out));
+            this->data.emplace_back(std::make_pair(inp,
+                                                   out));
         }
 
         this->n_elements += size;
     }
 
-    void DataSet::add_isotropic_data(std::vector<double> &bounds, unsigned int no_elems_in_one_dim,
-                                     std::vector<double> (*output_func)(std::vector<double> &)) {
+    void DataSet::add_isotropic_data(std::vector<double>& bounds,
+                                     unsigned int no_elems_in_one_dim,
+                                     std::vector<double> (* output_func)(std::vector<double>&)) {
         // TODO add check of dataset dimensions
 
         std::vector<std::vector<double>> grid;
         std::vector<double> tmp;
         double frac;
-        if(no_elems_in_one_dim < 1) {
+        if (no_elems_in_one_dim < 1) {
             THROW_INVALID_ARGUMENT_ERROR("Number of elements in one dimension has to be >=1 !");
         }
 
@@ -150,7 +163,7 @@ namespace lib4neuro {
             if (no_elems_in_one_dim == 1) {
                 frac = 1;
             } else {
-                frac = (bounds[i] - bounds[i+1]) / (no_elems_in_one_dim - 1);
+                frac = (bounds[i] - bounds[i + 1]) / (no_elems_in_one_dim - 1);
             }
 
             tmp.clear();
@@ -165,11 +178,12 @@ namespace lib4neuro {
 
         for (auto vec : grid) {
             this->n_elements++;
-            this->data.emplace_back(std::make_pair(vec, output_func(vec)));
+            this->data.emplace_back(std::make_pair(vec,
+                                                   output_func(vec)));
         }
     }
 
-    std::vector<std::pair<std::vector<double>, std::vector<double>>> *DataSet::get_data() {
+    std::vector<std::pair<std::vector<double>, std::vector<double>>>* DataSet::get_data() {
         return &(this->data);
     }
 
@@ -208,7 +222,7 @@ namespace lib4neuro {
     void DataSet::store_text(std::string file_path) {
         std::ofstream ofs(file_path);
 
-        if(!ofs.is_open()) {
+        if (!ofs.is_open()) {
             THROW_RUNTIME_ERROR("File " + file_path + " couldn't be open!");
         } else {
             boost::archive::text_oarchive oa(ofs);
@@ -236,7 +250,7 @@ namespace lib4neuro {
     void DataSet::store_data_text(std::string file_path) {
         std::ofstream ofs(file_path);
 
-        if(!ofs.is_open()) {
+        if (!ofs.is_open()) {
             THROW_RUNTIME_ERROR("File " + file_path + " couldn't be open!");
         } else {
             for (auto e : this->data) {
@@ -256,11 +270,11 @@ namespace lib4neuro {
     }
 
     template<class T>
-    std::vector<std::vector<T>> DataSet::cartesian_product(const std::vector<std::vector<T>> *v) {
+    std::vector<std::vector<T>> DataSet::cartesian_product(const std::vector<std::vector<T>>* v) {
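+        /* Build the product incrementally: seed with one-element tuples from v->at(0),
+         * then extend each partial tuple with every element of the next dimension. */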
         std::vector<std::vector<double>> v_combined_old, v_combined, v_tmp;
         std::vector<double> tmp;
 
-        for (const auto &e : v->at(0)) {
+        for (const auto& e : v->at(0)) {
             tmp = {e};
             v_combined.emplace_back(tmp);
         }
@@ -269,13 +283,15 @@ namespace lib4neuro {
             v_combined_old = v_combined;
             v_combined.clear();
 
-            for (const auto &e : v->at(i)) {
-                for (const auto &vec : v_combined_old) {
+            for (const auto& e : v->at(i)) {
+                for (const auto& vec : v_combined_old) {
                     tmp = vec;
                     tmp.emplace_back(e);
 
                     /* Add only unique elements */
-                    if (std::find(v_combined.begin(), v_combined.end(), tmp) == v_combined.end()) {
+                    if (std::find(v_combined.begin(),
+                                  v_combined.end(),
+                                  tmp) == v_combined.end()) {
                         v_combined.emplace_back(tmp);
                     }
                 }
@@ -287,25 +303,28 @@ namespace lib4neuro {
 
     void DataSet::normalize() {
         this->normalized = false;
-        if(!this->normalization_strategy) {
+        if (!this->normalization_strategy) {
             THROW_INVALID_ARGUMENT_ERROR("There is no normalization strategy given for this data set, so it can not be "
                                          "normalized!");
         }
 
         /* Find maximum and minimum values */
-        if(this->max_min_inp_val.empty()) {
+        if (this->max_min_inp_val.empty()) {
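+            /* max_min_inp_val[0] holds the running maximum, max_min_inp_val[1] the running minimum */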
             this->max_min_inp_val.emplace_back(this->data.at(0).first.at(0));
             this->max_min_inp_val.emplace_back(this->data.at(0).first.at(0));
         }
 
         double tmp, tmp2;
-        for(auto pair : this->data) {
+        for (auto pair : this->data) {
             /* Finding maximum */
             //TODO make more efficiently
-            tmp = *std::max_element(pair.first.begin(), pair.first.end());
-            tmp2 = *std::max_element(pair.second.begin(), pair.second.end());
+            tmp = *std::max_element(pair.first.begin(),
+                                    pair.first.end());
+            tmp2 = *std::max_element(pair.second.begin(),
+                                     pair.second.end());
 
-            tmp = std::max(tmp, tmp2);
+            tmp = std::max(tmp,
+                           tmp2);
 
             /* Testing for a new maxima */
             if (tmp > this->max_min_inp_val.at(0)) {
@@ -313,10 +332,13 @@ namespace lib4neuro {
             }
 
             /* Finding minimum */
-            tmp = *std::min_element(pair.first.begin(), pair.first.end());
-            tmp2 = *std::min_element(pair.second.begin(), pair.second.end());
+            tmp = *std::min_element(pair.first.begin(),
+                                    pair.first.end());
+            tmp2 = *std::min_element(pair.second.begin(),
+                                     pair.second.end());
 
-            tmp = std::min(tmp, tmp2);
+            tmp = std::min(tmp,
+                           tmp2);
 
             /* Testing for a new minima */
             if (tmp < this->max_min_inp_val.at(1)) {
@@ -325,13 +347,17 @@ namespace lib4neuro {
         }
 
         /* Normalize every number in the data set */
-        for(auto& pair : this->data) {
-            for(auto& v : pair.first) {
-                v = this->normalization_strategy->normalize(v, this->max_min_inp_val.at(0), this->max_min_inp_val.at(1));
+        for (auto& pair : this->data) {
+            for (auto& v : pair.first) {
+                v = this->normalization_strategy->normalize(v,
+                                                            this->max_min_inp_val.at(0),
+                                                            this->max_min_inp_val.at(1));
             }
 
-            for(auto& v : pair.second) {
-                v = this->normalization_strategy->normalize(v, this->max_min_inp_val.at(0), this->max_min_inp_val.at(1));
+            for (auto& v : pair.second) {
+                v = this->normalization_strategy->normalize(v,
+                                                            this->max_min_inp_val.at(0),
+                                                            this->max_min_inp_val.at(1));
             }
         }
 
@@ -339,21 +365,25 @@ namespace lib4neuro {
 
     }
 
-    double DataSet::get_normalized_value(double val){
-        if(!this->normalized || !this->normalization_strategy) {
+    double DataSet::get_normalized_value(double val) {
+        if (!this->normalized || !this->normalization_strategy) {
             return val;
         }
-        return this->normalization_strategy->normalize(val, this->max_min_inp_val.at(0), this->max_min_inp_val.at(1));
+        return this->normalization_strategy->normalize(val,
+                                                       this->max_min_inp_val.at(0),
+                                                       this->max_min_inp_val.at(1));
     }
 
-    void DataSet::get_input(std::vector<double> &d, size_t idx){
+    void DataSet::get_input(std::vector<double>& d,
+                            size_t idx) {
         assert(d.size() == this->data[idx].first.size());
         for (size_t j = 0; j < this->data[idx].first.size(); ++j) {
             d[j] = this->data[idx].first[j];
         }
     }
 
-    void DataSet::get_output(std::vector<double> &d, size_t idx){
+    void DataSet::get_output(std::vector<double>& d,
+                             size_t idx) {
         assert(d.size() == this->data[idx].second.size());
         for (size_t j = 0; j < this->data[idx].second.size(); ++j) {
             d[j] = this->data[idx].second[j];
@@ -364,15 +394,15 @@ namespace lib4neuro {
         std::vector<double> tmp_inp(this->data.at(0).first.size());
         std::vector<double> tmp_out(this->data.at(0).second.size());
 
-        for(auto& pair: this->data) {
-            for(size_t i=0; i < pair.first.size(); i++) {
+        for (auto& pair: this->data) {
+            for (size_t i = 0; i < pair.first.size(); i++) {
                 tmp_inp.at(i) = this->normalization_strategy->de_normalize(pair.first.at(i));
             }
             pair.first = tmp_inp;
         }
 
-        for(auto& pair: this->data) {
-            for(size_t i=0; i < pair.second.size(); i++) {
+        for (auto& pair: this->data) {
+            for (size_t i = 0; i < pair.second.size(); i++) {
                 tmp_out.at(i) = this->normalization_strategy->de_normalize(pair.second.at(i));
             }
             pair.second = tmp_out;
@@ -382,7 +412,8 @@ namespace lib4neuro {
         this->max_min_inp_val.clear();
     }
 
-    void DataSet::de_normalize_single(std::vector<double> &d1, std::vector<double> &d2){
+    void DataSet::de_normalize_single(std::vector<double>& d1,
+                                      std::vector<double>& d2) {
         assert(d1.size() == d2.size());
         for (size_t j = 0; j < d1.size(); ++j) {
             d2[j] = this->normalization_strategy->de_normalize(d1[j]);
@@ -394,7 +425,7 @@ namespace lib4neuro {
     }
 
     void DataSet::set_normalization_strategy(NormalizationStrategy* ns) {
-        if( ns ){
+        if (ns) {
             this->normalization_strategy.reset(ns);
         }
     }
@@ -421,20 +452,23 @@ namespace lib4neuro {
             std::vector<std::pair<std::vector<double>, std::vector<double>>> newData;
             srand(time(NULL));  //TODO use Mersen twister from Boost
 
-            size_t n_chosen = rand() % std::min(max, this->data.size())+1;
+            size_t n_chosen = rand() % std::min(max,
+                                                this->data.size()) + 1;
             n_chosen = max;
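+            // Note: the randomized value above is immediately overwritten, so the batch size is always 'max'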
             std::vector<size_t> chosens;
             size_t chosen;
 
             for (int i = 0; i < n_chosen; i++) {
                 chosen = rand() % this->data.size();
-                auto it = std::find(chosens.begin(), chosens.end(), chosen);
+                auto it = std::find(chosens.begin(),
+                                    chosens.end(),
+                                    chosen);
 
                 if (it != chosens.end()) {
                     i--;
                 } else {
                     newData.push_back(this->data.at(chosen));
-                    chosens.push_back( chosen );
+                    chosens.push_back(chosen);
                 }
             }
 
diff --git a/src/DataSet/DataSet.h b/src/DataSet/DataSet.h
index 34c4527f7e5469a3ffe6a596dbc42b3b4b484c38..b273666923bb007fb1f919acf5227571e2aeb241 100644
--- a/src/DataSet/DataSet.h
+++ b/src/DataSet/DataSet.h
@@ -60,7 +60,7 @@ namespace lib4neuro {
          * @return
          */
         template<class T>
-        std::vector<std::vector<T>> cartesian_product(const std::vector<std::vector<T>> *v);
+        std::vector<std::vector<T>> cartesian_product(const std::vector<std::vector<T>>* v);
 
         /**
          *
@@ -92,7 +92,7 @@ namespace lib4neuro {
          * Constructor accepting data vector
          * @param data_ptr Pointer to the vector containing data
          */
-        LIB4NEURO_API DataSet(std::vector<std::pair<std::vector<double>, std::vector<double>>> *data_ptr,
+        LIB4NEURO_API DataSet(std::vector<std::pair<std::vector<double>, std::vector<double>>>* data_ptr,
                               NormalizationStrategy* ns = nullptr);
 
         /**
@@ -122,9 +122,9 @@ namespace lib4neuro {
          * @param output_func
          * @param output_dim
          */
-        LIB4NEURO_API DataSet(std::vector<double> &bounds,
+        LIB4NEURO_API DataSet(std::vector<double>& bounds,
                               unsigned int no_elems_in_one_dim,
-                              std::vector<double> (*output_func)(std::vector<double> &),
+                              std::vector<double> (* output_func)(std::vector<double>&),
                               unsigned int output_dim,
                               NormalizationStrategy* ns = nullptr);
 
@@ -150,14 +150,15 @@ namespace lib4neuro {
          * Getter for the data structure
          * @return Vector of data
          */
-        LIB4NEURO_API std::vector<std::pair<std::vector<double>, std::vector<double>>> *get_data();
+        LIB4NEURO_API std::vector<std::pair<std::vector<double>, std::vector<double>>>* get_data();
 
         /**
          * Adds a new pair of data to the data set
          * @param inputs Vector of input data
          * @param outputs Vector of output data corresponding to the input data
          */
-        LIB4NEURO_API void add_data_pair(std::vector<double> &inputs, std::vector<double> &outputs);
+        LIB4NEURO_API void add_data_pair(std::vector<double>& inputs,
+                                         std::vector<double>& outputs);
 
         //TODO expand method to generate multiple data types - chebyshev etc.
         /**
@@ -172,7 +173,10 @@ namespace lib4neuro {
          * @param size Number of input-output pairs generated
          * @param output Constant output value
          */
-        LIB4NEURO_API void add_isotropic_data(double lower_bound, double upper_bound, unsigned int size, double output);
+        LIB4NEURO_API void add_isotropic_data(double lower_bound,
+                                              double upper_bound,
+                                              unsigned int size,
+                                              double output);
 
         /**
          * Adds a new data with input values equidistantly positioned
@@ -186,8 +190,9 @@ namespace lib4neuro {
          * @param size Number of input-output pairs generated
          * @param output_func Function determining output value
          */
-        LIB4NEURO_API void add_isotropic_data(std::vector<double> &bounds, unsigned int no_elems_in_one_dim,
-                                              std::vector<double> (*output_func)(std::vector<double> &));
+        LIB4NEURO_API void add_isotropic_data(std::vector<double>& bounds,
+                                              unsigned int no_elems_in_one_dim,
+                                              std::vector<double> (* output_func)(std::vector<double>&));
 
         //TODO Chebyshev - ch. interpolation points, i-th point = cos(i*alpha) from 0 to pi
 
@@ -237,21 +242,24 @@ namespace lib4neuro {
          * @param d1
          * @param d2
          */
-        LIB4NEURO_API void de_normalize_single(std::vector<double> &d1, std::vector<double> &d2);
+        LIB4NEURO_API void de_normalize_single(std::vector<double>& d1,
+                                               std::vector<double>& d2);
 
         /**
          * stores the @idx-th input in the vector @d
          * @param d
          * @param idx
          */
-        LIB4NEURO_API void get_input(std::vector<double> &d, size_t idx);
+        LIB4NEURO_API void get_input(std::vector<double>& d,
+                                     size_t idx);
 
         /**
          * stores the @idx-th output in the vector @d
          * @param d
          * @param idx
          */
-        LIB4NEURO_API void get_output(std::vector<double> &d, size_t idx);
+        LIB4NEURO_API void get_output(std::vector<double>& d,
+                                      size_t idx);
 
         /**
          *
@@ -284,7 +292,8 @@ namespace lib4neuro {
          * @param max
          * @return
          */
-        LIB4NEURO_API  std::vector<std::pair<std::vector<double>, std::vector<double>>> get_random_data_batch(size_t max);
+        LIB4NEURO_API std::vector<std::pair<std::vector<double>, std::vector<double>>>
+        get_random_data_batch(size_t max);
     };
 }
 #endif //INC_4NEURO_DATASET_H
diff --git a/src/DataSet/DataSetSerialization.h b/src/DataSet/DataSetSerialization.h
index 94488ff40a5bf971a3087163b884c06a20910049..89a8a8beba57f9fd0ca647b126a83b4c7acc938d 100644
--- a/src/DataSet/DataSetSerialization.h
+++ b/src/DataSet/DataSetSerialization.h
@@ -18,7 +18,9 @@ BOOST_CLASS_EXPORT_KEY(lib4neuro::DataSet);
 namespace lib4neuro {
     struct DataSet::access {
         template<class Archive>
-        static void serialize(Archive &ar, DataSet &ds, const unsigned int version) {
+        static void serialize(Archive& ar,
+                              DataSet& ds,
+                              const unsigned int version) {
             ar & ds.n_elements;
             ar & ds.input_dim;
             ar & ds.output_dim;
@@ -40,9 +42,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, lib4neuro::DataSet & ds, const unsigned int version)
-        {
-            lib4neuro::DataSet::access::serialize(ar, ds, version);
+        void serialize(Archive& ar,
+                       lib4neuro::DataSet& ds,
+                       const unsigned int version) {
+            lib4neuro::DataSet::access::serialize(ar,
+                                                  ds,
+                                                  version);
         }
 
     } // namespace serialization
diff --git a/src/ErrorFunction/ErrorFunctions.cpp b/src/ErrorFunction/ErrorFunctions.cpp
index 7df449cc4a002db3437d0a6cb639a00098b0de9a..7b56e1bfa2b5bf2d979885475784f758f6882ca3 100644
--- a/src/ErrorFunction/ErrorFunctions.cpp
+++ b/src/ErrorFunction/ErrorFunctions.cpp
@@ -27,7 +27,8 @@ namespace lib4neuro {
 
         /* Choose random subset of the DataSet for training and the remaining part for validation */
         boost::random::mt19937 gen;
-        boost::random::uniform_int_distribution<> dist(0, ds_size - 1);
+        boost::random::uniform_int_distribution<> dist(0,
+                                                       ds_size - 1);
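+        /* The generator is default-seeded, so the same split is drawn on every program run;
+         * sampled indices may also repeat, so 'test_indices' can contain duplicates. */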
 
         size_t test_set_size = ceil(ds_size * percent_test);
 
@@ -36,7 +37,9 @@ namespace lib4neuro {
         for (unsigned int i = 0; i < test_set_size; i++) {
             test_indices.emplace_back(dist(gen));
         }
-        std::sort(test_indices.begin(), test_indices.end(), std::greater<unsigned int>());
+        std::sort(test_indices.begin(),
+                  test_indices.end(),
+                  std::greater<unsigned int>());
 
         std::vector<std::pair<std::vector<double>, std::vector<double>>> test_data, train_data;
 
@@ -52,10 +55,12 @@ namespace lib4neuro {
         }
 
         /* Re-initialize data set for training */
-        this->ds = new DataSet(&train_data, this->ds_full->get_normalization_strategy());
+        this->ds = new DataSet(&train_data,
+                               this->ds_full->get_normalization_strategy());
 
         /* Initialize test data */
-        this->ds_test = new DataSet(&test_data, this->ds_full->get_normalization_strategy());
+        this->ds_test = new DataSet(&test_data,
+                                    this->ds_full->get_normalization_strategy());
     }
 
     void ErrorFunction::return_full_data_set_for_training() {
@@ -72,8 +77,8 @@ namespace lib4neuro {
         return this->ds_test;
     }
 
-    std::vector<double>  ErrorFunction::get_parameters() {
-        std::vector<double>  output(this->net->get_n_weights() + this->net->get_n_biases());
+    std::vector<double> ErrorFunction::get_parameters() {
+        std::vector<double> output(this->net->get_n_weights() + this->net->get_n_biases());
 
         size_t i = 0;
 
@@ -90,7 +95,8 @@ namespace lib4neuro {
         return output;
     }
 
-    MSE::MSE(NeuralNetwork* net, DataSet* ds) {
+    MSE::MSE(NeuralNetwork* net,
+             DataSet* ds) {
         this->net = net;
         this->ds = ds;
         this->dimension = net->get_n_weights() + net->get_n_biases();
@@ -98,15 +104,17 @@ namespace lib4neuro {
 
     double MSE::eval_on_single_input(std::vector<double>* input,
                                      std::vector<double>* output,
-                                     std::vector<double>*  weights) {
+                                     std::vector<double>* weights) {
         std::vector<double> predicted_output(this->get_network_instance()->get_n_outputs());
-        this->net->eval_single(*input, predicted_output, weights);
+        this->net->eval_single(*input,
+                               predicted_output,
+                               weights);
         double result = 0;
         double val;
 
-        for(size_t i = 0; i < output->size(); i++) {
+        for (size_t i = 0; i < output->size(); i++) {
             val = output->at(i) - predicted_output.at(i);
-            result += val*val;
+            result += val * val;
         }
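+        /* The returned value is the Euclidean (L2) norm of the error on this single sample */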
 
         return std::sqrt(result);
@@ -114,7 +122,7 @@ namespace lib4neuro {
 
     double MSE::eval_on_data_set(lib4neuro::DataSet* data_set,
                                  std::ofstream* results_file_path,
-                                 std::vector<double>*  weights,
+                                 std::vector<double>* weights,
                                  bool denormalize_data,
                                  bool verbose) {
         size_t dim_in = data_set->get_input_dim();
@@ -170,7 +178,7 @@ namespace lib4neuro {
             std::stringstream ss_input;
             std::string separator = "";
             for (auto j = 0; j < dim_in; j++) {
-                if(denormalize_data) {
+                if (denormalize_data) {
                     denormalized_real_input = data_set->get_normalization_strategy()->de_normalize(data->at(i).first.at(j));
                 } else {
                     denormalized_real_input = data->at(i).first.at(j);
@@ -178,7 +186,7 @@ namespace lib4neuro {
                 ss_input << separator << denormalized_real_input;
                 separator = ",";
             }
-            if(denormalize_data) {
+            if (denormalize_data) {
                 denormalized_real_input = data_set->get_normalization_strategy()->de_normalize(data->at(i).first.back());
             } else {
                 denormalized_real_input = data->at(i).first.back();
@@ -256,7 +264,7 @@ namespace lib4neuro {
 
     double MSE::eval_on_data_set(DataSet* data_set,
                                  std::string results_file_path,
-                                 std::vector<double>*  weights,
+                                 std::vector<double>* weights,
                                  bool verbose) {
         std::ofstream ofs(results_file_path);
         if (ofs.is_open()) {
@@ -272,7 +280,7 @@ namespace lib4neuro {
     }
 
     double MSE::eval_on_data_set(DataSet* data_set,
-                                 std::vector<double>*  weights,
+                                 std::vector<double>* weights,
                                  bool verbose) {
         return this->eval_on_data_set(data_set,
                                       nullptr,
@@ -291,7 +299,7 @@ namespace lib4neuro {
                                       verbose);
     }
 
-    double MSE::eval_on_test_data(std::vector<double>*  weights,
+    double MSE::eval_on_test_data(std::vector<double>* weights,
                                   bool verbose) {
         return this->eval_on_data_set(this->ds_test,
                                       weights,
@@ -299,7 +307,7 @@ namespace lib4neuro {
     }
 
     double MSE::eval_on_test_data(std::string results_file_path,
-                                  std::vector<double>*  weights,
+                                  std::vector<double>* weights,
                                   bool verbose) {
         return this->eval_on_data_set(this->ds_test,
                                       results_file_path,
@@ -308,7 +316,7 @@ namespace lib4neuro {
     }
 
     double MSE::eval_on_test_data(std::ofstream* results_file_path,
-                                  std::vector<double>*  weights,
+                                  std::vector<double>* weights,
                                   bool verbose) {
         return this->eval_on_data_set(this->ds_test,
                                       results_file_path,
@@ -335,7 +343,8 @@ namespace lib4neuro {
 
         for (auto el: *data) {  // Iterate through every element in the test set
 
-            this->net->eval_single(el.first, error_derivative,
+            this->net->eval_single(el.first,
+                                   error_derivative,
                                    &params);  // Compute the net output and store it into 'error_derivative'
 
             for (size_t j = 0; j < dim_out; ++j) {
@@ -356,7 +365,9 @@ namespace lib4neuro {
         //TODO maybe move to the general ErrorFunction
         //TODO check input vector sizes - they HAVE TO be allocated before calling this function
 
-        return -this->eval_on_single_input(input, output, parameters);
+        return -this->eval_on_single_input(input,
+                                           output,
+                                           parameters);
     }
 
     void MSE::calculate_residual_gradient(std::vector<double>* input,
@@ -378,16 +389,20 @@ namespace lib4neuro {
             delta = h * (1 + std::abs(parameters[i]));
             former_parameter_value = parameters[i];
 
-            if(delta != 0) {
+            if (delta != 0) {
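+                /* Central difference: grad_i ~ (f(p_i + delta) - f(p_i - delta)) / (2 * delta),
+                 * using the relative step delta = h * (1 + |p_i|) computed above. */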
                 /* Computation of f_val1 = f(x + delta) */
                 parameters[i] = former_parameter_value + delta;
-                f_val1 = this->calculate_single_residual(input, output, &parameters);
+                f_val1 = this->calculate_single_residual(input,
+                                                         output,
+                                                         &parameters);
 
                 /* Computation of f_val2 = f(x - delta) */
                 parameters[i] = former_parameter_value - delta;
-                f_val2 = this->calculate_single_residual(input, output, &parameters);
+                f_val2 = this->calculate_single_residual(input,
+                                                         output,
+                                                         &parameters);
 
-                gradient->at(i) = (f_val1 - f_val2) / (2*delta);
+                gradient->at(i) = (f_val1 - f_val2) / (2 * delta);
             }
 
             /* Restore parameter to the former value */
@@ -395,15 +410,23 @@ namespace lib4neuro {
         }
     }
 
-    void MSE::calculate_error_gradient_single(std::vector<double> &error_vector,
-                                              std::vector<double> &gradient_vector) {
-        std::fill(gradient_vector.begin(), gradient_vector.end(), 0);
+    void MSE::calculate_error_gradient_single(std::vector<double>& error_vector,
+                                              std::vector<double>& gradient_vector) {
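+        /* Zero the accumulator, then add this sample's gradient contribution via the network */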
+        std::fill(gradient_vector.begin(),
+                  gradient_vector.end(),
+                  0);
         std::vector<double> dummy_input;
-        this->net->add_to_gradient_single( dummy_input, error_vector, 1.0, gradient_vector);
+        this->net->add_to_gradient_single(dummy_input,
+                                          error_vector,
+                                          1.0,
+                                          gradient_vector);
     }
 
     void
-    MSE::analyze_error_gradient(std::vector<double>& params, std::vector<double>& grad, double alpha, size_t batch) {
+    MSE::analyze_error_gradient(std::vector<double>& params,
+                                std::vector<double>& grad,
+                                double alpha,
+                                size_t batch) {
 
         size_t dim_out = this->ds->get_output_dim();
         size_t n_elements = this->ds->get_n_elements();
@@ -416,27 +439,30 @@ namespace lib4neuro {
         std::vector<double> error_derivative(dim_out);
 
         std::vector<double> grad_sum(grad.size());
-        std::fill(grad_sum.begin(), grad_sum.end(), 0.0);
+        std::fill(grad_sum.begin(),
+                  grad_sum.end(),
+                  0.0);
         this->net->write_weights();
         this->net->write_biases();
         for (auto el: *data) {  // Iterate through every element in the test set
 
-            this->net->eval_single_debug(el.first, error_derivative,
-                                   &params);  // Compute the net output and store it into 'output' variable
+            this->net->eval_single_debug(el.first,
+                                         error_derivative,
+                                         &params);  // Compute the net output and store it into 'error_derivative'
             std::cout << "Input[";
-            for( auto v: el.first){
+            for (auto v: el.first) {
                 std::cout << v << ", ";
             }
             std::cout << "]";
 
             std::cout << " Desired Output[";
-            for( auto v: el.second){
+            for (auto v: el.second) {
                 std::cout << v << ", ";
             }
             std::cout << "]";
 
             std::cout << " Real Output[";
-            for( auto v: error_derivative){
+            for (auto v: error_derivative) {
                 std::cout << v << ", ";
             }
             std::cout << "]";
@@ -445,19 +471,24 @@ namespace lib4neuro {
                 error_derivative[j] = 2.0 * (error_derivative[j] - el.second[j]); //real - expected result
             }
             std::cout << " Error derivative[";
-            for( auto v: error_derivative){
+            for (auto v: error_derivative) {
                 std::cout << v << ", ";
             }
             std::cout << "]";
 
-            std::fill( grad.begin(), grad.end(), 0.0);
-            this->net->add_to_gradient_single_debug(el.first, error_derivative, 1.0, grad);
-            for(size_t i = 0; i < grad.size(); ++i){
+            std::fill(grad.begin(),
+                      grad.end(),
+                      0.0);
+            this->net->add_to_gradient_single_debug(el.first,
+                                                    error_derivative,
+                                                    1.0,
+                                                    grad);
+            for (size_t i = 0; i < grad.size(); ++i) {
                 grad_sum[i] += grad[i];
             }
 
             std::cout << " Gradient[";
-            for( auto v: grad){
+            for (auto v: grad) {
                 std::cout << v << ", ";
             }
             std::cout << "]";
@@ -465,17 +496,20 @@ namespace lib4neuro {
             std::cout << std::endl;
         }
         std::cout << " Total gradient[";
-        for( auto v: grad_sum){
+        for (auto v: grad_sum) {
             std::cout << v << ", ";
         }
         std::cout << "]" << std::endl << std::endl;
     }
 
-    double MSE::eval_single_item_by_idx(size_t i, std::vector<double>*  parameter_vector,
-                                        std::vector<double> &error_vector) {
+    double MSE::eval_single_item_by_idx(size_t i,
+                                        std::vector<double>* parameter_vector,
+                                        std::vector<double>& error_vector) {
         double output = 0, val;
 
-        this->net->eval_single(this->get_dataset()->get_data()->at(i).first, error_vector, parameter_vector);
+        this->net->eval_single(this->get_dataset()->get_data()->at(i).first,
+                               error_vector,
+                               parameter_vector);
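+        /* 'error_vector' now holds the raw network output; it is overwritten below
+         * with the error derivative 2 * (predicted - expected). */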
 
         for (size_t j = 0; j < error_vector.size(); ++j) {  // Compute difference for every element of the output vector
             val = error_vector.at(j) - this->get_dataset()->get_data()->at(i).second.at(j);
@@ -483,7 +517,8 @@ namespace lib4neuro {
         }
 
         for (size_t j = 0; j < error_vector.size(); ++j) {
-            error_vector[j] = 2.0 * (error_vector[j] - this->get_dataset()->get_data()->at(i).second[j]); //real - expected result
+            error_vector[j] =
+                    2.0 * (error_vector[j] - this->get_dataset()->get_data()->at(i).second[j]); //real - expected result
         }
 
         return sqrt(output);
@@ -500,7 +535,7 @@ namespace lib4neuro {
         }
     }
 
-    double ErrorSum::eval_on_test_data(std::vector<double>*  weights,
+    double ErrorSum::eval_on_test_data(std::vector<double>* weights,
                                        bool verbose) {
         //TODO take care of the case, when there are no test data
 
@@ -519,7 +554,7 @@ namespace lib4neuro {
     }
 
     double ErrorSum::eval_on_test_data(std::string results_file_path,
-                                       std::vector<double>*  weights,
+                                       std::vector<double>* weights,
                                        bool verbose) {
         THROW_NOT_IMPLEMENTED_ERROR();
 
@@ -527,14 +562,14 @@ namespace lib4neuro {
     }
 
     double ErrorSum::eval_on_test_data(std::ofstream* results_file_path,
-                                       std::vector<double>*  weights,
+                                       std::vector<double>* weights,
                                        bool verbose) {
         THROW_NOT_IMPLEMENTED_ERROR();
         return -1;
     }
 
     double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set,
-                                      std::vector<double>*  weights,
+                                      std::vector<double>* weights,
                                       bool verbose) {
         THROW_NOT_IMPLEMENTED_ERROR();
 
@@ -543,7 +578,7 @@ namespace lib4neuro {
 
     double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set,
                                       std::string results_file_path,
-                                      std::vector<double>*  weights,
+                                      std::vector<double>* weights,
                                       bool verbose) {
         THROW_NOT_IMPLEMENTED_ERROR();
 
@@ -552,14 +587,14 @@ namespace lib4neuro {
 
     double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set,
                                       std::ofstream* results_file_path,
-                                      std::vector<double>*  weights,
+                                      std::vector<double>* weights,
                                       bool denormalize_data,
                                       bool verbose) {
         THROW_NOT_IMPLEMENTED_ERROR();
         return -1;
     }
 
-    double ErrorSum::eval(std::vector<double>*  weights,
+    double ErrorSum::eval(std::vector<double>* weights,
                           bool denormalize_data,
                           bool verbose) {
         double output = 0.0;
@@ -576,20 +611,25 @@ namespace lib4neuro {
         return output;
     }
 
-    double ErrorSum::eval_single_item_by_idx(size_t i, std::vector<double>* parameter_vector,
-                                             std::vector<double> &error_vector) {
+    double ErrorSum::eval_single_item_by_idx(size_t i,
+                                             std::vector<double>* parameter_vector,
+                                             std::vector<double>& error_vector) {
         double output = 0.0;
         ErrorFunction* ef = nullptr;
-        std::fill(error_vector.begin(), error_vector.end(), 0);
+        std::fill(error_vector.begin(),
+                  error_vector.end(),
+                  0);
 
         std::vector<double> error_vector_mem(error_vector.size());
         for (size_t j = 0; j < this->summand->size(); ++j) {
             ef = this->summand->at(j);
 
             if (ef) {
-                output += ef->eval_single_item_by_idx(i, parameter_vector, error_vector_mem) * this->summand_coefficient.at(j);
+                output += ef->eval_single_item_by_idx(i,
+                                                      parameter_vector,
+                                                      error_vector_mem) * this->summand_coefficient.at(j);
 
-                for( size_t k = 0; k < error_vector_mem.size(); ++k){
+                for (size_t k = 0; k < error_vector_mem.size(); ++k) {
                     error_vector[k] += error_vector_mem[k] * this->summand_coefficient.at(j);
                 }
             }
@@ -598,7 +638,9 @@ namespace lib4neuro {
         return output;
     }
 
-    void ErrorSum::calculate_error_gradient(std::vector<double>& params, std::vector<double>& grad, double alpha,
+    void ErrorSum::calculate_error_gradient(std::vector<double>& params,
+                                            std::vector<double>& grad,
+                                            double alpha,
                                             size_t batch) {
 
         ErrorFunction* ef = nullptr;
@@ -606,30 +648,39 @@ namespace lib4neuro {
             ef = this->summand->at(i);
 
             if (ef) {
-                ef->calculate_error_gradient(params, grad, this->summand_coefficient.at(i) * alpha, batch);
+                ef->calculate_error_gradient(params,
+                                             grad,
+                                             this->summand_coefficient.at(i) * alpha,
+                                             batch);
             }
         }
     }
 
-    void ErrorSum::calculate_error_gradient_single(std::vector<double> &error_vector,
-                                                   std::vector<double> &gradient_vector) {
+    void ErrorSum::calculate_error_gradient_single(std::vector<double>& error_vector,
+                                                   std::vector<double>& gradient_vector) {
         COUT_INFO("ErrorSum::calculate_error_gradient_single NOT YET IMPLEMENTED!!!");
     }
 
-    void ErrorSum::analyze_error_gradient(std::vector<double>& params, std::vector<double>& grad, double alpha,
-                                            size_t batch) {
+    void ErrorSum::analyze_error_gradient(std::vector<double>& params,
+                                          std::vector<double>& grad,
+                                          double alpha,
+                                          size_t batch) {
 
         ErrorFunction* ef = nullptr;
         for (size_t i = 0; i < this->summand->size(); ++i) {
             ef = this->summand->at(i);
 
             if (ef) {
-                ef->calculate_error_gradient(params, grad, this->summand_coefficient.at(i) * alpha, batch);
+                ef->calculate_error_gradient(params,
+                                             grad,
+                                             this->summand_coefficient.at(i) * alpha,
+                                             batch);
             }
         }
     }
 
-    void ErrorSum::add_error_function(ErrorFunction* F, double alpha) {
+    void ErrorSum::add_error_function(ErrorFunction* F,
+                                      double alpha) {
         if (!this->summand) {
             this->summand = new std::vector<ErrorFunction*>(0);
         }
@@ -648,7 +699,7 @@ namespace lib4neuro {
         return this->dimension;
     }
 
-    std::vector<double>  ErrorSum::get_parameters() {
+    std::vector<double> ErrorSum::get_parameters() {
         return this->summand->at(0)->get_parameters();
     }
 
@@ -658,15 +709,15 @@ namespace lib4neuro {
 
 
     void ErrorSum::calculate_residual_gradient(std::vector<double>* input,
-                                                             std::vector<double>* output,
-                                                             std::vector<double>* gradient,
-                                                             double h) {
+                                               std::vector<double>* output,
+                                               std::vector<double>* gradient,
+                                               double h) {
         THROW_NOT_IMPLEMENTED_ERROR();
     }
 
     double ErrorSum::calculate_single_residual(std::vector<double>* input,
-                                                             std::vector<double>* output,
-                                                             std::vector<double>* parameters) {
+                                               std::vector<double>* output,
+                                               std::vector<double>* parameters) {
         THROW_NOT_IMPLEMENTED_ERROR();
 
         return 0;
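
For reference, the non-stub part of ErrorSum above is a weighted sum: add_error_function(F, alpha) appends F to summand and alpha to summand_coefficient, and evaluation accumulates alpha_j * F_j. A minimal standalone sketch of that accumulation pattern, with std::function standing in for ErrorFunction* (the names in the sketch are illustrative, not lib4neuro API):

```cpp
#include <cmath>
#include <functional>
#include <iostream>
#include <utility>
#include <vector>

// Stand-in for ErrorSum's storage: summands and their coefficients live in
// parallel vectors, and eval() returns sum_j coefficient[j] * summand[j](w).
struct WeightedErrorSum {
    std::vector<std::function<double(const std::vector<double>&)>> summand;
    std::vector<double> summand_coefficient;

    void add_error_function(std::function<double(const std::vector<double>&)> f,
                            double alpha = 1.0) {
        summand.push_back(std::move(f));
        summand_coefficient.push_back(alpha);
    }

    double eval(const std::vector<double>& weights) const {
        double output = 0.0;
        for (std::size_t j = 0; j < summand.size(); ++j) {
            output += summand_coefficient.at(j) * summand.at(j)(weights);
        }
        return output;
    }
};

int main() {
    WeightedErrorSum sum;
    sum.add_error_function([](const std::vector<double>& w) { return w[0] * w[0]; });
    sum.add_error_function([](const std::vector<double>& w) { return std::abs(w[1]); },
                           0.5);
    std::cout << sum.eval({2.0, 4.0}) << std::endl;  // 1.0 * 4 + 0.5 * 4 = 6
    return 0;
}
```
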
diff --git a/src/ErrorFunction/ErrorFunctions.h b/src/ErrorFunction/ErrorFunctions.h
index 845f7f16de0df34c077acad966c809badbdad023..c75f0a88164f15ad2c83da2dd35f716acf5ff326 100644
--- a/src/ErrorFunction/ErrorFunctions.h
+++ b/src/ErrorFunction/ErrorFunctions.h
@@ -24,7 +24,8 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        virtual double eval(std::vector<double>* weights = nullptr, bool denormalize_data=false,
+        virtual double eval(std::vector<double>* weights = nullptr,
+                            bool denormalize_data = false,
                             bool verbose = false) = 0;
 
         /**
@@ -55,9 +56,9 @@ namespace lib4neuro {
          */
         virtual void
         analyze_error_gradient(std::vector<double>& params,
-                                 std::vector<double>& grad,
-                                 double alpha = 1.0,
-                                 size_t batch = 0) = 0;
+                               std::vector<double>& grad,
+                               double alpha = 1.0,
+                               size_t batch = 0) = 0;
 
         /**
          *
@@ -97,7 +98,8 @@ namespace lib4neuro {
         /**
          *
          */
-        virtual double eval_on_test_data(std::vector<double>* weights = nullptr, bool verbose = false) = 0;
+        virtual double eval_on_test_data(std::vector<double>* weights = nullptr,
+                                         bool verbose = false) = 0;
 
         /**
          *
@@ -105,8 +107,9 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        virtual double eval_on_test_data(std::string results_file_path, std::vector<double>* weights = nullptr,
-                bool verbose = false) = 0;
+        virtual double eval_on_test_data(std::string results_file_path,
+                                         std::vector<double>* weights = nullptr,
+                                         bool verbose = false) = 0;
 
         /**
          *
@@ -114,8 +117,9 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        virtual double eval_on_test_data(std::ofstream* results_file_path, std::vector<double>* weights = nullptr,
-                bool verbose = false) = 0;
+        virtual double eval_on_test_data(std::ofstream* results_file_path,
+                                         std::vector<double>* weights = nullptr,
+                                         bool verbose = false) = 0;
 
         /**
          *
@@ -123,8 +127,9 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        virtual double eval_on_data_set(DataSet* data_set, std::vector<double>* weights = nullptr,
-                bool verbose = false) = 0;
+        virtual double eval_on_data_set(DataSet* data_set,
+                                        std::vector<double>* weights = nullptr,
+                                        bool verbose = false) = 0;
 
         /**
          *
@@ -134,8 +139,10 @@ namespace lib4neuro {
          * @return
          */
         virtual double
-        eval_on_data_set(DataSet* data_set, std::string results_file_path, std::vector<double>* weights = nullptr,
-                bool verbose = false) = 0;
+        eval_on_data_set(DataSet* data_set,
+                         std::string results_file_path,
+                         std::vector<double>* weights = nullptr,
+                         bool verbose = false) = 0;
 
         /**
          *
@@ -157,14 +164,17 @@ namespace lib4neuro {
          * @param error_vector
          * @return
          */
-        virtual double eval_single_item_by_idx(size_t  i, std::vector<double>* parameter_vector, std::vector<double> &error_vector) = 0;
+        virtual double eval_single_item_by_idx(size_t i,
+                                               std::vector<double>* parameter_vector,
+                                               std::vector<double>& error_vector) = 0;
 
         /**
          *
          * @param error_vector
          * @param gradient_vector
          */
-        virtual void calculate_error_gradient_single(std::vector<double> &error_vector, std::vector<double> &gradient_vector) = 0;
+        virtual void calculate_error_gradient_single(std::vector<double>& error_vector,
+                                                     std::vector<double>& gradient_vector) = 0;
 
         /**
          *
@@ -219,7 +229,7 @@ namespace lib4neuro {
         DataSet* ds_test = nullptr;
     };
 
-    class MSE : public ErrorFunction{
+    class MSE : public ErrorFunction {
 
     public:
         /**
@@ -227,7 +237,8 @@ namespace lib4neuro {
          * @param net
          * @param ds
          */
-        LIB4NEURO_API MSE(NeuralNetwork* net, DataSet* ds);
+        LIB4NEURO_API MSE(NeuralNetwork* net,
+                          DataSet* ds);
 
         /**
          *
@@ -237,7 +248,7 @@ namespace lib4neuro {
         LIB4NEURO_API double eval(std::vector<double>* weights = nullptr,
                                   bool denormalize_data = false,
                                   bool verbose = false) override;
-    
+
         /**
          *
          * @param params
@@ -260,9 +271,9 @@ namespace lib4neuro {
          */
         LIB4NEURO_API void
         analyze_error_gradient(std::vector<double>& params,
-                                 std::vector<double>& grad,
-                                 double alpha = 1.0,
-                                 size_t batch = 0) override;
+                               std::vector<double>& grad,
+                               double alpha = 1.0,
+                               size_t batch = 0) override;
 
         /**
          * Evaluates the function f(x) = 0 - MSE(x) for a
@@ -272,9 +283,9 @@ namespace lib4neuro {
          * @return
          */
         LIB4NEURO_API
-         double calculate_single_residual(std::vector<double>* input,
-                                                 std::vector<double>* output,
-                                                 std::vector<double>* parameters) override;
+        double calculate_single_residual(std::vector<double>* input,
+                                         std::vector<double>* output,
+                                         std::vector<double>* parameters) override;
 
         /**
          * Compute gradient of the residual function f(x) = 0 - MSE(x) for a specific input x.
@@ -288,7 +299,7 @@ namespace lib4neuro {
         calculate_residual_gradient(std::vector<double>* input,
                                     std::vector<double>* output,
                                     std::vector<double>* gradient,
-                                    double h=1e-3) override;
+                                    double h = 1e-3) override;
 
         /**
          *
@@ -304,7 +315,8 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        LIB4NEURO_API double eval_on_test_data(std::vector<double>* weights = nullptr, bool verbose = false) override;
+        LIB4NEURO_API double eval_on_test_data(std::vector<double>* weights = nullptr,
+                                               bool verbose = false) override;
 
         /**
          *
@@ -368,14 +380,17 @@ namespace lib4neuro {
          * @param error_vector
          * @return
          */
-        LIB4NEURO_API double eval_single_item_by_idx(size_t  i, std::vector<double>* parameter_vector, std::vector<double> &error_vector) override;
+        LIB4NEURO_API double eval_single_item_by_idx(size_t i,
+                                                     std::vector<double>* parameter_vector,
+                                                     std::vector<double>& error_vector) override;
 
         /**
          *
          * @param error_vector
          * @param gradient_vector
          */
-        LIB4NEURO_API void calculate_error_gradient_single(std::vector<double> &error_vector, std::vector<double> &gradient_vector) override;
+        LIB4NEURO_API void calculate_error_gradient_single(std::vector<double>& error_vector,
+                                                           std::vector<double>& gradient_vector) override;
     };
 
     class ErrorSum : public ErrorFunction {
@@ -404,7 +419,8 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        LIB4NEURO_API double eval_on_test_data(std::vector<double>* weights = nullptr, bool verbose = false) override;
+        LIB4NEURO_API double eval_on_test_data(std::vector<double>* weights = nullptr,
+                                               bool verbose = false) override;
 
         /**
          *
@@ -468,20 +484,24 @@ namespace lib4neuro {
          * @param error_vector
          * @return
          */
-        LIB4NEURO_API virtual double eval_single_item_by_idx(size_t  i, std::vector<double>* parameter_vector, std::vector<double> &error_vector) override;
+        LIB4NEURO_API virtual double eval_single_item_by_idx(size_t i,
+                                                             std::vector<double>* parameter_vector,
+                                                             std::vector<double>& error_vector) override;
 
         /**
          *
          * @param error_vector
          * @param gradient_vector
          */
-        LIB4NEURO_API virtual void calculate_error_gradient_single(std::vector<double> &error_vector, std::vector<double> &gradient_vector) override;
+        LIB4NEURO_API virtual void calculate_error_gradient_single(std::vector<double>& error_vector,
+                                                                   std::vector<double>& gradient_vector) override;
 
         /**
          *
          * @param F
          */
-        LIB4NEURO_API void add_error_function(ErrorFunction* F, double alpha = 1.0);
+        LIB4NEURO_API void add_error_function(ErrorFunction* F,
+                                              double alpha = 1.0);
 
         /**
          *
@@ -510,9 +530,9 @@ namespace lib4neuro {
          */
         LIB4NEURO_API void
         analyze_error_gradient(std::vector<double>& params,
-                                 std::vector<double>& grad,
-                                 double alpha = 1.0,
-                                 size_t batch = 0) override;
+                               std::vector<double>& grad,
+                               double alpha = 1.0,
+                               size_t batch = 0) override;
 
         LIB4NEURO_API void
         calculate_residual_gradient(std::vector<double>* input,
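
The header above only declares the interface; as a hedged usage sketch (it assumes net and ds are valid lib4neuro objects built elsewhere and that the include path matches this repository layout, and it is not a verbatim lib4neuro test):

```cpp
#include <vector>

#include "ErrorFunction/ErrorFunctions.h"

// Sketch of driving the ErrorFunction interface declared above.
double train_and_score(lib4neuro::NeuralNetwork& net,
                       lib4neuro::DataSet& ds) {
    lib4neuro::MSE mse(&net, &ds);

    /* Error and gradient at the current parameters. */
    std::vector<double> params = mse.get_parameters();
    std::vector<double> grad(params.size(), 0.0);
    mse.calculate_error_gradient(params, grad, 1.0, 0);

    /* Hold out part of the data (the 0.3 fraction is an assumption)
     * and score the network on it. */
    mse.divide_data_train_test(0.3);
    return mse.eval_on_test_data();
}
```
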
diff --git a/src/ErrorFunction/ErrorFunctionsMock.h b/src/ErrorFunction/ErrorFunctionsMock.h
index 33fe56ce043a4bc6d94cf44708490be30c6a5620..1c5ac82b8c3e9c40459b657834868496028ab938 100644
--- a/src/ErrorFunction/ErrorFunctionsMock.h
+++ b/src/ErrorFunction/ErrorFunctionsMock.h
@@ -10,27 +10,59 @@
 using namespace lib4neuro;
 
 
-MOCK_BASE_CLASS(mock_ErrorFunction, lib4neuro::ErrorFunction)
+MOCK_BASE_CLASS(mock_ErrorFunction,
+                lib4neuro::ErrorFunction)
 {
-    MOCK_METHOD(eval, 3)
-    MOCK_METHOD(get_dimension, 0)
-    MOCK_METHOD(calculate_error_gradient, 4)
-    MOCK_METHOD(eval_single_item_by_idx, 3)
-    MOCK_METHOD(calculate_error_gradient_single, 2)
-    MOCK_METHOD(analyze_error_gradient, 4)
-    MOCK_METHOD(calculate_residual_gradient, 4)
-    MOCK_METHOD(calculate_single_residual, 3)
-    MOCK_METHOD(get_parameters, 0)
-    MOCK_METHOD(get_dataset, 0)
-    MOCK_METHOD(get_network_instance, 0)
-    MOCK_METHOD(divide_data_train_test, 1)
-    MOCK_METHOD(return_full_data_set_for_training, 0)
-    MOCK_METHOD(eval_on_test_data, 2, double(std::vector<double>*, bool), id1)
-    MOCK_METHOD(eval_on_test_data, 3, double(std::string, std::vector<double>*, bool), id2)
-    MOCK_METHOD(eval_on_test_data, 3, double(std::ofstream*, std::vector<double>*, bool), id3)
-    MOCK_METHOD(eval_on_data_set, 3, double(DataSet*, std::vector<double>*, bool), id4)
-    MOCK_METHOD(eval_on_data_set, 4, double(DataSet*, std::string, std::vector<double>*, bool), id5)
-    MOCK_METHOD(eval_on_data_set, 5, double(DataSet*, std::ofstream*, std::vector<double>*, bool, bool), id6)
+    MOCK_METHOD(eval,
+                3)
+    MOCK_METHOD(get_dimension,
+                0)
+    MOCK_METHOD(calculate_error_gradient,
+                4)
+    MOCK_METHOD(eval_single_item_by_idx,
+                3)
+    MOCK_METHOD(calculate_error_gradient_single,
+                2)
+    MOCK_METHOD(analyze_error_gradient,
+                4)
+    MOCK_METHOD(calculate_residual_gradient,
+                4)
+    MOCK_METHOD(calculate_single_residual,
+                3)
+    MOCK_METHOD(get_parameters,
+                0)
+    MOCK_METHOD(get_dataset,
+                0)
+    MOCK_METHOD(get_network_instance,
+                0)
+    MOCK_METHOD(divide_data_train_test,
+                1)
+    MOCK_METHOD(return_full_data_set_for_training,
+                0)
+    MOCK_METHOD(eval_on_test_data,
+                2,
+                double(std::vector<double>*, bool),
+                id1)
+    MOCK_METHOD(eval_on_test_data,
+                3,
+                double(std::string, std::vector<double>*, bool),
+                id2)
+    MOCK_METHOD(eval_on_test_data,
+                3,
+                double(std::ofstream*, std::vector<double>*, bool),
+                id3)
+    MOCK_METHOD(eval_on_data_set,
+                3,
+                double(DataSet*, std::vector<double>*, bool),
+                id4)
+    MOCK_METHOD(eval_on_data_set,
+                4,
+                double(DataSet*, std::string, std::vector<double>*, bool),
+                id5)
+    MOCK_METHOD(eval_on_data_set,
+                5,
+                double(DataSet*, std::ofstream*, std::vector<double>*, bool, bool),
+                id6)
 };
 
 #endif //LIB4NEURO_ERRORFUNCTIONSMOCK_H
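
The mock above comes from the Turtle mocking library (which provides MOCK_BASE_CLASS/MOCK_METHOD). A hedged sketch of how a Boost.Test case would typically drive it, the id1 identifier being how Turtle addresses the overloads declared above (the test name is illustrative):

```cpp
#define BOOST_TEST_MODULE error_function_mock_example
#include <boost/test/included/unit_test.hpp>

#include "ErrorFunctionsMock.h"

BOOST_AUTO_TEST_CASE(mocked_error_function) {
    mock_ErrorFunction ef;

    // Non-overloaded methods are addressed by name...
    MOCK_EXPECT(ef.eval).returns(1.5);
    // ...overloads by the identifier passed as MOCK_METHOD's fourth argument.
    MOCK_EXPECT(ef.id1).returns(0.25);

    BOOST_CHECK_CLOSE(ef.eval(nullptr, false, false), 1.5, 1e-9);
    BOOST_CHECK_CLOSE(ef.eval_on_test_data(nullptr, false), 0.25, 1e-9);
}
```
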
diff --git a/src/General/ExprtkWrapper.cpp b/src/General/ExprtkWrapper.cpp
index 6b9ac85873685bb2953e5549b6932ac768323250..cf6144f6fd9a44f2a2dc8339d2c07b670bd235c8 100644
--- a/src/General/ExprtkWrapper.cpp
+++ b/src/General/ExprtkWrapper.cpp
@@ -20,40 +20,46 @@ ExprtkWrapper::ExprtkWrapper() {
     THROW_NOT_IMPLEMENTED_ERROR("This constructors is being used only for serialization purposes.");
 }
 
-ExprtkWrapper::ExprtkWrapper( std::string expression_string ) {
+ExprtkWrapper::ExprtkWrapper(std::string expression_string) {
 
     this->p_impl = new ExprtkWrapperImpl();
 
     this->p_impl->expression_str = expression_string;
 
-    this->p_impl->symbol_table = new symbol_table_t( );
-
-    this->p_impl->symbol_table->add_variable("x", this->p_impl->x);
-    this->p_impl->symbol_table->add_variable("y", this->p_impl->y);
-    this->p_impl->symbol_table->add_variable("z", this->p_impl->z);
-    this->p_impl->symbol_table->add_variable("t", this->p_impl->t);
-    this->p_impl->symbol_table->add_variable("f", this->p_impl->z);
-
-    this->p_impl->expression = new expression_t( );
-    this->p_impl->expression->register_symbol_table( *this->p_impl->symbol_table );
-
-    this->p_impl->parser = new parser_t( );
-    this->p_impl->parser->compile(this->p_impl->expression_str, *this->p_impl->expression );
+    this->p_impl->symbol_table = new symbol_table_t();
+
+    this->p_impl->symbol_table->add_variable("x",
+                                             this->p_impl->x);
+    this->p_impl->symbol_table->add_variable("y",
+                                             this->p_impl->y);
+    this->p_impl->symbol_table->add_variable("z",
+                                             this->p_impl->z);
+    this->p_impl->symbol_table->add_variable("t",
+                                             this->p_impl->t);
+    this->p_impl->symbol_table->add_variable("f",
+                                             this->p_impl->z);
+
+    this->p_impl->expression = new expression_t();
+    this->p_impl->expression->register_symbol_table(*this->p_impl->symbol_table);
+
+    this->p_impl->parser = new parser_t();
+    this->p_impl->parser->compile(this->p_impl->expression_str,
+                                  *this->p_impl->expression);
 }
 
 ExprtkWrapper::~ExprtkWrapper() {
 
-    if( this->p_impl->expression ){
+    if (this->p_impl->expression) {
         delete this->p_impl->expression;
         this->p_impl->expression = nullptr;
     }
 
-    if( this->p_impl->symbol_table ){
+    if (this->p_impl->symbol_table) {
         delete this->p_impl->symbol_table;
         this->p_impl->symbol_table = nullptr;
     }
 
-    if( this->p_impl->parser ){
+    if (this->p_impl->parser) {
         delete this->p_impl->parser;
         this->p_impl->parser = nullptr;
     }
@@ -63,30 +69,33 @@ ExprtkWrapper::~ExprtkWrapper() {
 
 }
 
-double ExprtkWrapper::eval(double x1, double x2, double x3, double x4) {
+double ExprtkWrapper::eval(double x1,
+                           double x2,
+                           double x3,
+                           double x4) {
 
     this->p_impl->x = x1;
     this->p_impl->y = x2;
     this->p_impl->z = x3;
     this->p_impl->t = x4;
 
-    return this->p_impl->expression->value( );
+    return this->p_impl->expression->value();
 
 }
 
-double ExprtkWrapper::eval(std::vector<double> &p) {
+double ExprtkWrapper::eval(std::vector<double>& p) {
 
 
-    if(p.size() > 0){
+    if (p.size() > 0) {
         this->p_impl->x = p[0];
     }
-    if(p.size() > 1){
+    if (p.size() > 1) {
         this->p_impl->y = p[1];
     }
-    if(p.size() > 2){
+    if (p.size() > 2) {
         this->p_impl->z = p[2];
     }
-    if(p.size() > 3){
+    if (p.size() > 3) {
         this->p_impl->t = p[3];
     }
 
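
As a usage sketch for the wrapper above (the include path is assumed): expressions may reference the registered variables x, y, z and t, and the two eval overloads map positional arguments or p[0..3] onto them:

```cpp
#include <iostream>
#include <vector>

#include "General/ExprtkWrapper.h"

int main() {
    ExprtkWrapper f("x*x + 2*y");

    // Positional form: x = 3, y = 1; z and t keep their 0.0 defaults.
    std::cout << f.eval(3.0, 1.0) << std::endl;   // 11

    // Vector form: p[0] -> x, p[1] -> y, and so on.
    std::vector<double> p = {3.0, 1.0};
    std::cout << f.eval(p) << std::endl;          // 11
    return 0;
}
```
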
diff --git a/src/General/ExprtkWrapper.h b/src/General/ExprtkWrapper.h
index aefaff3e1ac1980efb1a1f779cd179431ed18485..f77bf824ca8169efa7a82313f1e6f7fb754b8d78 100644
--- a/src/General/ExprtkWrapper.h
+++ b/src/General/ExprtkWrapper.h
@@ -28,12 +28,12 @@ public:
      * @param expression_string
      * @param var_dim
      */
-    LIB4NEURO_API ExprtkWrapper( std::string expression_string );
+    LIB4NEURO_API ExprtkWrapper(std::string expression_string);
 
     /**
      *
      */
-    LIB4NEURO_API ExprtkWrapper( );
+    LIB4NEURO_API ExprtkWrapper();
 
     /**
      *
@@ -48,14 +48,17 @@ public:
      * @param x4
      * @return
      */
-    LIB4NEURO_API double eval(double x1 = 0.0, double x2 = 0.0, double x3 = 0.0, double x4 = 0.0);
+    LIB4NEURO_API double eval(double x1 = 0.0,
+                              double x2 = 0.0,
+                              double x3 = 0.0,
+                              double x4 = 0.0);
 
     /**
      *
      * @param p
      * @return
      */
-    LIB4NEURO_API double eval(std::vector<double> &p);
+    LIB4NEURO_API double eval(std::vector<double>& p);
 
 private:
 
@@ -66,7 +69,8 @@ private:
      * to isolate Exprtk dependency from header
      */
     class ExprtkWrapperImpl;
-    ExprtkWrapperImpl *p_impl;
+
+    ExprtkWrapperImpl* p_impl;
 
 };
 
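
The forward-declared ExprtkWrapperImpl behind a raw p_impl pointer is the pimpl idiom mentioned in the comment above: exprtk stays a private dependency of the .cpp, so clients of this header never have to parse it. A generic minimal sketch of the pattern (Widget and its members are illustrative, not lib4neuro code):

```cpp
// widget.h - public header: no heavy includes, only a forward declaration.
class Widget {
public:
    Widget();
    ~Widget();
    double compute() const;

private:
    class Impl;    // defined only inside widget.cpp
    Impl* p_impl;  // raw owning pointer, mirroring ExprtkWrapper
};

// widget.cpp - the only translation unit that sees the heavy dependency.
class Widget::Impl {
public:
    double value = 42.0;  // stands in for parser/expression state
};

Widget::Widget() : p_impl(new Impl()) {}

Widget::~Widget() { delete p_impl; }

double Widget::compute() const { return p_impl->value; }
```
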
diff --git a/src/General/ExprtkWrapperSerialization.h b/src/General/ExprtkWrapperSerialization.h
index 66f013d1fb6a780082f82535d328a1ccf5f3da05..a57a882348e71d75de4f05dd3ed7ce1d761b6e8f 100644
--- a/src/General/ExprtkWrapperSerialization.h
+++ b/src/General/ExprtkWrapperSerialization.h
@@ -13,14 +13,14 @@
 BOOST_CLASS_EXPORT_KEY(ExprtkWrapper);
 
 typedef exprtk::symbol_table<double> symbol_table_t;
-typedef exprtk::expression<double>     expression_t;
-typedef exprtk::parser<double>             parser_t;
+typedef exprtk::expression<double> expression_t;
+typedef exprtk::parser<double> parser_t;
 
 /**
  * Class implementing the private properties
  * of ExprtkWrapper class.
  */
-class ExprtkWrapper :: ExprtkWrapperImpl {
+class ExprtkWrapper::ExprtkWrapperImpl {
 
 public:
 
@@ -33,17 +33,17 @@ public:
     /**
      *
      */
-    expression_t *expression = nullptr;
+    expression_t* expression = nullptr;
 
     /**
      *
      */
-    symbol_table_t *symbol_table = nullptr;
+    symbol_table_t* symbol_table = nullptr;
 
     /**
      *
      */
-    parser_t * parser = nullptr;
+    parser_t* parser = nullptr;
 
     /**
      * variables
@@ -56,9 +56,11 @@ public:
     std::string expression_str;
 };
 
-struct ExprtkWrapper :: access {
-    template <class Archive>
-    static void serialize(Archive &ar, ExprtkWrapper& n, const unsigned int version) {
+struct ExprtkWrapper::access {
+    template<class Archive>
+    static void serialize(Archive& ar,
+                          ExprtkWrapper& n,
+                          const unsigned int version) {
         ar & n.p_impl->expression_str;
         ar & n.p_impl->x & n.p_impl->y & n.p_impl->z & n.p_impl->t & n.p_impl->f;
     }
@@ -75,9 +77,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, ExprtkWrapper& n, const unsigned int version)
-        {
-            ExprtkWrapper::access::serialize(ar, n, version);
+        void serialize(Archive& ar,
+                       ExprtkWrapper& n,
+                       const unsigned int version) {
+            ExprtkWrapper::access::serialize(ar,
+                                             n,
+                                             version);
         }
 
     } // namespace serialization
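
The access struct exists so the boost::serialization free function above can reach the private p_impl. Note that only expression_str and the variable values are archived, so the exprtk parser state is rebuilt rather than serialized. A hedged save-side sketch using a Boost text archive (loading is symmetric with text_iarchive):

```cpp
#include <iostream>
#include <sstream>

#include <boost/archive/text_oarchive.hpp>

#include "General/ExprtkWrapper.h"
#include "General/ExprtkWrapperSerialization.h"

int main() {
    std::stringstream ss;

    const ExprtkWrapper f("x + y");
    boost::archive::text_oarchive oa(ss);
    oa << f;  // dispatches through ExprtkWrapper::access::serialize

    std::cout << ss.str() << std::endl;  // textual archive contents
    return 0;
}
```
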
diff --git a/src/LearningMethods/GradientDescent.cpp b/src/LearningMethods/GradientDescent.cpp
index f20f17f4837a5b1115f9e955c0b9941c6a37eaf3..040a147b174e968a827c0221d258c45378b5972f 100644
--- a/src/LearningMethods/GradientDescent.cpp
+++ b/src/LearningMethods/GradientDescent.cpp
@@ -10,7 +10,10 @@
 #include "message.h"
 
 namespace lib4neuro {
-    GradientDescent::GradientDescent(double epsilon, size_t n_to_restart, int max_iters, size_t batch) {
+    GradientDescent::GradientDescent(double epsilon,
+                                     size_t n_to_restart,
+                                     int max_iters,
+                                     size_t batch) {
         this->tolerance = epsilon;
         this->restart_frequency = n_to_restart;
         this->maximum_niters = max_iters;
@@ -20,9 +23,9 @@ namespace lib4neuro {
     GradientDescent::~GradientDescent() {
     }
 
-    void GradientDescent::eval_step_size_mk(double &gamma,
+    void GradientDescent::eval_step_size_mk(double& gamma,
                                             double beta,
-                                            double &c,
+                                            double& c,
                                             double grad_norm_prev,
                                             double grad_norm,
                                             double fi,
@@ -34,51 +37,54 @@ namespace lib4neuro {
             c *= 1.0000005;
         }
 
-        gamma *= std::pow(c, 1.0 - 2.0 * beta) * std::pow(grad_norm_prev / grad_norm, 1.0 / c);
+        gamma *= std::pow(c,
+                          1.0 - 2.0 * beta) * std::pow(grad_norm_prev / grad_norm,
+                                                       1.0 / c);
 
     }
 
     bool GradientDescent::perform_feasible_1D_step(
-            lib4neuro::ErrorFunction &ef,
+            lib4neuro::ErrorFunction& ef,
             double error_previous,
             double step_coefficient,
             std::shared_ptr<std::vector<double>> direction,
             std::shared_ptr<std::vector<double>> parameters_before,
             std::shared_ptr<std::vector<double>> parameters_after
-            ) {
+    ) {
 
         size_t i;
 
         boost::random::mt19937 gen(std::time(0));
-        boost::random::uniform_int_distribution<> dis(0, direction->size());
-        size_t  max_dir_idx = dis(gen);
+        boost::random::uniform_int_distribution<> dis(0,
+                                                      direction->size() - 1);
+        size_t max_dir_idx = dis(gen);
 
         double error_current = error_previous + 1.0;
-        while( error_current >=  error_previous ){
-            (*parameters_after)[max_dir_idx] = (*parameters_before)[max_dir_idx] - step_coefficient * (*direction)[max_dir_idx];
+        while (error_current >= error_previous) {
+            (*parameters_after)[max_dir_idx] =
+                    (*parameters_before)[max_dir_idx] - step_coefficient * (*direction)[max_dir_idx];
 
-            error_current = ef.eval( parameters_after.get() );
-            if( step_coefficient < 1e-32){
+            error_current = ef.eval(parameters_after.get());
+            if (step_coefficient < 1e-32) {
                 for (i = 0; i < direction->size(); ++i) {
                     (*parameters_after)[i] = (*parameters_before)[i] - step_coefficient * (*direction)[i];
                 }
                 return false;
-            }
-            else{
-                if( error_current >=  error_previous ){
+            } else {
+                if (error_current >= error_previous) {
                     step_coefficient *= 0.5;
-                }
-                else{
+                } else {
                 }
             }
         }
         return true;
     }
 
-    void GradientDescent::optimize(lib4neuro::ErrorFunction &ef, std::ofstream* ofs) {
+    void GradientDescent::optimize(lib4neuro::ErrorFunction& ef,
+                                   std::ofstream* ofs) {
 
         /* Copy data set max and min values, if it's normalized */
-        if(ef.get_dataset()->is_normalized()) {
+        if (ef.get_dataset()->is_normalized()) {
             ef.get_network_instance()->set_normalization_strategy_instance(
                     ef.get_dataset()->get_normalization_strategy());
         }
@@ -86,7 +92,7 @@ namespace lib4neuro {
         COUT_INFO("Finding a solution via a Gradient Descent method with adaptive step-length..." << std::endl);
         COUT_INFO("Initial error: " << ef.eval() << std::endl);
 
-        if(ofs && ofs->is_open()) {
+        if (ofs && ofs->is_open()) {
             *ofs << "Finding a solution via a Gradient Descent method with adaptive step-length..." << std::endl;
             *ofs << "Initial error: " << ef.eval() << std::endl;
         }
@@ -110,9 +116,12 @@ namespace lib4neuro {
         std::vector<double>* ptr_mem;
 
 
-
-        std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
-        std::fill(gradient_prev->begin(), gradient_prev->end(), 0.0);
+        std::fill(gradient_current->begin(),
+                  gradient_current->end(),
+                  0.0);
+        std::fill(gradient_prev->begin(),
+                  gradient_prev->end(),
+                  0.0);
 
         val = ef.eval(params_current);
         double coeff = 1;
@@ -126,8 +135,13 @@ namespace lib4neuro {
             grad_norm_prev = grad_norm;
 
             /* reset of the current gradient */
-            std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
-            ef.calculate_error_gradient(*params_current, *gradient_current, 1.0, this->batch);
+            std::fill(gradient_current->begin(),
+                      gradient_current->end(),
+                      0.0);
+            ef.calculate_error_gradient(*params_current,
+                                        *gradient_current,
+                                        1.0,
+                                        this->batch);
 
 
             grad_norm = 0.0;
@@ -138,7 +152,7 @@ namespace lib4neuro {
 
             /* Update of the parameters */
             /* step length calculation */
-            if (iter_counter < 10 || iter_counter % this->restart_frequency == 0 ) {
+            if (iter_counter < 10 || iter_counter % this->restart_frequency == 0) {
                 /* fixed step length */
                 gamma = 0.1 * this->tolerance;
                 cooling = 1.0;
@@ -149,15 +163,20 @@ namespace lib4neuro {
                     sx += (gradient_current->at(i) * gradient_prev->at(i));
                 }
                 sx /= grad_norm * grad_norm_prev;
-                if( sx < -1.0 + 5e-12 ){
+                if (sx < -1.0 + 5e-12) {
                     sx = -1 + 5e-12;
-                }
-                else if( sx > 1.0 - 5e-12 ){
+                } else if (sx > 1.0 - 5e-12) {
                     sx = 1 - 5e-12;
                 }
                 beta = std::sqrt(std::acos(sx) / lib4neuro::PI);
 
-                eval_step_size_mk(gamma, beta, c, grad_norm_prev, grad_norm, val, prev_val);
+                eval_step_size_mk(gamma,
+                                  beta,
+                                  c,
+                                  grad_norm_prev,
+                                  grad_norm,
+                                  val,
+                                  prev_val);
             }
 
             for (i = 0; i < gradient_current->size(); ++i) {
@@ -178,49 +197,58 @@ namespace lib4neuro {
             params_current = ptr_mem;
 
 
-            COUT_DEBUG(std::string("Iteration: ") << (unsigned int)(iter_counter)
+            COUT_DEBUG(std::string("Iteration: ") << (unsigned int) (iter_counter)
                                                   << ". Step size: " << gamma * cooling
                                                   << ". C: " << c
                                                   << ". Gradient norm: " << grad_norm
                                                   << ". Total error: " << val
-                                                  << ".\r" );
+                                                  << ".\r");
 
-            WRITE_TO_OFS_DEBUG(ofs, "Iteration: " << (unsigned int)(iter_counter)
-                                                  << ". Step size: " << gamma * cooling
-                                                  << ". C: " << c
-                                                  << ". Gradient norm: " << grad_norm
-                                                  << ". Total error: " << val
-                                                  << "." << std::endl);
+            WRITE_TO_OFS_DEBUG(ofs,
+                               "Iteration: " << (unsigned int) (iter_counter)
+                                             << ". Step size: " << gamma * cooling
+                                             << ". C: " << c
+                                             << ". Gradient norm: " << grad_norm
+                                             << ". Total error: " << val
+                                             << "." << std::endl);
 
 
             cooling *= 0.9999;
 
         }
-        COUT_DEBUG(std::string("Iteration: ") << (unsigned int)(iter_counter)
+        COUT_DEBUG(std::string("Iteration: ") << (unsigned int) (iter_counter)
                                               << ". Step size: " << gamma
                                               << ". C: " << c
                                               << ". Gradient norm: " << grad_norm
                                               << ". Total error: " << val
                                               << "." << std::endl);
-        COUT_DEBUG("Number of total steps: " << counter_bad_guesses + counter_good_guesses << ", good: " << counter_good_guesses << ", bad: " << counter_bad_guesses << ", from which " << counter_simplified_direction_good + counter_simplified_direction_bad << " were attempted by simplified direction, success: " << counter_simplified_direction_good << ", fail: " << counter_simplified_direction_bad << std::endl << std::endl );
-
-        if(iter_idx == 0) {
-            COUT_INFO(std::endl << "Maximum number of iterations (" << this->maximum_niters << ") was reached! Final error: " << val << std::endl);
-
-            if(ofs && ofs->is_open()) {
-                *ofs << "Maximum number of iterations (" << this->maximum_niters << ") was reached! Final error: " << val << std::endl;
+        COUT_DEBUG("Number of total steps: " << counter_bad_guesses + counter_good_guesses << ", good: "
+                                             << counter_good_guesses << ", bad: " << counter_bad_guesses
+                                             << ", from which "
+                                             << counter_simplified_direction_good + counter_simplified_direction_bad
+                                             << " were attempted by simplified direction, success: "
+                                             << counter_simplified_direction_good << ", fail: "
+                                             << counter_simplified_direction_bad << std::endl << std::endl);
+
+        if (iter_idx == 0) {
+            COUT_INFO(std::endl << "Maximum number of iterations (" << this->maximum_niters
+                                << ") was reached! Final error: " << val << std::endl);
+
+            if (ofs && ofs->is_open()) {
+                *ofs << "Maximum number of iterations (" << this->maximum_niters << ") was reached! Final error: "
+                     << val << std::endl;
 
             }
 
         } else {
             COUT_INFO(std::endl << "Gradient Descent method converged after "
-                              << this->maximum_niters - iter_idx
-                              << " iterations. Final error:" << val
-                              << std::endl);
+                                << this->maximum_niters - iter_idx
+                                << " iterations. Final error: " << val
+                                << std::endl);
 #ifdef L4N_DEBUG
-            if(ofs && ofs->is_open()) {
+            if (ofs && ofs->is_open()) {
                 *ofs << "Gradient Descent method converged after "
-                     << this->maximum_niters-iter_idx
+                     << this->maximum_niters - iter_idx
                      << " iterations."
                      << std::endl;
             }
@@ -228,7 +256,7 @@ namespace lib4neuro {
         }
 
         this->optimal_parameters = *params_current;
-        ef.get_network_instance()->copy_parameter_space( &this->optimal_parameters );
+        ef.get_network_instance()->copy_parameter_space(&this->optimal_parameters);
 
     }
 }
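
Spelled out, the adaptive step-length rule implemented by eval_step_size_mk together with the sx clamp above is (reconstructed from the code; E_k is the error and ∇E_k its gradient at iteration k, and c is the slowly inflated auxiliary constant from the top of eval_step_size_mk):

```latex
s_k = \frac{\nabla E_k \cdot \nabla E_{k-1}}{\lVert \nabla E_k \rVert \, \lVert \nabla E_{k-1} \rVert},
\qquad
\beta_k = \sqrt{\frac{\arccos(s_k)}{\pi}},
\qquad
\gamma_{k+1} = \gamma_k \, c^{\,1 - 2\beta_k}
\left( \frac{\lVert \nabla E_{k-1} \rVert}{\lVert \nabla E_k \rVert} \right)^{1/c}
```

with s_k clamped to [-1 + 5e-12, 1 - 5e-12] before the arccos, and the step reset to the fixed value gamma = 0.1 * tolerance for the first ten iterations and at every restart_frequency-th iteration.
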
diff --git a/src/LearningMethods/GradientDescent.h b/src/LearningMethods/GradientDescent.h
index ff1a7b3ff285a52350b4aea1403954eca40f76a2..7d76d1b1baa8a2449744d6195b202dd9a18edf29 100644
--- a/src/LearningMethods/GradientDescent.h
+++ b/src/LearningMethods/GradientDescent.h
@@ -34,7 +34,7 @@ namespace lib4neuro {
         /**
          *
          */
-		size_t batch;
+        size_t batch;
 
         /**
          * Maximal number of iterations - optimization will stop after that, even if not converged
@@ -60,9 +60,9 @@ namespace lib4neuro {
          * @param fim[in] value of the error in the previous iteration
          */
         virtual void
-        eval_step_size_mk(double &gamma,
+        eval_step_size_mk(double& gamma,
                           double beta,
-                          double &c,
+                          double& c,
                           double grad_norm_prev,
                           double grad_norm,
                           double fi,
@@ -78,13 +78,13 @@ namespace lib4neuro {
          * @param parameters_after[out] suggested state of the parameters after the analysis completes
          */
         virtual bool perform_feasible_1D_step(
-                lib4neuro::ErrorFunction &ef,
+                lib4neuro::ErrorFunction& ef,
                 double error_previous,
                 double step_coefficient,
                 std::shared_ptr<std::vector<double>> direction,
                 std::shared_ptr<std::vector<double>> parameters_before,
                 std::shared_ptr<std::vector<double>> parameters_after
-                );
+        );
 
     public:
 
@@ -94,7 +94,10 @@ namespace lib4neuro {
          * @param n_to_restart Number of iterations to reset step size to tolerance/10.0
          * @param max_iters Maximal number of iterations - optimization will stop after that, even if not converged
          */
-        LIB4NEURO_API explicit GradientDescent(double epsilon = 1e-3, size_t n_to_restart = 100, int max_iters = 1000, size_t batch = 0);
+        LIB4NEURO_API explicit GradientDescent(double epsilon = 1e-3,
+                                               size_t n_to_restart = 100,
+                                               int max_iters = 1000,
+                                               size_t batch = 0);
 
         /**
          * Deallocates the instance
@@ -105,7 +108,8 @@ namespace lib4neuro {
          *
          * @param ef
          */
-        LIB4NEURO_API void optimize(lib4neuro::ErrorFunction &ef, std::ofstream* ofs = nullptr) override;
+        LIB4NEURO_API void optimize(lib4neuro::ErrorFunction& ef,
+                                    std::ofstream* ofs = nullptr) override;
 
     };
 }
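
A hedged sketch of wiring this optimizer to an error function (the parameter values are illustrative defaults, and the log file name is an assumption):

```cpp
#include <fstream>

#include "ErrorFunction/ErrorFunctions.h"
#include "LearningMethods/GradientDescent.h"

// Runs gradient descent on an assembled error function; the optimum is
// copied back into the network by optimize() itself.
void train(lib4neuro::ErrorFunction& ef) {
    lib4neuro::GradientDescent gd(/* epsilon */ 1e-4,
                                  /* n_to_restart */ 100,
                                  /* max_iters */ 5000,
                                  /* batch */ 0);

    std::ofstream log("training.log");
    gd.optimize(ef, &log);
}
```
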
diff --git a/src/LearningMethods/GradientDescentBB.cpp b/src/LearningMethods/GradientDescentBB.cpp
index 1a6b54dc767a2efd28e0e4923ace28f03606ad96..5265c8b0765bf8aa5bd1d4356a62600e27e010ee 100644
--- a/src/LearningMethods/GradientDescentBB.cpp
+++ b/src/LearningMethods/GradientDescentBB.cpp
@@ -9,7 +9,10 @@
 #include "message.h"
 
 namespace lib4neuro {
-    GradientDescentBB::GradientDescentBB(double epsilon, size_t n_to_restart, int max_iters, size_t batch) {
+    GradientDescentBB::GradientDescentBB(double epsilon,
+                                         size_t n_to_restart,
+                                         int max_iters,
+                                         size_t batch) {
         this->tolerance = epsilon;
         this->restart_frequency = n_to_restart;
         this->maximum_niters = max_iters;
@@ -20,10 +23,11 @@ namespace lib4neuro {
     }
 
 
-    void GradientDescentBB::optimize(lib4neuro::ErrorFunction &ef, std::ofstream* ofs) {
+    void GradientDescentBB::optimize(lib4neuro::ErrorFunction& ef,
+                                     std::ofstream* ofs) {
 
         /* Copy data set max and min values, if it's normalized */
-        if(ef.get_dataset()->is_normalized()) {
+        if (ef.get_dataset()->is_normalized()) {
             ef.get_network_instance()->set_normalization_strategy_instance(
                     ef.get_dataset()->get_normalization_strategy());
         }
@@ -31,7 +35,7 @@ namespace lib4neuro {
         COUT_INFO("Finding a solution via a Gradient Descent method with adaptive step-length..." << std::endl);
         COUT_INFO("Initial error: " << ef.eval() << std::endl);
 
-        if(ofs && ofs->is_open()) {
+        if (ofs && ofs->is_open()) {
             *ofs << "Finding a solution via a Gradient Descent method with adaptive step-length..." << std::endl;
             *ofs << "Initial error: " << ef.eval() << std::endl;
         }
@@ -57,12 +61,16 @@ namespace lib4neuro {
         std::vector<double>* ptr_mem;
 
         double alpha = -1.0, cc, gg;
-        std::vector<double> dot__( 3 );
+        std::vector<double> dot__(3);
         double d1 = 0.0, d2 = 0.0, d3 = 0.0;
 
 
-        std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
-        std::fill(gradient_prev->begin(), gradient_prev->end(), 0.0);
+        std::fill(gradient_current->begin(),
+                  gradient_current->end(),
+                  0.0);
+        std::fill(gradient_prev->begin(),
+                  gradient_prev->end(),
+                  0.0);
         val = ef.eval(params_current);
         val_best = val;
 
@@ -74,8 +82,13 @@ namespace lib4neuro {
             grad_norm_prev = grad_norm;
 
             /* reset of the current gradient */
-            std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
-            ef.calculate_error_gradient(*params_current, *gradient_current, 1.0, this->batch);
+            std::fill(gradient_current->begin(),
+                      gradient_current->end(),
+                      0.0);
+            ef.calculate_error_gradient(*params_current,
+                                        *gradient_current,
+                                        1.0,
+                                        this->batch);
 
 
             grad_norm = 0.0;
@@ -87,18 +100,20 @@ namespace lib4neuro {
 
             /* Update of the parameters */
             /* step length calculation */
-            if (iter_counter < 10 || iter_counter % this->restart_frequency < 10 ) {
+            if (iter_counter < 10 || iter_counter % this->restart_frequency < 10) {
                 /* fixed step length */
                 gamma = 0.1 * this->tolerance;
                 cooling_factor = 1.0;
             } else {
 
-                std::fill( dot__.begin( ), dot__.end( ), 0.0 );
+                std::fill(dot__.begin(),
+                          dot__.end(),
+                          0.0);
                 d1 = d2 = d3 = 0.0;
 
-                for ( int d = 0; d < gradient_current->size(); d++ ) {
-                    cc = params_current->at( d ) - params_prev->at( d );
-                    gg = gradient_current->at( d ) - gradient_prev->at( d );
+                for (size_t d = 0; d < gradient_current->size(); d++) {
+                    cc = params_current->at(d) - params_prev->at(d);
+                    gg = gradient_current->at(d) - gradient_prev->at(d);
 
                     d1 += cc * cc;
                     d2 += cc * gg;
@@ -110,8 +125,8 @@ namespace lib4neuro {
                 dot__[2] = d3;
 
                 gamma = 1;
-                if ( fabs( dot__[1] ) > 0.0 ) {
-                    gamma = 0.25*( dot__[0] / dot__[1] );
+                if (fabs(dot__[1]) > 0.0) {
+                    gamma = 0.25 * (dot__[0] / dot__[1]);
                 }
             }
 
@@ -130,45 +145,48 @@ namespace lib4neuro {
             params_current = ptr_mem;
 
             val = ef.eval(params_current);
-            if( val < val_best ){
+            if (val < val_best) {
                 val_best = val;
 
-                for(i = 0; i < gradient_current->size(); ++i){
-                    params_best->at( i ) = params_current->at( i );
+                for (i = 0; i < gradient_current->size(); ++i) {
+                    params_best->at(i) = params_current->at(i);
                 }
             }
 
-            COUT_DEBUG(std::string("Iteration: ") << (unsigned int)(iter_counter)
-                                                  << ". Step size: " << gamma*cooling_factor
+            COUT_DEBUG(std::string("Iteration: ") << (unsigned int) (iter_counter)
+                                                  << ". Step size: " << gamma * cooling_factor
                                                   << ". C: " << c
                                                   << ". Gradient norm: " << grad_norm
                                                   << ". Total error: " << val << ". the lowest error: " << val_best
-                                                  << ".\r" );
+                                                  << ".\r");
 
-            WRITE_TO_OFS_DEBUG(ofs, "Iteration: " << (unsigned int)(iter_counter)
-                                                  << ". Step size: " << gamma*cooling_factor
-                                                  << ". C: " << c
-                                                  << ". Gradient norm: " << grad_norm
-                                                  << ". Total error: " << val << ". the lowest error: " << val_best
-                                                  << "." << std::endl);
+            WRITE_TO_OFS_DEBUG(ofs,
+                               "Iteration: " << (unsigned int) (iter_counter)
+                                             << ". Step size: " << gamma * cooling_factor
+                                             << ". C: " << c
+                                             << ". Gradient norm: " << grad_norm
+                                             << ". Total error: " << val << ". the lowest error: " << val_best
+                                             << "." << std::endl);
 
 
             cooling_factor *= 0.99999;
 
         }
-        COUT_DEBUG(std::string("Iteration: ") << (unsigned int)(iter_counter)
-                                              << ". Step size: " << gamma*cooling_factor
+        COUT_DEBUG(std::string("Iteration: ") << (unsigned int) (iter_counter)
+                                              << ". Step size: " << gamma * cooling_factor
                                               << ". C: " << c
                                               << ". Gradient norm: " << grad_norm
                                               << ". Total error: " << val
                                               << "." << std::endl);
 
 
-        if(iter_idx == 0) {
-            COUT_INFO(std::endl << "Maximum number of iterations (" << this->maximum_niters << ") was reached! Final error: " << val_best << std::endl);
+        if (iter_idx == 0) {
+            COUT_INFO(std::endl << "Maximum number of iterations (" << this->maximum_niters
+                                << ") was reached! Final error: " << val_best << std::endl);
 
-            if(ofs && ofs->is_open()) {
-                *ofs << "Maximum number of iterations (" << this->maximum_niters << ") was reached! Final error: " << val_best << std::endl;
+            if (ofs && ofs->is_open()) {
+                *ofs << "Maximum number of iterations (" << this->maximum_niters << ") was reached! Final error: "
+                     << val_best << std::endl;
 
             }
 
@@ -178,9 +196,9 @@ namespace lib4neuro {
                                 << " iterations. Final error:" << val_best
                                 << std::endl);
 #ifdef L4N_DEBUG
-            if(ofs && ofs->is_open()) {
+            if (ofs && ofs->is_open()) {
                 *ofs << "Gradient Descent method converged after "
-                     << this->maximum_niters-iter_idx
+                     << this->maximum_niters - iter_idx
                      << " iterations."
                      << std::endl;
             }
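
The gamma assembled from dot__ above is a damped Barzilai-Borwein step: with Δx_k = x_k - x_{k-1} and Δg_k = ∇E_k - ∇E_{k-1}, the code computes (the 1/4 damping factor and the fallback gamma = 1 for a vanishing denominator are the code's own choices):

```latex
\gamma_k = \frac{1}{4} \cdot
\frac{\Delta x_k \cdot \Delta x_k}{\Delta x_k \cdot \Delta g_k}
```
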
diff --git a/src/LearningMethods/GradientDescentBB.h b/src/LearningMethods/GradientDescentBB.h
index 5284c2812c76357f382a4fb9f43a65bc4d2fb0b9..ae65c36f6bbab9404fc0f4b06d036cb3ee265810 100644
--- a/src/LearningMethods/GradientDescentBB.h
+++ b/src/LearningMethods/GradientDescentBB.h
@@ -60,7 +60,10 @@ namespace lib4neuro {
          * @param n_to_restart Number of iterations to reset step size to tolerance/10.0
          * @param max_iters Maximal number of iterations - optimization will stop after that, even if not converged
          */
-        LIB4NEURO_API explicit GradientDescentBB(double epsilon = 1e-3, size_t n_to_restart = 100, int max_iters = 1000, size_t batch = 0);
+        LIB4NEURO_API explicit GradientDescentBB(double epsilon = 1e-3,
+                                                 size_t n_to_restart = 100,
+                                                 int max_iters = 1000,
+                                                 size_t batch = 0);
 
         /**
          * Deallocates the instance
@@ -71,7 +74,8 @@ namespace lib4neuro {
          *
          * @param ef
          */
-        LIB4NEURO_API void optimize(lib4neuro::ErrorFunction &ef, std::ofstream* ofs = nullptr) override;
+        LIB4NEURO_API void optimize(lib4neuro::ErrorFunction& ef,
+                                    std::ofstream* ofs = nullptr) override;
 
     };
 
diff --git a/src/LearningMethods/GradientDescentSingleItem.cpp b/src/LearningMethods/GradientDescentSingleItem.cpp
index a107c634d643d8b29c04056742665524c75eca18..06c4b774b41ba50d11fafde1be5edb39209433de 100644
--- a/src/LearningMethods/GradientDescentSingleItem.cpp
+++ b/src/LearningMethods/GradientDescentSingleItem.cpp
@@ -11,7 +11,10 @@
 #include "message.h"
 
 namespace lib4neuro {
-    GradientDescentSingleItem::GradientDescentSingleItem(double epsilon, size_t n_to_restart, int max_iters, size_t batch) {
+    GradientDescentSingleItem::GradientDescentSingleItem(double epsilon,
+                                                         size_t n_to_restart,
+                                                         int max_iters,
+                                                         size_t batch) {
         this->tolerance = epsilon;
         this->restart_frequency = n_to_restart;
         this->maximum_niters = max_iters;
@@ -26,8 +29,10 @@ namespace lib4neuro {
     }
 
 
-    double GradientDescentSingleItem::get_optimal_step_size(lib4neuro::ErrorFunction &f, std::vector<double> &x,
-                                                   std::vector<double> &d, size_t n_elems) {
+    double GradientDescentSingleItem::get_optimal_step_size(lib4neuro::ErrorFunction& f,
+                                                            std::vector<double>& x,
+                                                            std::vector<double>& d,
+                                                            size_t n_elems) {
 
         double alpha = 10.0 / n_elems;
         alpha = 1.0;
@@ -36,68 +41,82 @@ namespace lib4neuro {
 
 
         std::shared_ptr<std::vector<double>> shifted_x = std::make_shared<std::vector<double>>(std::vector<double>(x));
-        while( value_shifted > value ){
+        while (value_shifted > value) {
             alpha *= 0.5;
 
-            for( size_t i = 0; i < x.size(); ++i ){
+            for (size_t i = 0; i < x.size(); ++i) {
                 (*shifted_x).at(i) = x.at(i) - alpha * d.at(i);
             }
 
-            value_shifted = f.eval( shifted_x.get() );
+            value_shifted = f.eval(shifted_x.get());
         }
         return alpha;
     }
 
 
-    void GradientDescentSingleItem::optimize(lib4neuro::ErrorFunction &ef, std::ofstream* ofs) {
+    void GradientDescentSingleItem::optimize(lib4neuro::ErrorFunction& ef,
+                                             std::ofstream* ofs) {
 
-        COUT_INFO("Finding a solution via a Gradient Descent [Single Item] method with adaptive step-length..." << std::endl);
+        COUT_INFO("Finding a solution via a Gradient Descent [Single Item] method with adaptive step-length..."
+                          << std::endl);
         COUT_INFO("Initial error: " << ef.eval() << std::endl);
 
         size_t total_elements = ef.get_dataset()->get_n_elements(), updated_elements = 0, iter = 0;
         double max_error = 1.0, error, gamma;
         size_t iter_idx = this->maximum_niters;
-        size_t  dim = ef.get_network_instance()->get_n_biases() + ef.get_network_instance()->get_n_weights();
+        size_t dim = ef.get_network_instance()->get_n_biases() + ef.get_network_instance()->get_n_weights();
 
         std::vector<double> parameter_vector = ef.get_parameters();
         std::vector<double> gradient_vector(dim);
         std::vector<double> search_direction(dim);
         std::vector<double> error_vector(ef.get_network_instance()->get_n_outputs());
-        while( max_error >= this->tolerance && iter_idx >= 1 ){
+        while (max_error >= this->tolerance && iter_idx >= 1) {
             iter_idx--;
             iter++;
 
             max_error = 0.0;
             updated_elements = 0;
-            std::fill(search_direction.begin(), search_direction.end(), 0);
-            for( size_t i = 0; i < ef.get_dataset()->get_n_elements(); ++i){
-                error = ef.eval_single_item_by_idx( i, &parameter_vector, error_vector );
-
-                if( error > max_error ){
+            std::fill(search_direction.begin(),
+                      search_direction.end(),
+                      0);
+            for (size_t i = 0; i < ef.get_dataset()->get_n_elements(); ++i) {
+                error = ef.eval_single_item_by_idx(i,
+                                                   &parameter_vector,
+                                                   error_vector);
+
+                if (error > max_error) {
                     max_error = error;
                 }
 
-                if( error > this->tolerance ){
+                if (error > this->tolerance) {
                     updated_elements++;
-                    ef.calculate_error_gradient_single(error_vector, gradient_vector);
+                    ef.calculate_error_gradient_single(error_vector,
+                                                       gradient_vector);
 
-                    for(size_t j = 0; j < dim; ++j ){
-                        search_direction[ j ] += gradient_vector[ j ];
+                    for (size_t j = 0; j < dim; ++j) {
+                        search_direction[j] += gradient_vector[j];
                     }
                 }
             }
-            gamma = this->get_optimal_step_size(ef, parameter_vector, search_direction, updated_elements);
+            gamma = this->get_optimal_step_size(ef,
+                                                parameter_vector,
+                                                search_direction,
+                                                updated_elements);
 
-            for( size_t j = 0; j < dim; ++j ){
-                parameter_vector[ j ] -= gamma * search_direction[ j ];
+            for (size_t j = 0; j < dim; ++j) {
+                parameter_vector[j] -= gamma * search_direction[j];
             }
 
-            COUT_DEBUG("Iteration: " << iter << ", Total elements in train set: " << total_elements << ", # of elements with high error: " << updated_elements << ", max. error: " << max_error << "\r");
+            COUT_DEBUG("Iteration: " << iter << ", Total elements in train set: " << total_elements
+                                     << ", # of elements with high error: " << updated_elements << ", max. error: "
+                                     << max_error << "\r");
         }
-        COUT_DEBUG("Iteration: " << iter << ", Total elements in train set: " << total_elements << ", # of elements with high error: " << updated_elements << ", max. error: " << max_error << std::endl);
+        COUT_DEBUG("Iteration: " << iter << ", Total elements in train set: " << total_elements
+                                 << ", # of elements with high error: " << updated_elements << ", max. error: "
+                                 << max_error << std::endl);
 
         this->optimal_parameters = &parameter_vector;
-        ef.get_network_instance()->copy_parameter_space( this->optimal_parameters );
+        ef.get_network_instance()->copy_parameter_space(this->optimal_parameters);
 
     }
 
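The loop reformatted above follows an accumulate-then-step pattern: every element of the training set is evaluated, gradients are summed only for elements whose error still exceeds the tolerance, and the parameters then take one step along the summed direction with a length chosen by get_optimal_step_size. A minimal sketch of that pattern (names and signatures are illustrative, not the library API):

    #include <cstddef>
    #include <vector>

    // Illustrative: one epoch of single-item gradient descent. item_errors[i]
    // and item_gradients[i] stand in for the values that eval_single_item_by_idx
    // and calculate_error_gradient_single produce in the code above.
    void single_item_gd_epoch(std::vector<double>& params,
                              const std::vector<double>& item_errors,
                              const std::vector<std::vector<double>>& item_gradients,
                              double tolerance,
                              double gamma) {
        std::vector<double> direction(params.size(), 0.0);
        for (std::size_t i = 0; i < item_errors.size(); ++i) {
            if (item_errors[i] > tolerance) {             // only high-error items
                for (std::size_t j = 0; j < params.size(); ++j) {
                    direction[j] += item_gradients[i][j]; // accumulate
                }
            }
        }
        for (std::size_t j = 0; j < params.size(); ++j) {
            params[j] -= gamma * direction[j];            // single descent step
        }
    }
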
diff --git a/src/LearningMethods/GradientDescentSingleItem.h b/src/LearningMethods/GradientDescentSingleItem.h
index fccca9f43ba946a6dbd776d690c3e6e06e98829c..85737e5deb5caff7a567f58352b8dbf3b98703f6 100644
--- a/src/LearningMethods/GradientDescentSingleItem.h
+++ b/src/LearningMethods/GradientDescentSingleItem.h
@@ -69,7 +69,10 @@ namespace lib4neuro {
          * @param n_elems
          * @return
          */
-        virtual double get_optimal_step_size(lib4neuro::ErrorFunction &f, std::vector<double> &x, std::vector<double> &d, size_t n_elems);
+        virtual double get_optimal_step_size(lib4neuro::ErrorFunction& f,
+                                             std::vector<double>& x,
+                                             std::vector<double>& d,
+                                             size_t n_elems);
 
 
     public:
@@ -80,7 +83,10 @@ namespace lib4neuro {
          * @param n_to_restart Number of iterations to reset step size to tolerance/10.0
          * @param max_iters Maximal number of iterations - optimization will stop after that, even if not converged
          */
-        LIB4NEURO_API explicit GradientDescentSingleItem(double epsilon = 1e-3, size_t n_to_restart = 100, int max_iters = 1000, size_t batch = 0);
+        LIB4NEURO_API explicit GradientDescentSingleItem(double epsilon = 1e-3,
+                                                         size_t n_to_restart = 100,
+                                                         int max_iters = 1000,
+                                                         size_t batch = 0);
 
         /**
          * Deallocates the instance
@@ -91,7 +97,8 @@ namespace lib4neuro {
          *
          * @param ef
          */
-        LIB4NEURO_API void optimize(lib4neuro::ErrorFunction &ef, std::ofstream* ofs = nullptr) override;
+        LIB4NEURO_API void optimize(lib4neuro::ErrorFunction& ef,
+                                    std::ofstream* ofs = nullptr) override;
     };
 
 }
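
With the defaults now one per line, a call site only overrides what it needs; a hedged usage fragment, assuming a lib4neuro::ErrorFunction named ef prepared elsewhere:

    // Illustrative fragment; ef is assumed to be set up elsewhere.
    lib4neuro::GradientDescentSingleItem gd(1e-3,  // epsilon (error tolerance)
                                            100,   // n_to_restart
                                            1000); // max_iters; batch stays 0
    gd.optimize(ef);                               // ofs defaults to nullptr
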
diff --git a/src/LearningMethods/LearningMethods.cpp b/src/LearningMethods/LearningMethods.cpp
index f1aba80e197acda08aa0eb825259fda2c702c4fe..f816582a109c07326370322cc94ed821e266b0c8 100644
--- a/src/LearningMethods/LearningMethods.cpp
+++ b/src/LearningMethods/LearningMethods.cpp
@@ -14,6 +14,7 @@ namespace lib4neuro {
 
     void GradientLearningMethod::optimize(ErrorFunction& ef,
                                           std::ofstream* ofs) {
-        this->optimize(ef, ofs);
+        this->optimize(ef,
+                       ofs);
     }
 }
\ No newline at end of file
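
Worth flagging while this file is being touched: the reformatted body forwards to an overload with the identical signature, so GradientLearningMethod::optimize(ef, ofs) calls itself and recurses without bound. Presumably the intent is to funnel the generic entry point into a more specific overload, the way LevenbergMarquardt::optimize below forwards to its LM_UPDATE_TYPE variant; a sketch of that shape, where the extra enum is hypothetical and not part of the library:

    // Hypothetical sketch only: GRADIENT_UPDATE_TYPE does not exist in
    // lib4neuro. The point is that the wrapper must name a different overload.
    void GradientLearningMethod::optimize(ErrorFunction& ef,
                                          std::ofstream* ofs) {
        this->optimize(ef,
                       GRADIENT_UPDATE_TYPE::DEFAULT,
                       ofs);
    }
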
diff --git a/src/LearningMethods/LearningSequence.cpp b/src/LearningMethods/LearningSequence.cpp
index 35a1811a2da361d41e5a41bec3fc4711213359ae..d7266cb8fc9d8b6c9de6561a65e99eb48d4e38ba 100644
--- a/src/LearningMethods/LearningSequence.cpp
+++ b/src/LearningMethods/LearningSequence.cpp
@@ -10,7 +10,8 @@
 
 namespace lib4neuro {
 
-    LearningSequence::LearningSequence( double tolerance, int max_n_cycles ){
+    LearningSequence::LearningSequence(double tolerance,
+                                       int max_n_cycles) {
         this->tol = tolerance;
         this->max_number_of_cycles = max_n_cycles;
     }
@@ -18,22 +19,24 @@ namespace lib4neuro {
     LearningSequence::~LearningSequence() = default;
 
     void LearningSequence::add_learning_method(std::shared_ptr<LearningMethod> method) {
-        this->learning_sequence.push_back( method );
+        this->learning_sequence.push_back(method);
     }
 
-    void LearningSequence::optimize(lib4neuro::ErrorFunction &ef, std::ofstream *ofs) {
+    void LearningSequence::optimize(lib4neuro::ErrorFunction& ef,
+                                    std::ofstream* ofs) {
         double error = ef.eval();
         this->optimal_parameters = ef.get_parameters();
         double the_best_error = error;
         int mcycles = this->max_number_of_cycles, cycle_idx = 0;
 
         std::vector<double> params;
-        while( error > this->tol && mcycles != 0){
+        while (error > this->tol && mcycles != 0) {
             mcycles--;
             cycle_idx++;
 
-            for(auto m: this->learning_sequence ){
-                m->optimize( ef, ofs );
+            for (auto m: this->learning_sequence) {
+                m->optimize(ef,
+                            ofs);
 
                 //TODO do NOT copy vectors if not needed
                 params = *m->get_parameters();
@@ -41,18 +44,18 @@ namespace lib4neuro {
 
                 ef.get_network_instance()->copy_parameter_space(&params);
 
-                if( error < the_best_error ){
+                if (error < the_best_error) {
                     the_best_error = error;
                     this->optimal_parameters = ef.get_parameters();
                 }
 
-                if( error <= this->tol ){
-                    ef.get_network_instance()->copy_parameter_space( &this->optimal_parameters );
+                if (error <= this->tol) {
+                    ef.get_network_instance()->copy_parameter_space(&this->optimal_parameters);
                     return;
                 }
             }
-            COUT_DEBUG("Cycle: " << cycle_idx << ", the lowest error: " << the_best_error << std::endl );
+            COUT_DEBUG("Cycle: " << cycle_idx << ", the lowest error: " << the_best_error << std::endl);
         }
-        ef.get_network_instance()->copy_parameter_space( &this->optimal_parameters );
+        ef.get_network_instance()->copy_parameter_space(&this->optimal_parameters);
     }
 }
\ No newline at end of file
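
The sequence runs each registered method once per cycle, keeps the best parameters seen so far, and restores them on exit. A short usage sketch, with include paths assumed and an ErrorFunction ef prepared elsewhere:

    #include <memory>

    // Illustrative: randomize first, then refine by gradient descent, for at
    // most five cycles or until the error drops below 1e-6.
    void train(lib4neuro::ErrorFunction& ef) {
        lib4neuro::LearningSequence seq(1e-6, 5);
        seq.add_learning_method(std::make_shared<lib4neuro::RandomSolution>());
        seq.add_learning_method(std::make_shared<lib4neuro::GradientDescentSingleItem>());
        seq.optimize(ef);
    }
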
diff --git a/src/LearningMethods/LearningSequence.h b/src/LearningMethods/LearningSequence.h
index 2e2b95113808077be0293aa38e8cfadbb4eaa79b..d0de54b8e83c17f2279ca3886fa67f24bf761e19 100644
--- a/src/LearningMethods/LearningSequence.h
+++ b/src/LearningMethods/LearningSequence.h
@@ -42,7 +42,8 @@ namespace lib4neuro {
         /**
          *
          */
-        LIB4NEURO_API explicit LearningSequence( double tolerance = 1e-6, int max_n_cycles = -1);
+        LIB4NEURO_API explicit LearningSequence(double tolerance = 1e-6,
+                                                int max_n_cycles = -1);
 
         /**
          * Deallocates the instance
@@ -54,13 +55,14 @@ namespace lib4neuro {
          * @param ef
          * @param ofs
          */
-        LIB4NEURO_API void optimize(lib4neuro::ErrorFunction &ef, std::ofstream* ofs = nullptr) override;
+        LIB4NEURO_API void optimize(lib4neuro::ErrorFunction& ef,
+                                    std::ofstream* ofs = nullptr) override;
 
         /**
          *
          * @param method
          */
-        LIB4NEURO_API void add_learning_method( std::shared_ptr<LearningMethod> method );
+        LIB4NEURO_API void add_learning_method(std::shared_ptr<LearningMethod> method);
     };
 
 }
diff --git a/src/LearningMethods/LevenbergMarquardt.cpp b/src/LearningMethods/LevenbergMarquardt.cpp
index a83da4a975cf04fc17aa8e958e963e193adff579..824acdb967472d30199522991e7b74c6bc3d2237 100644
--- a/src/LearningMethods/LevenbergMarquardt.cpp
+++ b/src/LearningMethods/LevenbergMarquardt.cpp
@@ -36,11 +36,17 @@ struct lib4neuro::LevenbergMarquardt::LevenbergMarquardtImpl {
      * @param rhs
      * @param data
      */
-    void get_jacobian_and_rhs(lib4neuro::NeuralNetwork &f, arma::Mat<double> &J, arma::Col<double> &rhs, std::vector<std::pair<std::vector<double>, std::vector<double>>> &data);
+    void get_jacobian_and_rhs(lib4neuro::NeuralNetwork& f,
+                              arma::Mat<double>& J,
+                              arma::Col<double>& rhs,
+                              std::vector<std::pair<std::vector<double>, std::vector<double>>>& data);
 };
 
 void lib4neuro::LevenbergMarquardt::LevenbergMarquardtImpl::get_jacobian_and_rhs(
-        lib4neuro::NeuralNetwork &f, arma::Mat<double> &J, arma::Col<double> &rhs, std::vector<std::pair<std::vector<double>, std::vector<double>>> &data) {
+        lib4neuro::NeuralNetwork& f,
+        arma::Mat<double>& J,
+        arma::Col<double>& rhs,
+        std::vector<std::pair<std::vector<double>, std::vector<double>>>& data) {
 
     size_t n_parameters = f.get_n_weights() + f.get_n_biases();
     size_t n_data_points = data.size();
@@ -48,18 +54,22 @@ void lib4neuro::LevenbergMarquardt::LevenbergMarquardtImpl::get_jacobian_and_rhs
 
     std::vector<std::vector<double>> jacobian;
     std::vector<double> partial_error;
-    J.reshape(dim_out, n_parameters);
+    J.reshape(dim_out,
+              n_parameters);
     rhs.resize(n_parameters);
     J.fill(0.0);
     rhs.fill(0.0);
 
     size_t row_idx = 0;
-    for ( auto item: data ){
-        f.get_jacobian( jacobian, item, partial_error );
-
-        for(size_t ri = 0; ri < jacobian.size(); ++ri){
-            for(size_t ci = 0; ci < n_parameters; ++ci){
-                J.at(row_idx, ci) = jacobian[ri][ci];
+    for (auto item: data) {
+        f.get_jacobian(jacobian,
+                       item,
+                       partial_error);
+
+        for (size_t ri = 0; ri < jacobian.size(); ++ri) {
+            for (size_t ci = 0; ci < n_parameters; ++ci) {
+                J.at(row_idx,
+                     ci) = jacobian[ri][ci];
                 rhs.at(ci) += partial_error[ri] * jacobian[ri][ci];
             }
             row_idx++;
@@ -91,7 +101,9 @@ namespace lib4neuro {
 
     void LevenbergMarquardt::optimize(lib4neuro::ErrorFunction& ef,
                                       std::ofstream* ofs) {
-        optimize(ef, LM_UPDATE_TYPE::MARQUARDT, ofs);
+        optimize(ef,
+                 LM_UPDATE_TYPE::MARQUARDT,
+                 ofs);
     }
 
     void LevenbergMarquardt::optimize(lib4neuro::ErrorFunction& ef,
@@ -99,30 +111,35 @@ namespace lib4neuro {
                                       std::ofstream* ofs) {
 
         /* Copy data set max and min values, if it's normalized */
-        if(ef.get_dataset()->is_normalized()) {
+        if (ef.get_dataset()->is_normalized()) {
             ef.get_network_instance()->set_normalization_strategy_instance(
                     ef.get_dataset()->get_normalization_strategy());
         }
 
         double current_err = ef.eval();
 
-        COUT_INFO("Finding a solution via a Levenberg-Marquardt method... Starting error: " << current_err << std::endl);
-        if(ofs && ofs->is_open()) {
-            *ofs << "Finding a solution via a Levenberg-Marquardt method... Starting error: " << current_err << std::endl;
+        COUT_INFO(
+                "Finding a solution via a Levenberg-Marquardt method... Starting error: " << current_err << std::endl);
+        if (ofs && ofs->is_open()) {
+            *ofs << "Finding a solution via a Levenberg-Marquardt method... Starting error: " << current_err
+                 << std::endl;
         }
 
         size_t n_parameters = ef.get_dimension();
         size_t n_data_points = ef.get_dataset()->get_n_elements();
-        if( this->p_impl->batch_size > 0 ){
+        if (this->p_impl->batch_size > 0) {
             n_data_points = this->p_impl->batch_size;
         }
         std::vector<double>* params_current = new std::vector<double>(ef.get_parameters());
 
         std::shared_ptr<std::vector<double>> params_tmp;
         params_tmp.reset(new std::vector<double>(n_parameters));
-        arma::Mat<double> J(n_data_points, n_parameters);  // Jacobian matrix
-        arma::Mat<double> H(n_data_points, n_parameters);  // Hessian matrix
-        arma::Mat<double> H_new(n_data_points, n_parameters);
+        arma::Mat<double> J(n_data_points,
+                            n_parameters);  // Jacobian matrix
+        arma::Mat<double> H(n_data_points,
+                            n_parameters);  // Hessian matrix
+        arma::Mat<double> H_new(n_data_points,
+                                n_parameters);
 
         double lambda = this->p_impl->lambda_initial;  // Dumping parameter
         double prev_err = 0, update_norm = 0, gradient_norm = 0, mem_double = 0, jacobian_norm = 1;
@@ -141,16 +158,19 @@ namespace lib4neuro {
         size_t iter_counter = 0;
         do {
 
-            if(update_J) {
+            if (update_J) {
                 /* Get Jacobian matrix */
                 std::vector<std::pair<std::vector<double>, std::vector<double>>> subset = ef.get_dataset()->get_random_data_batch(this->p_impl->batch_size);
-                this->p_impl->get_jacobian_and_rhs(*ef.get_network_instance(), J, rhs, subset);
+                this->p_impl->get_jacobian_and_rhs(*ef.get_network_instance(),
+                                                   J,
+                                                   rhs,
+                                                   subset);
 
 
                 gradient_norm = 0;
 
-                for( size_t ci = 0; ci < n_parameters; ++ci ){
-                    mem_double = rhs[ ci ];
+                for (size_t ci = 0; ci < n_parameters; ++ci) {
+                    mem_double = rhs[ci];
                     mem_double *= mem_double;
                     gradient_norm += mem_double;
                 }
@@ -160,9 +180,11 @@ namespace lib4neuro {
                 H = J.t() * J;
 
                 jacobian_norm = 0;
-                for( size_t ri = 0; ri < n_parameters; ++ri){
-                    for( size_t ci = 0; ci < n_parameters; ++ci){
-                        jacobian_norm += H.at(ri, ci) * H.at(ri, ci);
+                for (size_t ri = 0; ri < n_parameters; ++ri) {
+                    for (size_t ci = 0; ci < n_parameters; ++ci) {
+                        jacobian_norm += H.at(ri,
+                                              ci) * H.at(ri,
+                                                         ci);
                     }
                 }
                 jacobian_norm = std::sqrt(jacobian_norm);
@@ -172,15 +194,17 @@ namespace lib4neuro {
             }
 
             /* H_new = H + lambda*I */
-            H_new = H + lambda * arma::eye( n_parameters, n_parameters );
+            H_new = H + lambda * arma::eye(n_parameters,
+                                           n_parameters);
 
 
             /* Compute the update vector */
-            update = arma::solve(H_new, rhs);
+            update = arma::solve(H_new,
+                                 rhs);
 
             /* Compute the error after update of parameters */
             update_norm = 0.0;
-            for(size_t i = 0; i < n_parameters; i++) {
+            for (size_t i = 0; i < n_parameters; i++) {
                 params_tmp->at(i) = params_current->at(i) + update.at(i);
                 update_norm += update.at(i) * update.at(i);
             }
@@ -188,10 +212,10 @@ namespace lib4neuro {
             current_err = ef.eval(params_tmp.get());
 
             /* Check, if the parameter update improved the function */
-            if(current_err < prev_err) {
+            if (current_err < prev_err) {
 
                 /* If the convergence threshold is achieved, finish the computation */
-                if(current_err < this->p_impl->tolerance) {
+                if (current_err < this->p_impl->tolerance) {
                     break;
                 }
 
@@ -201,7 +225,7 @@ namespace lib4neuro {
                 //TODO rewrite without division!
                 lambda /= this->p_impl->lambda_decrease;
 
-                for(size_t i = 0; i < n_parameters; i++) {
+                for (size_t i = 0; i < n_parameters; i++) {
                     params_current->at(i) = params_tmp->at(i);
                 }
 
@@ -214,18 +238,19 @@ namespace lib4neuro {
                 update_J = false;
                 lambda *= this->p_impl->lambda_increase;
             }
-            COUT_DEBUG("Iteration: " << iter_counter << " Current error: " << current_err << ", Current gradient norm: " << gradient_norm << ", Direction norm: " << update_norm << "\r");
-
+            COUT_DEBUG("Iteration: " << iter_counter << " Current error: " << current_err << ", Current gradient norm: "
+                                     << gradient_norm << ", Direction norm: " << update_norm << "\r");
 
 
-        }while(iter_counter++ < this->p_impl->maximum_niters && (update_norm > this->p_impl->tolerance));
-        COUT_DEBUG("Iteration: " << iter_counter << " Current error: " << current_err << ", Current gradient norm: " << gradient_norm << ", Direction norm: " << update_norm << std::endl);
+        } while (iter_counter++ < this->p_impl->maximum_niters && (update_norm > this->p_impl->tolerance));
+        COUT_DEBUG("Iteration: " << iter_counter << " Current error: " << current_err << ", Current gradient norm: "
+                                 << gradient_norm << ", Direction norm: " << update_norm << std::endl);
 
         /* Store the optimized parameters */
         this->optimal_parameters = *params_current;
 
         /* Dealloc vector of parameters */
-        if(params_current) {
+        if (params_current) {
             delete params_current;
             params_current = nullptr;
         }
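
Behind the reformatting, the loop body is the classic damped Gauss-Newton iteration: get_jacobian_and_rhs assembles the Jacobian J and the right-hand side rhs = J^T e from a random batch, and each step solves

\[
(J^\top J + \lambda I)\,\delta = J^\top e, \qquad \theta_{k+1} = \theta_k + \delta,
\]

with lambda divided by lambda_decrease after a step that lowers the error and multiplied by lambda_increase after one that does not (update_J then skips the Jacobian rebuild). One detail the comments gloss over: H is declared with n_data_points rows, but the assignment H = J.t() * J immediately resizes it to the n_parameters by n_parameters approximated Hessian, so the declared shape is only a placeholder.
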
diff --git a/src/LearningMethods/LevenbergMarquardt.h b/src/LearningMethods/LevenbergMarquardt.h
index 332e2384dff62d5d7808530f243e67347f8659ed..31d7feb45f20b4f52066dce28f6cfb8066680466 100644
--- a/src/LearningMethods/LevenbergMarquardt.h
+++ b/src/LearningMethods/LevenbergMarquardt.h
@@ -30,17 +30,18 @@ namespace lib4neuro {
     public:
         LevenbergMarquardt(int max_iters,
                            unsigned long bs = 0,
-                           double tolerance=1e-2,
-                           double tolerance_gradient=1e-3,
-                           double tolerance_parameters=1e-3,
-                           double LM_step_acceptance_threshold=1e-1,
-                           double lambda_initial=1e-2,
-                           double lambda_increase=11,
-                           double lambda_decrease=9);
-
-        void optimize(ErrorFunction &ef, std::ofstream* ofs = nullptr);
+                           double tolerance = 1e-2,
+                           double tolerance_gradient = 1e-3,
+                           double tolerance_parameters = 1e-3,
+                           double LM_step_acceptance_threshold = 1e-1,
+                           double lambda_initial = 1e-2,
+                           double lambda_increase = 11,
+                           double lambda_decrease = 9);
+
+        void optimize(ErrorFunction& ef,
+                      std::ofstream* ofs = nullptr);
 
-        void optimize(ErrorFunction &ef,
+        void optimize(ErrorFunction& ef,
                       LM_UPDATE_TYPE update_type,
                       std::ofstream* ofs = nullptr);
 
diff --git a/src/LearningMethods/ParticleSwarm.cpp b/src/LearningMethods/ParticleSwarm.cpp
index a10a0c3ddcc6e4dc6886714981264bfd87a3a691..ccc823fee556a6d52d0a26cc89121205c6216fa0 100644
--- a/src/LearningMethods/ParticleSwarm.cpp
+++ b/src/LearningMethods/ParticleSwarm.cpp
@@ -36,8 +36,9 @@ void Particle::randomize_coordinates() {
 
     std::random_device seeder;
     std::mt19937 gen(seeder());
-    for(unsigned int i = 0; i < this->coordinate_dim; ++i){
-        std::uniform_real_distribution<double> dist_coord(this->domain_bounds->at(2 * i), this->domain_bounds->at(2 * i + 1));
+    for (unsigned int i = 0; i < this->coordinate_dim; ++i) {
+        std::uniform_real_distribution<double> dist_coord(this->domain_bounds->at(2 * i),
+                                                          this->domain_bounds->at(2 * i + 1));
         (*this->coordinate)[i] = dist_coord(gen);
     }
 }
@@ -46,7 +47,8 @@ void Particle::randomize_parameters() {
 
     std::random_device seeder;
     std::mt19937 gen(seeder());
-    std::uniform_real_distribution<double> dist_vel(0.5, 1.0);
+    std::uniform_real_distribution<double> dist_vel(0.5,
+                                                    1.0);
     this->r1 = dist_vel(gen);
     this->r2 = dist_vel(gen);
     this->r3 = dist_vel(gen);
@@ -55,13 +57,15 @@ void Particle::randomize_parameters() {
 void Particle::randomize_velocity() {
     std::random_device seeder;
     std::mt19937 gen(seeder());
-    std::uniform_real_distribution<double> dist_vel(0.5, 1.0);
-    for(unsigned int i = 0; i < this->coordinate_dim; ++i){
+    std::uniform_real_distribution<double> dist_vel(0.5,
+                                                    1.0);
+    for (unsigned int i = 0; i < this->coordinate_dim; ++i) {
         (*this->velocity)[i] = dist_vel(gen);
     }
 }
 
-Particle::Particle(lib4neuro::ErrorFunction *ef, std::vector<double> *domain_bounds) {
+Particle::Particle(lib4neuro::ErrorFunction* ef,
+                   std::vector<double>* domain_bounds) {
 
     this->ef = ef;
     this->domain_bounds = new std::vector<double>(*domain_bounds);
@@ -77,7 +81,7 @@ Particle::Particle(lib4neuro::ErrorFunction *ef, std::vector<double> *domain_bou
     this->randomize_parameters();
     this->randomize_coordinates();
 
-    for(unsigned int i = 0; i < this->coordinate_dim; ++i){
+    for (unsigned int i = 0; i < this->coordinate_dim; ++i) {
         (*this->optimal_coordinate)[i] = (*this->coordinate)[i];
     }
 
@@ -85,20 +89,22 @@ Particle::Particle(lib4neuro::ErrorFunction *ef, std::vector<double> *domain_bou
 
 }
 
-Particle::Particle(lib4neuro::ErrorFunction *ef, std::vector<double> *central_system, double dispersion_coeff) {
+Particle::Particle(lib4neuro::ErrorFunction* ef,
+                   std::vector<double>* central_system,
+                   double dispersion_coeff) {
 
     this->ef = ef;
 
-    if( this->domain_bounds ){
+    if (this->domain_bounds) {
         delete this->domain_bounds;
     }
 
     this->domain_bounds = new std::vector<double>(2 * central_system->size());
 
 
-    for( size_t i = 0; i < central_system->size(); ++i ){
-        this->domain_bounds->at(2 * i) = central_system->at( i ) - dispersion_coeff;
-        this->domain_bounds->at(2 * i + 1) = central_system->at( i ) + dispersion_coeff;
+    for (size_t i = 0; i < central_system->size(); ++i) {
+        this->domain_bounds->at(2 * i) = central_system->at(i) - dispersion_coeff;
+        this->domain_bounds->at(2 * i + 1) = central_system->at(i) + dispersion_coeff;
     }
 
     this->coordinate_dim = ef->get_dimension();
@@ -113,7 +119,7 @@ Particle::Particle(lib4neuro::ErrorFunction *ef, std::vector<double> *central_sy
     this->randomize_parameters();
     this->randomize_coordinates();
 
-    for(unsigned int i = 0; i < this->coordinate_dim; ++i){
+    for (unsigned int i = 0; i < this->coordinate_dim; ++i) {
         (*this->optimal_coordinate)[i] = (*this->coordinate)[i];
     }
 
@@ -123,15 +129,15 @@ Particle::Particle(lib4neuro::ErrorFunction *ef, std::vector<double> *central_sy
 
 Particle::~Particle() {
 
-    if( this->optimal_coordinate ){
+    if (this->optimal_coordinate) {
         delete this->optimal_coordinate;
     }
 
-    if( this->coordinate ){
+    if (this->coordinate) {
         delete this->coordinate;
     }
 
-    if( this->velocity ){
+    if (this->velocity) {
         delete this->velocity;
     }
 
@@ -149,13 +155,18 @@ double Particle::get_optimal_value() {
     return this->optimal_value;
 }
 
-void Particle::get_optimal_coordinate(std::vector<double> &ref_coordinate) {
-    for( unsigned int i = 0; i < this->coordinate_dim; ++i ){
+void Particle::get_optimal_coordinate(std::vector<double>& ref_coordinate) {
+    for (unsigned int i = 0; i < this->coordinate_dim; ++i) {
         ref_coordinate[i] = (*this->optimal_coordinate)[i];
     }
 }
 
-double Particle::change_coordinate(double w, double c1, double c2, std::vector<double> &glob_min_coord, std::vector<std::vector<double>> &global_min_vec, double penalty_coef) {
+double Particle::change_coordinate(double w,
+                                   double c1,
+                                   double c2,
+                                   std::vector<double>& glob_min_coord,
+                                   std::vector<std::vector<double>>& global_min_vec,
+                                   double penalty_coef) {
 
     /**
      * v = w * v + c1r1(p_min_loc - x) + c2r2(p_min_glob - x) + c3r3(random_global_min - x)
@@ -166,20 +177,21 @@ double Particle::change_coordinate(double w, double c1, double c2, std::vector<d
     double output = 0.0;
 
     /* Choose random global minima */
-    std::vector<double> *random_global_best;
+    std::vector<double>* random_global_best;
     std::random_device rand_dev;
     std::mt19937 engine{rand_dev()};
-    std::uniform_int_distribution<size_t> dist(0, global_min_vec.size() - 1);
+    std::uniform_int_distribution<size_t> dist(0,
+                                               global_min_vec.size() - 1);
     random_global_best = &global_min_vec[dist(engine)];
 
     // TODO use std::sample to choose random vector
     //std::sample(global_min_vec.begin(), global_min_vec.end(), std::back_inserter(random_global_best), 1, std::mt19937{std::random_device{}()});
 
-    for(unsigned int i = 0; i < this->coordinate_dim; ++i) {
+    for (unsigned int i = 0; i < this->coordinate_dim; ++i) {
         vel_mem = w * (*this->velocity)[i]
                   + c1 * this->r1 * ((*this->optimal_coordinate)[i] - (*this->coordinate)[i])
                   + c2 * this->r2 * (glob_min_coord[i] - (*this->coordinate)[i])
-                  + (c1+c2)/2 * this->r3 * ((*random_global_best)[i] - (*this->coordinate)[i]);
+                  + (c1 + c2) / 2 * this->r3 * ((*random_global_best)[i] - (*this->coordinate)[i]);
 
         if ((*this->coordinate)[i] + vel_mem > this->domain_bounds->at(2 * i + 1)) {
             this->randomize_velocity();
@@ -194,11 +206,11 @@ double Particle::change_coordinate(double w, double c1, double c2, std::vector<d
         }
     }
 
-    for(unsigned int i = 0; i < this->coordinate_dim; ++i){
+    for (unsigned int i = 0; i < this->coordinate_dim; ++i) {
         vel_mem = w * (*this->velocity)[i]
-                + c1 * this->r1 * ((*this->optimal_coordinate)[i] - (*this->coordinate)[i])
-                + c2 * this->r2 * (glob_min_coord[i] - (*this->coordinate)[i])
-                + (c1+c2)/2 * this->r3 * ((*random_global_best)[i] - (*this->coordinate)[i]);
+                  + c1 * this->r1 * ((*this->optimal_coordinate)[i] - (*this->coordinate)[i])
+                  + c2 * this->r2 * (glob_min_coord[i] - (*this->coordinate)[i])
+                  + (c1 + c2) / 2 * this->r3 * ((*random_global_best)[i] - (*this->coordinate)[i]);
 
 
         (*this->velocity)[i] = vel_mem;
@@ -210,9 +222,9 @@ double Particle::change_coordinate(double w, double c1, double c2, std::vector<d
     vel_mem = this->ef->eval(this->coordinate);
     this->current_val = vel_mem;
 
-    if(vel_mem < this->optimal_value){
+    if (vel_mem < this->optimal_value) {
         this->optimal_value = vel_mem;
-        for(unsigned int i = 0; i < this->coordinate_dim; ++i){
+        for (unsigned int i = 0; i < this->coordinate_dim; ++i) {
             (*this->optimal_coordinate)[i] = (*this->coordinate)[i];
         }
     }
@@ -221,14 +233,14 @@ double Particle::change_coordinate(double w, double c1, double c2, std::vector<d
 }
 
 void Particle::print_coordinate() {
-    for(unsigned int i = 0; i < this->coordinate_dim - 1; ++i){
+    for (unsigned int i = 0; i < this->coordinate_dim - 1; ++i) {
         std::cout << (*this->coordinate)[i] << " ";
     }
     std::cout << (*this->coordinate)[this->coordinate_dim - 1] << std::endl;
 }
 
 namespace lib4neuro {
-    ParticleSwarm::ParticleSwarm(std::vector<double> *domain_bounds,
+    ParticleSwarm::ParticleSwarm(std::vector<double>* domain_bounds,
                                  double c1,
                                  double c2,
                                  double w,
@@ -249,7 +261,12 @@ namespace lib4neuro {
         this->delta = delta;
         this->pst = PARTICLE_SWARM_TYPE::GENERAL;
 
-        this->init_constructor(domain_bounds, c1, c2, w, n_particles, iter_max);
+        this->init_constructor(domain_bounds,
+                               c1,
+                               c2,
+                               w,
+                               n_particles,
+                               iter_max);
     }
 
     ParticleSwarm::~ParticleSwarm() {
@@ -261,13 +278,14 @@ namespace lib4neuro {
      *
      *
      */
-    void ParticleSwarm::optimize(lib4neuro::ErrorFunction &ef, std::ofstream* ofs) {
+    void ParticleSwarm::optimize(lib4neuro::ErrorFunction& ef,
+                                 std::ofstream* ofs) {
         //TODO add output to the 'ofs'
 
         COUT_INFO("Finding optima via Globalized Particle Swarm method..." << std::endl);
 
         /* Copy data set max and min values, if it's normalized */
-        if(ef.get_dataset()->is_normalized()) {
+        if (ef.get_dataset()->is_normalized()) {
             ef.get_network_instance()->set_normalization_strategy_instance(
                     ef.get_dataset()->get_normalization_strategy());
         }
@@ -285,20 +303,22 @@ namespace lib4neuro {
             if (this->particle_swarm.at(pi)) {
                 delete this->particle_swarm.at(pi);
             }
-            this->particle_swarm.at(pi) = new Particle(&ef, new std::vector<double>(ef.get_parameters()), this->radius_factor);
+            this->particle_swarm.at(pi) = new Particle(&ef,
+                                                       new std::vector<double>(ef.get_parameters()),
+                                                       this->radius_factor);
         }
         this->radius_factor *= 1.25;
 
-            this->optimal_parameters.resize(this->func_dim);
+        this->optimal_parameters.resize(this->func_dim);
 
         size_t outer_it = 0;
-        Particle *particle;
+        Particle* particle;
 
         std::vector<std::vector<double>> global_best_vec;
         double optimal_value = 0.0;
 
-        std::set<Particle *> cluster; //!< Particles in a cluster
-        std::vector<double> *centroid = new std::vector<double>(this->func_dim);//<! Centroid coordinates
+        std::set<Particle*> cluster; //!< Particles in a cluster
+        std::vector<double>* centroid = new std::vector<double>(this->func_dim); //!< Centroid coordinates
 
         double tmp_velocity;
         double prev_max_velocity = 0;
@@ -308,7 +328,8 @@ namespace lib4neuro {
         double euclidean_dist;
         double current_err = -1;
 
-        this->determine_optimal_coordinate_and_value(this->optimal_parameters, optimal_value);
+        this->determine_optimal_coordinate_and_value(this->optimal_parameters,
+                                                     optimal_value);
         COUT_INFO("Initial best value: " << optimal_value << std::endl);
 
         while (outer_it < this->iter_max) {
@@ -318,9 +339,12 @@ namespace lib4neuro {
             //////////////////////////////////////////////////
             // Clustering algorithm - termination condition //
             //////////////////////////////////////////////////
-            particle = this->determine_optimal_coordinate_and_value(this->optimal_parameters, optimal_value);
+            particle = this->determine_optimal_coordinate_and_value(this->optimal_parameters,
+                                                                    optimal_value);
 
-            if (std::find(global_best_vec.begin(), global_best_vec.end(), this->optimal_parameters) == global_best_vec.end()) {
+            if (std::find(global_best_vec.begin(),
+                          global_best_vec.end(),
+                          this->optimal_parameters) == global_best_vec.end()) {
                 global_best_vec.emplace_back(this->optimal_parameters); // TODO rewrite as std::set
             }
 
@@ -328,8 +352,10 @@ namespace lib4neuro {
 
             //for(unsigned int i=0; i < 5; i++) {
             /* Zero AVG coordinates */
-            std::fill(centroid->begin(), centroid->end(), 0);
-            std::vector<double> *c_ptr;
+            std::fill(centroid->begin(),
+                      centroid->end(),
+                      0);
+            std::vector<double>* c_ptr;
 
             /* Looking for a centroid */
             for (auto it : cluster) {
@@ -345,7 +371,10 @@ namespace lib4neuro {
 
             for (size_t pi = 0; pi < this->n_particles; pi++) {
                 particle = this->particle_swarm.at(pi);
-                tmp_velocity = particle->change_coordinate(this->w, this->c1, this->c2, this->optimal_parameters,
+                tmp_velocity = particle->change_coordinate(this->w,
+                                                           this->c1,
+                                                           this->c2,
+                                                           this->optimal_parameters,
                                                            global_best_vec);
 
                 if (tmp_velocity > max_velocity) {
@@ -358,9 +387,11 @@ namespace lib4neuro {
 
                 // TODO - only in verbose mode
                 // only for info purposes
-                euclidean_dist += this->get_euclidean_distance(particle->get_coordinate(), centroid);
+                euclidean_dist += this->get_euclidean_distance(particle->get_coordinate(),
+                                                               centroid);
 
-                if (this->get_euclidean_distance(particle->get_coordinate(), centroid) < epsilon) {
+                if (this->get_euclidean_distance(particle->get_coordinate(),
+                                                 centroid) < epsilon) {
                     cluster.insert(particle);
                 }
             }
@@ -380,15 +411,15 @@ namespace lib4neuro {
 
             current_err = ef.eval(&this->optimal_parameters);
 
-            COUT_DEBUG(std::string("Iteration: ") << (unsigned int)(outer_it)
+            COUT_DEBUG(std::string("Iteration: ") << (unsigned int) (outer_it)
                                                   << ". Total error: " << current_err
                                                   << ". Objective function value: " << optimal_value
                                                   << ".\r");
 
-            if(this->err_thresh) {
+            if (this->err_thresh) {
 
                 /* If the error threshold is given, then check the current error */
-                if(current_err <= this->err_thresh) {
+                if (current_err <= this->err_thresh) {
                     break;
                 }
             } else {
@@ -405,19 +436,22 @@ namespace lib4neuro {
             //TODO parameter for inertia weight decrease?
 
         }
-        COUT_DEBUG(std::string("Iteration: ") << (unsigned int)(outer_it)
+        COUT_DEBUG(std::string("Iteration: ") << (unsigned int) (outer_it)
                                               << ". Total error: " << current_err
                                               << ". Objective function value: " << optimal_value
-                                              << "." << std::endl );
+                                              << "." << std::endl);
 
-        this->determine_optimal_coordinate_and_value(this->optimal_parameters, optimal_value);
+        this->determine_optimal_coordinate_and_value(this->optimal_parameters,
+                                                     optimal_value);
         //TODO rewrite following output using COUT_INFO
         if (outer_it < this->iter_max) {
             /* Convergence reached */
-            COUT_INFO( std::endl << "Found optimum in "  <<  outer_it << " iterations. Objective function value: " << optimal_value << std::endl);
+            COUT_INFO(std::endl << "Found optimum in " << outer_it << " iterations. Objective function value: "
+                                << optimal_value << std::endl);
         } else {
             /* Maximal number of iterations reached */
-            COUT_INFO( std::endl << "Max number of iterations reached ("  <<  outer_it << ")!  Objective function value: " << optimal_value <<std:: endl);
+            COUT_INFO(std::endl << "Max number of iterations reached (" << outer_it << ")! Objective function value: "
+                                << optimal_value << std::endl);
         }
 
         ef.get_network_instance()->copy_parameter_space(&this->optimal_parameters);
@@ -434,19 +468,25 @@ namespace lib4neuro {
                                  size_t n_particles,
                                  size_t iter_max) {
 
-        if(err_thresh <= 0 ) {
+        if (err_thresh <= 0) {
             THROW_INVALID_ARGUMENT_ERROR("Error threshold has to be greater then 0!");
         }
 
         this->err_thresh = err_thresh;
         this->pst = pst;
 
-        this->init_constructor(domain_bounds, c1, c2, w, n_particles, iter_max);
+        this->init_constructor(domain_bounds,
+                               c1,
+                               c2,
+                               w,
+                               n_particles,
+                               iter_max);
     }
 
-    Particle *ParticleSwarm::determine_optimal_coordinate_and_value(std::vector<double> &coord, double &val) {
+    Particle* ParticleSwarm::determine_optimal_coordinate_and_value(std::vector<double>& coord,
+                                                                    double& val) {
 
-        Particle *p;
+        Particle* p;
 
         val = this->particle_swarm.at(0)->get_optimal_value();
         this->particle_swarm.at(0)->get_optimal_coordinate(coord);
@@ -466,9 +506,9 @@ namespace lib4neuro {
         return p;
     }
 
-    std::vector<double> *ParticleSwarm::get_centroid_coordinates() {
-        std::vector<double> *coords = new std::vector<double>(this->func_dim);
-        std::vector<double> *tmp;
+    std::vector<double>* ParticleSwarm::get_centroid_coordinates() {
+        std::vector<double>* coords = new std::vector<double>(this->func_dim);
+        std::vector<double>* tmp;
 
         for (size_t pi = 0; pi < this->n_particles; pi++) {
             tmp = this->particle_swarm.at(pi)->get_coordinate();
@@ -485,7 +525,8 @@ namespace lib4neuro {
         return coords;
     }
 
-    double ParticleSwarm::get_euclidean_distance(std::vector<double> *a, std::vector<double> *b) {
+    double ParticleSwarm::get_euclidean_distance(std::vector<double>* a,
+                                                 std::vector<double>* b) {
         double dist = 0, m;
         for (size_t i = 0; i < a->size(); i++) {
             m = (*a)[i] - (*b)[i];
@@ -507,7 +548,9 @@ namespace lib4neuro {
         this->w = w;
         this->n_particles = n_particles;
         this->iter_max = iter_max;
-        std::fill(this->particle_swarm.begin(), this->particle_swarm.end(), nullptr);
+        std::fill(this->particle_swarm.begin(),
+                  this->particle_swarm.end(),
+                  nullptr);
     }
 
 }
\ No newline at end of file
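
For reference, the velocity rule reformatted above (and quoted in the doc comment inside change_coordinate) is the standard particle swarm update with a third attractor drawn uniformly from the set of global minima found so far; as implemented, the third coefficient is the mean of the first two:

\[
v \leftarrow w\,v
  + c_1 r_1 (p_{\mathrm{loc}} - x)
  + c_2 r_2 (p_{\mathrm{glob}} - x)
  + \frac{c_1 + c_2}{2}\, r_3 (p_{\mathrm{rand}} - x),
\qquad
x \leftarrow x + v,
\]

where r1, r2 and r3 are drawn from U(0.5, 1) and the velocity is re-randomized whenever a proposed move would leave the particle's domain bounds.
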
diff --git a/src/LearningMethods/ParticleSwarm.h b/src/LearningMethods/ParticleSwarm.h
index 0a80ed707c60bccc9c58dab30d4146b869aa3d84..57ce75c80a08014169d71e368f449d65575143f3 100644
--- a/src/LearningMethods/ParticleSwarm.h
+++ b/src/LearningMethods/ParticleSwarm.h
@@ -13,7 +13,6 @@
 #include "LearningMethod.h"
 
 
-
 /**
  *
  */
@@ -21,10 +20,10 @@ class Particle {
 private:
 
     size_t coordinate_dim;
-    std::vector<double> *coordinate = nullptr;
-    std::vector<double> *velocity = nullptr;
+    std::vector<double>* coordinate = nullptr;
+    std::vector<double>* velocity = nullptr;
 
-    std::vector<double> *optimal_coordinate = nullptr;
+    std::vector<double>* optimal_coordinate = nullptr;
     double optimal_value;
 
     double r1;
@@ -33,9 +32,9 @@ private:
 
     double current_val;
 
-    lib4neuro::ErrorFunction *ef = nullptr;
+    lib4neuro::ErrorFunction* ef = nullptr;
 
-    std::vector<double> *domain_bounds = nullptr;
+    std::vector<double>* domain_bounds = nullptr;
 
 
     void randomize_coordinates();
@@ -55,7 +54,8 @@ public:
      *
      * @param f_dim
      */
-    LIB4NEURO_API Particle(lib4neuro::ErrorFunction* ef, std::vector<double> *domain_bounds);
+    LIB4NEURO_API Particle(lib4neuro::ErrorFunction* ef,
+                           std::vector<double>* domain_bounds);
 
     /**
      *
@@ -63,9 +63,11 @@ public:
      * @param central_system
      * @param dispersion_coeff
      */
-    LIB4NEURO_API Particle(lib4neuro::ErrorFunction* ef, std::vector<double> *central_system, double dispersion_coeff);
+    LIB4NEURO_API Particle(lib4neuro::ErrorFunction* ef,
+                           std::vector<double>* central_system,
+                           double dispersion_coeff);
 
-    LIB4NEURO_API ~Particle( );
+    LIB4NEURO_API ~Particle();
 
     /**
      *
@@ -89,7 +91,7 @@ public:
      *
      * @param ref_coordinate
      */
-    LIB4NEURO_API void get_optimal_coordinate(std::vector<double> &ref_coordinate);
+    LIB4NEURO_API void get_optimal_coordinate(std::vector<double>& ref_coordinate);
 
     /**
      *
@@ -99,7 +101,12 @@ public:
      * @param glob_min_coord
      * @param penalty_coef
      */
-    LIB4NEURO_API double change_coordinate(double w, double c1, double c2, std::vector<double> &glob_min_coord, std::vector<std::vector<double>> &global_min_vec, double penalty_coef=0.25);
+    LIB4NEURO_API double change_coordinate(double w,
+                                           double c1,
+                                           double c2,
+                                           std::vector<double>& glob_min_coord,
+                                           std::vector<std::vector<double>>& global_min_vec,
+                                           double penalty_coef = 0.25);
 };
 
 namespace lib4neuro {
@@ -123,7 +130,7 @@ namespace lib4neuro {
         /**
          * Vector of particles contained in the swarm
          */
-        std::vector<Particle *> particle_swarm; // = nullptr;
+        std::vector<Particle*> particle_swarm; // = nullptr;
 
         /**
          * Dimension of the optimized function
@@ -204,13 +211,14 @@ namespace lib4neuro {
          * @param val
          * @return
          */
-        LIB4NEURO_API Particle *determine_optimal_coordinate_and_value(std::vector<double> &coord, double &val);
+        LIB4NEURO_API Particle* determine_optimal_coordinate_and_value(std::vector<double>& coord,
+                                                                       double& val);
 
         /**
          *
          * @return
          */
-        LIB4NEURO_API std::vector<double> *get_centroid_coordinates();
+        LIB4NEURO_API std::vector<double>* get_centroid_coordinates();
 
         /**
          *
@@ -219,12 +227,13 @@ namespace lib4neuro {
          * @param n
          * @return
          */
-        LIB4NEURO_API double get_euclidean_distance(std::vector<double> *a, std::vector<double> *b);
+        LIB4NEURO_API double get_euclidean_distance(std::vector<double>* a,
+                                                    std::vector<double>* b);
 
         /**
          *
          */
-        void init_constructor(std::vector<double> *domain_bounds,
+        void init_constructor(std::vector<double>* domain_bounds,
                               double c1,
                               double c2,
                               double w,
@@ -247,7 +256,7 @@ namespace lib4neuro {
          * @param iter_max Maximal number of iterations - optimization will stop after that, even if not converged
          */
         LIB4NEURO_API explicit ParticleSwarm(
-                std::vector<double> *domain_bounds,
+                std::vector<double>* domain_bounds,
                 double c1 = 1.711897,
                 double c2 = 1.711897,
                 double w = 0.711897,
@@ -277,7 +286,7 @@ namespace lib4neuro {
          *                   ErrorFunction
          */
         LIB4NEURO_API explicit ParticleSwarm(
-                std::vector<double> *domain_bounds,
+                std::vector<double>* domain_bounds,
                 double err_thresh,
                 PARTICLE_SWARM_TYPE,
                 double c1 = 1.711897,
@@ -298,7 +307,8 @@ namespace lib4neuro {
          * @param epsilon
          * @param delta
          */
-        LIB4NEURO_API void optimize(lib4neuro::ErrorFunction &ef, std::ofstream* ofs = nullptr) override;
+        LIB4NEURO_API void optimize(lib4neuro::ErrorFunction& ef,
+                                    std::ofstream* ofs = nullptr) override;
 
     };
 
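The domain_bounds vector taken by the constructors is laid out as interleaved (min, max) pairs, one pair per optimized parameter, as the at(2 * i) / at(2 * i + 1) indexing in randomize_coordinates shows. A hedged usage fragment, again assuming a prepared ErrorFunction ef:

    #include <cstddef>
    #include <vector>

    // Illustrative: box-constrain every parameter to [-10, 10] and run the
    // swarm with the defaulted c1, c2, w, n_particles and iter_max.
    void swarm_train(lib4neuro::ErrorFunction& ef) {
        std::vector<double> bounds(2 * ef.get_dimension());
        for (std::size_t i = 0; i < bounds.size(); i += 2) {
            bounds[i]     = -10.0;  // lower bound of parameter i / 2
            bounds[i + 1] =  10.0;  // upper bound of parameter i / 2
        }
        lib4neuro::ParticleSwarm swarm(&bounds);
        swarm.optimize(ef);
    }
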
diff --git a/src/LearningMethods/RandomSolution.cpp b/src/LearningMethods/RandomSolution.cpp
index ea0dc2d19e3d47ae13dcbb70100682d6065da520..54390919e8396485c11c997d33f0fe8b301c9f09 100644
--- a/src/LearningMethods/RandomSolution.cpp
+++ b/src/LearningMethods/RandomSolution.cpp
@@ -15,7 +15,8 @@ namespace lib4neuro {
 
     RandomSolution::~RandomSolution() {}
 
-    void RandomSolution::optimize(lib4neuro::ErrorFunction &ef, std::ofstream *ofs) {
+    void RandomSolution::optimize(lib4neuro::ErrorFunction& ef,
+                                  std::ofstream* ofs) {
         ef.get_network_instance()->randomize_parameters();
 
         this->optimal_parameters = ef.get_parameters();
diff --git a/src/LearningMethods/RandomSolution.h b/src/LearningMethods/RandomSolution.h
index cb61a56684c080b6961d5219974cb7d8e9e2d3c9..8345853f5609b1eb25200fa19977cae2fdde550c 100644
--- a/src/LearningMethods/RandomSolution.h
+++ b/src/LearningMethods/RandomSolution.h
@@ -23,8 +23,8 @@ namespace lib4neuro {
 
         ~RandomSolution();
 
-        void optimize(lib4neuro::ErrorFunction &ef,
-                      std::ofstream *ofs = nullptr) override;
+        void optimize(lib4neuro::ErrorFunction& ef,
+                      std::ofstream* ofs = nullptr) override;
     };
 
 }
diff --git a/src/NetConnection/ConnectionFunctionConstant.cpp b/src/NetConnection/ConnectionFunctionConstant.cpp
index 02908416066ebd8c8c681951cf97329fecd12e0a..3723e6fcdb13c7a1b80830b9eb3afd56979041e0 100644
--- a/src/NetConnection/ConnectionFunctionConstant.cpp
+++ b/src/NetConnection/ConnectionFunctionConstant.cpp
@@ -20,9 +20,10 @@ ConnectionFunctionConstant::~ConnectionFunctionConstant() {
 
 }
 
-double ConnectionFunctionConstant::eval(std::vector<double> &parameter_space) {
+double ConnectionFunctionConstant::eval(std::vector<double>& parameter_space) {
     return this->weight;
 }
 
-void ConnectionFunctionConstant::eval_partial_derivative(std::vector<double> &parameter_space,
-                                                 std::vector<double> &weight_gradient, double alpha) {}
\ No newline at end of file
+void ConnectionFunctionConstant::eval_partial_derivative(std::vector<double>& parameter_space,
+                                                         std::vector<double>& weight_gradient,
+                                                         double alpha) {}
\ No newline at end of file
diff --git a/src/NetConnection/ConnectionFunctionConstant.h b/src/NetConnection/ConnectionFunctionConstant.h
index 541e5c06d01a94e616f8d63a0743edd90c015cc5..886189590342d96b223e43b416f3a582d462d625 100644
--- a/src/NetConnection/ConnectionFunctionConstant.h
+++ b/src/NetConnection/ConnectionFunctionConstant.h
@@ -11,7 +11,7 @@
 #include "../settings.h"
 #include "ConnectionFunctionGeneral.h"
 
-class ConnectionFunctionConstant:public ConnectionFunctionGeneral {
+class ConnectionFunctionConstant : public ConnectionFunctionGeneral {
 private:
     double weight;
 
@@ -26,9 +26,11 @@ public:
 
     LIB4NEURO_API ~ConnectionFunctionConstant();
 
-    LIB4NEURO_API double eval( std::vector<double> &parameter_space ) override;
+    LIB4NEURO_API double eval(std::vector<double>& parameter_space) override;
 
-    LIB4NEURO_API void eval_partial_derivative(std::vector<double> &parameter_space, std::vector<double> &weight_gradient, double alpha) override;
+    LIB4NEURO_API void eval_partial_derivative(std::vector<double>& parameter_space,
+                                               std::vector<double>& weight_gradient,
+                                               double alpha) override;
 
 };
 
diff --git a/src/NetConnection/ConnectionFunctionConstantSerialization.h b/src/NetConnection/ConnectionFunctionConstantSerialization.h
index 913858ab0b1cf2884ce3c2abefa083b692d7dd2a..457f630d1f1a34ed96347ba07e05f7f2bb90f7ff 100644
--- a/src/NetConnection/ConnectionFunctionConstantSerialization.h
+++ b/src/NetConnection/ConnectionFunctionConstantSerialization.h
@@ -12,17 +12,21 @@
 
 BOOST_CLASS_EXPORT_KEY(ConnectionFunctionConstant);
 
-struct ConnectionFunctionConstant :: access {
-    template <class Archive>
-    static void serialize(Archive &ar, ConnectionFunctionConstant& c, const unsigned int version) {
+struct ConnectionFunctionConstant::access {
+    template<class Archive>
+    static void serialize(Archive& ar,
+                          ConnectionFunctionConstant& c,
+                          const unsigned int version) {
         ar & boost::serialization::base_object<ConnectionFunctionGeneral>(c);
         ar & c.weight;
     }
 };
 
 // TODO what's the following template doing exactly?
-template void ConnectionFunctionConstant::access::serialize<boost::archive::text_oarchive>(boost::archive::text_oarchive&,
-        ConnectionFunctionConstant&, const unsigned int);
+template void
+ConnectionFunctionConstant::access::serialize<boost::archive::text_oarchive>(boost::archive::text_oarchive&,
+                                                                             ConnectionFunctionConstant&,
+                                                                             const unsigned int);
 
 namespace boost {
     namespace serialization {
@@ -35,9 +39,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, ConnectionFunctionConstant& c, const unsigned int version)
-        {
-            ConnectionFunctionConstant::access::serialize(ar, c, version);
+        void serialize(Archive& ar,
+                       ConnectionFunctionConstant& c,
+                       const unsigned int version) {
+            ConnectionFunctionConstant::access::serialize(ar,
+                                                          c,
+                                                          version);
         }
 
 
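On the TODO above: the "template void ... serialize<boost::archive::text_oarchive>(...)" statement is an explicit instantiation definition. It forces the compiler to emit the text_oarchive specialization of the serialize function template in this translation unit, so the symbol exists for the serialization machinery even though nothing here calls it directly. The same mechanism in miniature:

    // Generic illustration of explicit instantiation, unrelated to Boost:
    template<class T>
    T twice(T x) { return x + x; }

    // Emits twice<int> into this object file even if no local code calls it.
    template int twice<int>(int);
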
diff --git a/src/NetConnection/ConnectionFunctionGeneral.cpp b/src/NetConnection/ConnectionFunctionGeneral.cpp
index 5cbfdd22b9171e3a6996c2b091c3a7a1f71c133c..358348581cb4a69cae31aa98fc849d8f36793eb5 100644
--- a/src/NetConnection/ConnectionFunctionGeneral.cpp
+++ b/src/NetConnection/ConnectionFunctionGeneral.cpp
@@ -14,7 +14,8 @@ BOOST_CLASS_EXPORT_IMPLEMENT(ConnectionFunctionGeneral);
 
 ConnectionFunctionGeneral::ConnectionFunctionGeneral() {}
 
-ConnectionFunctionGeneral::ConnectionFunctionGeneral(std::vector<size_t > &param_indices, std::string &function_string) {
+ConnectionFunctionGeneral::ConnectionFunctionGeneral(std::vector<size_t>& param_indices,
+                                                     std::string& function_string) {
     this->param_indices = param_indices;
 }
 
@@ -22,13 +23,15 @@ ConnectionFunctionGeneral::~ConnectionFunctionGeneral() {
 
 }
 
-double ConnectionFunctionGeneral::eval( std::vector<double> &parameter_space ) {
+double ConnectionFunctionGeneral::eval(std::vector<double>& parameter_space) {
     //TODO
     THROW_NOT_IMPLEMENTED_ERROR();
     return 0.0;
 }
 
-void ConnectionFunctionGeneral::eval_partial_derivative(std::vector<double> &parameter_space, std::vector<double> &weight_gradient, double alpha) {
+void ConnectionFunctionGeneral::eval_partial_derivative(std::vector<double>& parameter_space,
+                                                        std::vector<double>& weight_gradient,
+                                                        double alpha) {
     //TODO
     THROW_NOT_IMPLEMENTED_ERROR();
 }
diff --git a/src/NetConnection/ConnectionFunctionGeneral.h b/src/NetConnection/ConnectionFunctionGeneral.h
index 6892caf19ad7dded05072a5c3006b2eb8aa94e6f..a50ace150ce35e92bfb40bc4aa0bc079f70de999 100644
--- a/src/NetConnection/ConnectionFunctionGeneral.h
+++ b/src/NetConnection/ConnectionFunctionGeneral.h
@@ -40,25 +40,28 @@ public:
      * @param param_count
      * @param f
      */
-    LIB4NEURO_API ConnectionFunctionGeneral(std::vector<size_t> &param_indices, std::string &function_string);
+    LIB4NEURO_API ConnectionFunctionGeneral(std::vector<size_t>& param_indices,
+                                            std::string& function_string);
 
     /**
      *
      */
-    LIB4NEURO_API virtual ~ConnectionFunctionGeneral( );
+    LIB4NEURO_API virtual ~ConnectionFunctionGeneral();
 
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API virtual double eval( std::vector<double> &parameter_space );
+    LIB4NEURO_API virtual double eval(std::vector<double>& parameter_space);
 
     /**
      * Performs partial derivative of this transfer function according to all parameters. Adds the values multiplied
      * by alpha to the corresponding gradient vector
      */
-    LIB4NEURO_API virtual void eval_partial_derivative( std::vector<double> &parameter_space, std::vector<double> &weight_gradient, double alpha );
+    LIB4NEURO_API virtual void eval_partial_derivative(std::vector<double>& parameter_space,
+                                                       std::vector<double>& weight_gradient,
+                                                       double alpha);
 
 };
 
diff --git a/src/NetConnection/ConnectionFunctionGeneralSerialization.h b/src/NetConnection/ConnectionFunctionGeneralSerialization.h
index afc9efceeb4063f7d7e32ee66282b78cd6cf54cd..80590153810cf75a8477c504ce0ac187b63b6ced 100644
--- a/src/NetConnection/ConnectionFunctionGeneralSerialization.h
+++ b/src/NetConnection/ConnectionFunctionGeneralSerialization.h
@@ -11,14 +11,19 @@
 
 BOOST_CLASS_EXPORT_KEY(ConnectionFunctionGeneral);
 
-struct ConnectionFunctionGeneral :: access {
-    template <class Archive>
-    static void serialize(Archive &ar, ConnectionFunctionGeneral& c, const unsigned int version) {
+struct ConnectionFunctionGeneral::access {
+    template<class Archive>
+    static void serialize(Archive& ar,
+                          ConnectionFunctionGeneral& c,
+                          const unsigned int version) {
         ar & c.param_indices;
     }
 };
 
-template void ConnectionFunctionGeneral::access::serialize<boost::archive::text_oarchive>(boost::archive::text_oarchive&, ConnectionFunctionGeneral&, const unsigned int);
+template void
+ConnectionFunctionGeneral::access::serialize<boost::archive::text_oarchive>(boost::archive::text_oarchive&,
+                                                                            ConnectionFunctionGeneral&,
+                                                                            const unsigned int);
 
 
 namespace boost {
@@ -32,9 +37,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, ConnectionFunctionGeneral& c, const unsigned int version)
-        {
-            ConnectionFunctionGeneral::access::serialize(ar, c, version);
+        void serialize(Archive& ar,
+                       ConnectionFunctionGeneral& c,
+                       const unsigned int version) {
+            ConnectionFunctionGeneral::access::serialize(ar,
+                                                         c,
+                                                         version);
         }
 
 
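
> Editor's note: both serialization headers touched by this diff follow the same access-struct idiom. Below is a self-contained sketch of the pattern with a hypothetical class `Foo`: a nested struct may reach the enclosing class's private members, so the free `boost::serialization::serialize()` overload never needs them exposed.

```cpp
#include <boost/serialization/serialization.hpp>

class Foo {
    double x = 0.0;           // private state to be archived
public:
    struct access;            // nested type: may access Foo's private members
};

struct Foo::access {
    template<class Archive>
    static void serialize(Archive& ar, Foo& f, const unsigned int /*version*/) {
        ar & f.x;
    }
};

namespace boost {
    namespace serialization {
        template<class Archive>
        void serialize(Archive& ar, Foo& f, const unsigned int version) {
            Foo::access::serialize(ar, f, version);
        }
    } // namespace serialization
} // namespace boost
```
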
diff --git a/src/NetConnection/ConnectionFunctionIdentity.cpp b/src/NetConnection/ConnectionFunctionIdentity.cpp
index 10abcaae7d29abab443a4f7cbce8d3e31b3f8b92..e5bb8ab6a696510bc69581fa5bec04c3899a2851 100644
--- a/src/NetConnection/ConnectionFunctionIdentity.cpp
+++ b/src/NetConnection/ConnectionFunctionIdentity.cpp
@@ -13,27 +13,29 @@
 
 BOOST_CLASS_EXPORT_IMPLEMENT(ConnectionFunctionIdentity);
 
-ConnectionFunctionIdentity::ConnectionFunctionIdentity( ) {
+ConnectionFunctionIdentity::ConnectionFunctionIdentity() {
     this->is_unitary = true;
 }
 
-ConnectionFunctionIdentity::ConnectionFunctionIdentity( size_t pidx ) {
+ConnectionFunctionIdentity::ConnectionFunctionIdentity(size_t pidx) {
     this->param_idx = pidx;
     this->is_unitary = false;
 }
 
-double ConnectionFunctionIdentity::eval( std::vector<double> &parameter_space ) {
+double ConnectionFunctionIdentity::eval(std::vector<double>& parameter_space) {
 
-    if( this->is_unitary ){
+    if (this->is_unitary) {
         return 1.0;
     }
 
     return parameter_space.at(this->param_idx);
 }
 
-void ConnectionFunctionIdentity::eval_partial_derivative(std::vector<double> &parameter_space, std::vector<double> &weight_gradient, double alpha) {
+void ConnectionFunctionIdentity::eval_partial_derivative(std::vector<double>& parameter_space,
+                                                         std::vector<double>& weight_gradient,
+                                                         double alpha) {
 
-    if( this->is_unitary ){
+    if (this->is_unitary) {
         return;
     }
 
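
> Editor's note: a usage sketch with hypothetical values. The non-unitary branch of `eval_partial_derivative()` is cut off by this hunk, but since `eval()` returns `parameter_space[param_idx]`, the derivative with respect to that weight is 1 and the update presumably reduces to `gradient[param_idx] += alpha`:

```cpp
#include <vector>
// assumes: #include "ConnectionFunctionIdentity.h"

void identity_connection_demo() {
    std::vector<double> weights{0.5, -1.25};
    std::vector<double> gradient(weights.size(), 0.0);

    ConnectionFunctionIdentity cfi(1);       // non-unitary: reads weights[1]
    double w = cfi.eval(weights);            // -> -1.25
    cfi.eval_partial_derivative(weights, gradient, 0.75);
    // expected: gradient == {0.0, 0.75}
    (void) w;
}
```
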
diff --git a/src/NetConnection/ConnectionFunctionIdentity.h b/src/NetConnection/ConnectionFunctionIdentity.h
index 52431b69a9d945e4952e5585b605c37f9343ba7d..0358f0fcc73ab11dd6008150c5a452f978be0d6c 100644
--- a/src/NetConnection/ConnectionFunctionIdentity.h
+++ b/src/NetConnection/ConnectionFunctionIdentity.h
@@ -30,25 +30,27 @@ public:
     /**
      *
      */
-    LIB4NEURO_API ConnectionFunctionIdentity( );
+    LIB4NEURO_API ConnectionFunctionIdentity();
 
     /**
      *
      */
-    LIB4NEURO_API ConnectionFunctionIdentity( size_t pidx );
+    LIB4NEURO_API ConnectionFunctionIdentity(size_t pidx);
 
     /**
      *
      * @return
      */
-    LIB4NEURO_API double eval( std::vector<double> &parameter_space ) override;
+    LIB4NEURO_API double eval(std::vector<double>& parameter_space) override;
 
     /**
      *
      * @param weight_gradient
      * @param alpha
      */
-    LIB4NEURO_API void eval_partial_derivative(std::vector<double> &parameter_space, std::vector<double> &weight_gradient, double alpha) override;
+    LIB4NEURO_API void eval_partial_derivative(std::vector<double>& parameter_space,
+                                               std::vector<double>& weight_gradient,
+                                               double alpha) override;
 };
 
 
diff --git a/src/NetConnection/ConnectionFunctionIdentitySerialization.h b/src/NetConnection/ConnectionFunctionIdentitySerialization.h
index c0f3c5d8b31f64be22b98d82d6c1d723ca707028..beec347f7ad92428b201a4fe724a82e11c2a6ec7 100644
--- a/src/NetConnection/ConnectionFunctionIdentitySerialization.h
+++ b/src/NetConnection/ConnectionFunctionIdentitySerialization.h
@@ -12,17 +12,21 @@
 
 BOOST_CLASS_EXPORT_KEY(ConnectionFunctionIdentity);
 
-struct ConnectionFunctionIdentity :: access {
-    template <class Archive>
-    static void serialize(Archive &ar, ConnectionFunctionIdentity& c, const unsigned int version) {
+struct ConnectionFunctionIdentity::access {
+    template<class Archive>
+    static void serialize(Archive& ar,
+                          ConnectionFunctionIdentity& c,
+                          const unsigned int version) {
         ar & boost::serialization::base_object<ConnectionFunctionGeneral>(c);
         ar & c.is_unitary;
         ar & c.param_idx;
     }
 };
 
-template void ConnectionFunctionIdentity::access::serialize<boost::archive::text_oarchive>(boost::archive::text_oarchive&,
-                                                                                           ConnectionFunctionIdentity&, const unsigned int);
+template void
+ConnectionFunctionIdentity::access::serialize<boost::archive::text_oarchive>(boost::archive::text_oarchive&,
+                                                                             ConnectionFunctionIdentity&,
+                                                                             const unsigned int);
 
 namespace boost {
     namespace serialization {
@@ -35,9 +39,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, ConnectionFunctionIdentity& c, const unsigned int version)
-        {
-            ConnectionFunctionIdentity::access::serialize(ar, c, version);
+        void serialize(Archive& ar,
+                       ConnectionFunctionIdentity& c,
+                       const unsigned int version) {
+            ConnectionFunctionIdentity::access::serialize(ar,
+                                                          c,
+                                                          version);
         }
 
     } // namespace serialization
diff --git a/src/Network/NeuralNetwork.cpp b/src/Network/NeuralNetwork.cpp
index 49ed2879231e822be7a6be99b6d0be88d78cd513..fd6b914870fbbc59b27fb4a267ac27e8889dd593 100644
--- a/src/Network/NeuralNetwork.cpp
+++ b/src/Network/NeuralNetwork.cpp
@@ -19,7 +19,6 @@ namespace lib4neuro {
     NeuralNetwork::NeuralNetwork() {
 
 
-
         this->delete_weights = true;
         this->delete_biases = true;
         this->layers_analyzed = false;
@@ -27,13 +26,14 @@ namespace lib4neuro {
 
     NeuralNetwork::NeuralNetwork(std::string filepath) {
         ::std::ifstream ifs(filepath);
-        if(ifs.is_open()) {
+        if (ifs.is_open()) {
             try {
                 boost::archive::text_iarchive ia(ifs);
                 ia >> *this;
-            }catch(boost::archive::archive_exception& e) {
-                THROW_RUNTIME_ERROR("Serialized archive error: '" + e.what() + "'! Please, check if your file is really "
-                                                                               "the serialized DataSet.");
+            } catch (boost::archive::archive_exception& e) {
+                THROW_RUNTIME_ERROR(
+                        "Serialized archive error: '" + std::string(e.what()) + "'! Please check that the file "
+                                                                                "really contains a serialized NeuralNetwork.");
             }
             ifs.close();
         } else {
@@ -45,24 +45,21 @@ namespace lib4neuro {
     NeuralNetwork::~NeuralNetwork() {
 
 
-
-
-
-
-
     }
 
-    NeuralNetwork *NeuralNetwork::get_subnet(::std::vector<size_t> &input_neuron_indices,
-                                             ::std::vector<size_t> &output_neuron_indices) {
+    NeuralNetwork* NeuralNetwork::get_subnet(::std::vector<size_t>& input_neuron_indices,
+                                             ::std::vector<size_t>& output_neuron_indices) {
 
         THROW_NOT_IMPLEMENTED_ERROR();
 
-        NeuralNetwork *output_net = nullptr;
+        NeuralNetwork* output_net = nullptr;
 // TODO rework due to the changed structure of the class
         return output_net;
     }
 
-    size_t NeuralNetwork::add_neuron(std::shared_ptr<Neuron> n, BIAS_TYPE bt, size_t bias_idx) {
+    size_t NeuralNetwork::add_neuron(std::shared_ptr<Neuron> n,
+                                     BIAS_TYPE bt,
+                                     size_t bias_idx) {
 
         if (bt == BIAS_TYPE::NO_BIAS) {
             this->neuron_bias_indices.push_back(-1);
@@ -85,7 +82,8 @@ namespace lib4neuro {
         return this->neurons.size() - 1;
     }
 
-    void NeuralNetwork::eval_single_debug(::std::vector<double> &input, ::std::vector<double> &output,
+    void NeuralNetwork::eval_single_debug(::std::vector<double>& input,
+                                          ::std::vector<double>& output,
                                           std::vector<double>* custom_weights_and_biases) {
         if ((this->input_neuron_indices.size() * this->output_neuron_indices.size()) <= 0) {
             THROW_INVALID_ARGUMENT_ERROR("Input and output neurons have not been specified!");
@@ -107,8 +105,12 @@ namespace lib4neuro {
         this->analyze_layer_structure();
 
         /* reset of the output and the neuron potentials */
-        ::std::fill(output.begin(), output.end(), 0.0);
-        ::std::fill(this->neuron_potentials.begin(), this->neuron_potentials.end(), 0.0);
+        ::std::fill(output.begin(),
+                    output.end(),
+                    0.0);
+        ::std::fill(this->neuron_potentials.begin(),
+                    this->neuron_potentials.end(),
+                    0.0);
 
         /* set the potentials of the input neurons */
         for (size_t i = 0; i < this->input_neuron_indices.size(); ++i) {
@@ -129,8 +131,10 @@ namespace lib4neuro {
                 if (bias_idx >= 0) {
                     bias = this->neuron_biases.at(bias_idx);
                 }
-                potential = this->neurons.at(si)->activate(this->neuron_potentials.at(si), bias);
-                std::cout << "  applying bias: " << bias << " to neuron potential: " << this->neuron_potentials.at(si) << " -> " << potential << std::endl;
+                potential = this->neurons.at(si)->activate(this->neuron_potentials.at(si),
+                                                           bias);
+                std::cout << "  applying bias: " << bias << " to neuron potential: " << this->neuron_potentials.at(si)
+                          << " -> " << potential << std::endl;
 
                 for (auto c: *this->outward_adjacency.at(si)) {
                     size_t ti = c.first;
@@ -139,7 +143,9 @@ namespace lib4neuro {
                     this->neuron_potentials.at(ti) +=
                             this->connection_list.at(ci)->eval(this->connection_weights) * potential;
 
-                    std::cout << "  adding input to neuron " << ti << " += " << this->connection_list.at(ci)->eval(this->connection_weights) << "*" << potential << std::endl;
+                    std::cout << "  adding input to neuron " << ti << " += "
+                              << this->connection_list.at(ci)->eval(this->connection_weights) << "*" << potential
+                              << std::endl;
                 }
             }
         }
@@ -151,7 +157,8 @@ namespace lib4neuro {
             if (bias_idx >= 0) {
                 bias = this->neuron_biases.at(bias_idx);
             }
-            output[i] = this->neurons.at(oi)->activate(this->neuron_potentials.at(oi), bias);
+            output[i] = this->neurons.at(oi)->activate(this->neuron_potentials.at(oi),
+                                                       bias);
             std::cout << "setting the output[" << i << "] = " << output[i] << "(bias = " << bias << ")" << std::endl;
             ++i;
         }
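
> Editor's note: in the notation of the loops above (`p` = `neuron_potentials`, `b` = `neuron_biases`, `activate(p, b)` written φ(p; b), and `w_c` the evaluated weight of connection `c` from neuron s to neuron t), the feedforward pass computes

```latex
o_s = \varphi_s(p_s;\, b_s), \qquad p_t \;{+}{=}\; w_c \, o_s ,
```

> with the output neurons activated once more at the end to produce `output[i]`.
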
@@ -159,7 +166,9 @@ namespace lib4neuro {
 
 
     size_t
-    NeuralNetwork::add_connection_simple(size_t n1_idx, size_t n2_idx, SIMPLE_CONNECTION_TYPE sct,
+    NeuralNetwork::add_connection_simple(size_t n1_idx,
+                                         size_t n2_idx,
+                                         SIMPLE_CONNECTION_TYPE sct,
                                          size_t weight_idx) {
 
         std::shared_ptr<ConnectionFunctionIdentity> con_weight_u1u2;
@@ -180,8 +189,12 @@ namespace lib4neuro {
 
         size_t conn_idx = this->add_new_connection_to_list(con_weight_u1u2);
 
-        this->add_outward_connection(n1_idx, n2_idx, conn_idx);
-        this->add_inward_connection(n2_idx, n1_idx, conn_idx);
+        this->add_outward_connection(n1_idx,
+                                     n2_idx,
+                                     conn_idx);
+        this->add_inward_connection(n2_idx,
+                                    n1_idx,
+                                    conn_idx);
 
         this->layers_analyzed = false;
 
@@ -189,26 +202,38 @@ namespace lib4neuro {
     }
 
     size_t
-    NeuralNetwork::add_connection_constant(size_t n1_idx, size_t n2_idx, double weight) {
+    NeuralNetwork::add_connection_constant(size_t n1_idx,
+                                           size_t n2_idx,
+                                           double weight) {
         std::shared_ptr<ConnectionFunctionConstant> cfc = std::make_shared<ConnectionFunctionConstant>(ConnectionFunctionConstant());
 
         size_t conn_idx = this->add_new_connection_to_list(cfc);
 
-        this->add_outward_connection(n1_idx, n2_idx, conn_idx);
-        this->add_inward_connection(n2_idx, n1_idx, conn_idx);
+        this->add_outward_connection(n1_idx,
+                                     n2_idx,
+                                     conn_idx);
+        this->add_inward_connection(n2_idx,
+                                    n1_idx,
+                                    conn_idx);
 
         this->layers_analyzed = false;
 
         return conn_idx;
     }
 
-    void NeuralNetwork::add_existing_connection(size_t n1_idx, size_t n2_idx, size_t connection_idx,
-                                                NeuralNetwork &parent_network) {
+    void NeuralNetwork::add_existing_connection(size_t n1_idx,
+                                                size_t n2_idx,
+                                                size_t connection_idx,
+                                                NeuralNetwork& parent_network) {
 
         size_t conn_idx = this->add_new_connection_to_list(parent_network.connection_list.at(connection_idx));
 
-        this->add_outward_connection(n1_idx, n2_idx, conn_idx);
-        this->add_inward_connection(n2_idx, n1_idx, conn_idx);
+        this->add_outward_connection(n1_idx,
+                                     n2_idx,
+                                     conn_idx);
+        this->add_inward_connection(n2_idx,
+                                    n1_idx,
+                                    conn_idx);
 
         this->layers_analyzed = false;
     }
@@ -225,7 +250,7 @@ namespace lib4neuro {
         }
     }
 
-    void NeuralNetwork::set_parameter_space_pointers(NeuralNetwork &parent_network) {
+    void NeuralNetwork::set_parameter_space_pointers(NeuralNetwork& parent_network) {
 
         if (!this->connection_weights.empty()) {
             this->connection_weights.clear();
@@ -264,8 +289,12 @@ namespace lib4neuro {
         this->analyze_layer_structure();
 
         /* reset of the output and the neuron potentials */
-        ::std::fill(output.begin(), output.end(), 0.0);
-        ::std::fill(this->neuron_potentials.begin(), this->neuron_potentials.end(), 0.0);
+        ::std::fill(output.begin(),
+                    output.end(),
+                    0.0);
+        ::std::fill(this->neuron_potentials.begin(),
+                    this->neuron_potentials.end(),
+                    0.0);
 
         /* set the potentials of the input neurons */
         for (size_t i = 0; i < this->input_neuron_indices.size(); ++i) {
@@ -282,7 +311,8 @@ namespace lib4neuro {
                 if (bias_idx >= 0) {
                     bias = this->neuron_biases.at(bias_idx);
                 }
-                potential = this->neurons.at(si)->activate(this->neuron_potentials.at(si), bias);
+                potential = this->neurons.at(si)->activate(this->neuron_potentials.at(si),
+                                                           bias);
 
                 for (auto c: *this->outward_adjacency.at(si)) {
                     size_t ti = c.first;
@@ -301,26 +331,32 @@ namespace lib4neuro {
             if (bias_idx >= 0) {
                 bias = this->neuron_biases.at(bias_idx);
             }
-            output[i] = this->neurons.at(oi)->activate(this->neuron_potentials.at(oi), bias);
+            output[i] = this->neurons.at(oi)->activate(this->neuron_potentials.at(oi),
+                                                       bias);
             ++i;
         }
     }
 
-    void NeuralNetwork::add_to_gradient_single(std::vector<double> &input, ::std::vector<double> &error_derivative,
-                                               double error_scaling, ::std::vector<double> &gradient) {
+    void NeuralNetwork::add_to_gradient_single(std::vector<double>& input,
+                                               ::std::vector<double>& error_derivative,
+                                               double error_scaling,
+                                               ::std::vector<double>& gradient) {
 
         ::std::vector<double> scaling_backprog(this->get_n_neurons());
-        ::std::fill(scaling_backprog.begin(), scaling_backprog.end(), 0.0);
+        ::std::fill(scaling_backprog.begin(),
+                    scaling_backprog.end(),
+                    0.0);
 
         size_t bias_shift = this->get_n_weights();
         size_t neuron_idx;
         int bias_idx;
         double neuron_potential, neuron_potential_t, neuron_bias, connection_weight;
 
-        NeuronDifferentiable *active_neuron;
+        NeuronDifferentiable* active_neuron;
 
         /* initial error propagation */
-        std::shared_ptr<::std::vector<size_t>> current_layer = this->neuron_layers_feedforward.at(this->neuron_layers_feedforward.size() - 1);
+        std::shared_ptr<::std::vector<size_t>> current_layer = this->neuron_layers_feedforward.at(
+                this->neuron_layers_feedforward.size() - 1);
         //TODO might not work in the future as the output neurons could be permuted
         for (size_t i = 0; i < current_layer->size(); ++i) {
             neuron_idx = current_layer->at(i);
@@ -335,7 +371,7 @@ namespace lib4neuro {
             for (size_t i = 0; i < current_layer->size(); ++i) {
 
                 neuron_idx = current_layer->at(i);
-                active_neuron = dynamic_cast<NeuronDifferentiable *> (this->neurons.at(neuron_idx).get());
+                active_neuron = dynamic_cast<NeuronDifferentiable*> (this->neurons.at(neuron_idx).get());
 
                 if (active_neuron) {
                     bias_idx = this->neuron_bias_indices.at(neuron_idx);
@@ -345,7 +381,8 @@ namespace lib4neuro {
                         neuron_bias = this->neuron_biases.at(bias_idx);
                         gradient[bias_shift + bias_idx] += scaling_backprog[neuron_idx] *
                                                            active_neuron->activation_function_eval_derivative_bias(
-                                                                   neuron_potential, neuron_bias);
+                                                                   neuron_potential,
+                                                                   neuron_bias);
                         scaling_backprog[neuron_idx] *= active_neuron->activation_function_eval_derivative(
                                 neuron_potential,
                                 neuron_bias);
@@ -356,13 +393,13 @@ namespace lib4neuro {
                         size_t ti = c.first;
                         size_t ci = c.second;
 
-                        neuron_potential_t = this->neurons.at(ti)->get_last_activation_value( );
+                        neuron_potential_t = this->neurons.at(ti)->get_last_activation_value();
                         connection_weight = this->connection_list.at(ci)->eval(this->connection_weights);
 
                         this->connection_list.at(ci)->eval_partial_derivative(*this->get_parameter_ptr_weights(),
-                                                                               gradient,
-                                                                               neuron_potential_t *
-                                                                               scaling_backprog[neuron_idx]);
+                                                                              gradient,
+                                                                              neuron_potential_t *
+                                                                              scaling_backprog[neuron_idx]);
 
                         scaling_backprog[ti] += scaling_backprog[neuron_idx] * connection_weight;
                     }
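
> Editor's note: the reindentation above leaves the standard backpropagation recurrences intact. Writing δ for `scaling_backprog`, a_t for the last activation value of neuron t, and w_c for the weight of connection c from t into the active neuron s, the two accumulations are

```latex
\frac{\partial E}{\partial w_c} \;{+}{=}\; a_t \, \delta_s ,
\qquad
\delta_t \;{+}{=}\; w_c \, \delta_s ,
```

> with an analogous δ_s-scaled term added to the bias gradient through `activation_function_eval_derivative_bias()`.
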
@@ -374,18 +411,22 @@ namespace lib4neuro {
         }
     }
 
-    void NeuralNetwork::add_to_gradient_single_debug(std::vector<double> &input, ::std::vector<double> &error_derivative,
-                                                     double error_scaling, ::std::vector<double> &gradient) {
+    void NeuralNetwork::add_to_gradient_single_debug(std::vector<double>& input,
+                                                     ::std::vector<double>& error_derivative,
+                                                     double error_scaling,
+                                                     ::std::vector<double>& gradient) {
 
         ::std::vector<double> scaling_backprog(this->get_n_neurons());
-        ::std::fill(scaling_backprog.begin(), scaling_backprog.end(), 0.0);
+        ::std::fill(scaling_backprog.begin(),
+                    scaling_backprog.end(),
+                    0.0);
 
         size_t bias_shift = this->get_n_weights();
         size_t neuron_idx;
         int bias_idx;
         double neuron_potential, neuron_activation_t, neuron_bias, connection_weight;
 
-        NeuronDifferentiable *active_neuron;
+        NeuronDifferentiable* active_neuron;
 
         /* initial error propagation */
         std::shared_ptr<::std::vector<size_t>> current_layer = this->neuron_layers_feedforward.at(
@@ -408,7 +449,7 @@ namespace lib4neuro {
             for (size_t i = 0; i < current_layer->size(); ++i) {
 
                 neuron_idx = current_layer->at(i);
-                active_neuron = dynamic_cast<NeuronDifferentiable *> (this->neurons.at(neuron_idx).get());
+                active_neuron = dynamic_cast<NeuronDifferentiable*> (this->neurons.at(neuron_idx).get());
 
                 if (active_neuron) {
                     std::cout << "  [backpropagation] active neuron: " << neuron_idx << std::endl;
@@ -420,28 +461,31 @@ namespace lib4neuro {
                         neuron_bias = this->neuron_biases.at(bias_idx);
                         gradient[bias_shift + bias_idx] += scaling_backprog[neuron_idx] *
                                                            active_neuron->activation_function_eval_derivative_bias(
-                                                                   neuron_potential, neuron_bias);
+                                                                   neuron_potential,
+                                                                   neuron_bias);
                         scaling_backprog[neuron_idx] *= active_neuron->activation_function_eval_derivative(
                                 neuron_potential,
                                 neuron_bias);
                     }
 
-                    std::cout << "      [backpropagation] scaling coefficient: " << scaling_backprog[neuron_idx] << std::endl;
+                    std::cout << "      [backpropagation] scaling coefficient: " << scaling_backprog[neuron_idx]
+                              << std::endl;
 
                     /* connections to lower level neurons */
                     for (auto c: *this->inward_adjacency.at(neuron_idx)) {
                         size_t ti = c.first;
                         size_t ci = c.second;
 
-                        neuron_activation_t = this->neurons.at(ti)->get_last_activation_value( );
+                        neuron_activation_t = this->neurons.at(ti)->get_last_activation_value();
                         connection_weight = this->connection_list.at(ci)->eval(this->connection_weights);
 
-                        std::cout << "      [backpropagation] value ("<<ti<< "): " << neuron_activation_t << ", scaling: " << scaling_backprog[neuron_idx] << std::endl;
+                        std::cout << "      [backpropagation] value (" << ti << "): " << neuron_activation_t
+                                  << ", scaling: " << scaling_backprog[neuron_idx] << std::endl;
 
                         this->connection_list.at(ci)->eval_partial_derivative(*this->get_parameter_ptr_weights(),
-                                                                               gradient,
-                                                                               neuron_activation_t *
-                                                                               scaling_backprog[neuron_idx]);
+                                                                              gradient,
+                                                                              neuron_activation_t *
+                                                                              scaling_backprog[neuron_idx]);
 
                         scaling_backprog[ti] += scaling_backprog[neuron_idx] * connection_weight;
                     }
@@ -454,7 +498,6 @@ namespace lib4neuro {
     }
 
 
-
     void NeuralNetwork::randomize_weights() {
 
         boost::random::mt19937 gen(std::time(0));
@@ -462,7 +505,8 @@ namespace lib4neuro {
         // Init weight guess ("optimal" for logistic activation functions)
         double r = 4 * sqrt(6. / (this->connection_weights.size()));
 
-        boost::random::uniform_real_distribution<> dist(-r, r);
+        boost::random::uniform_real_distribution<> dist(-r,
+                                                        r);
 
         for (size_t i = 0; i < this->connection_weights.size(); i++) {
             this->connection_weights.at(i) = dist(gen);
@@ -474,7 +518,8 @@ namespace lib4neuro {
         boost::random::mt19937 gen(std::time(0));
 
         // Init weight guess ("optimal" for logistic activation functions)
-        boost::random::uniform_real_distribution<> dist(-1, 1);
+        boost::random::uniform_real_distribution<> dist(-1,
+                                                        1);
         for (size_t i = 0; i < this->neuron_biases.size(); i++) {
             this->neuron_biases.at(i) = dist(gen);
         }
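
> Editor's note: a standalone sketch of the two initializers above, assuming `n` is the total number of trainable weights. Weights are drawn uniformly from [-r, r] with r = 4·sqrt(6/n), a Glorot-style bound scaled by 4 as is common for logistic activations (Glorot's rule proper uses per-layer fan-in/fan-out rather than the total weight count); biases come from [-1, 1]:

```cpp
#include <cmath>
#include <cstddef>
#include <ctime>
#include <vector>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_real_distribution.hpp>

// Draws n values uniformly from [-r, r], seeded from the wall clock
// just as the member functions above are.
std::vector<double> init_uniform(std::size_t n, double r) {
    boost::random::mt19937 gen(static_cast<unsigned int>(std::time(nullptr)));
    boost::random::uniform_real_distribution<> dist(-r, r);

    std::vector<double> out(n);
    for (auto& v : out) {
        v = dist(gen);
    }
    return out;
}

// weights: init_uniform(n, 4.0 * std::sqrt(6.0 / n));
// biases:  init_uniform(m, 1.0);
```
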
@@ -486,20 +531,20 @@ namespace lib4neuro {
     }
 
     void NeuralNetwork::scale_biases(double alpha) {
-        for(size_t i = 0; i < this->get_n_biases(); ++i){
-            this->neuron_biases.at( i ) *= alpha;
+        for (size_t i = 0; i < this->get_n_biases(); ++i) {
+            this->neuron_biases.at(i) *= alpha;
         }
     }
 
     void NeuralNetwork::scale_weights(double alpha) {
-        for(size_t i = 0; i < this->get_n_weights(); ++i){
-            this->connection_weights.at( i ) *= alpha;
+        for (size_t i = 0; i < this->get_n_weights(); ++i) {
+            this->connection_weights.at(i) *= alpha;
         }
     }
 
     void NeuralNetwork::scale_parameters(double alpha) {
-        this->scale_biases( alpha );
-        this->scale_weights( alpha );
+        this->scale_biases(alpha);
+        this->scale_weights(alpha);
     }
 
     size_t NeuralNetwork::get_n_inputs() {
@@ -526,12 +571,12 @@ namespace lib4neuro {
         return this->neurons.size();
     }
 
-    void NeuralNetwork::specify_input_neurons(std::vector<size_t> &input_neurons_indices) {
+    void NeuralNetwork::specify_input_neurons(std::vector<size_t>& input_neurons_indices) {
         this->input_neuron_indices = input_neurons_indices;
-        
+
     }
 
-    void NeuralNetwork::specify_output_neurons(std::vector<size_t> &output_neurons_indices) {
+    void NeuralNetwork::specify_output_neurons(std::vector<size_t>& output_neurons_indices) {
         this->output_neuron_indices = output_neurons_indices;
     }
 
@@ -548,7 +593,7 @@ namespace lib4neuro {
     void NeuralNetwork::write_weights(std::string file_path) {
         std::ofstream ofs(file_path);
 
-        if(!ofs.is_open()) {
+        if (!ofs.is_open()) {
             THROW_RUNTIME_ERROR("File " + file_path + " can not be opened!");
         }
 
@@ -576,8 +621,8 @@ namespace lib4neuro {
     void NeuralNetwork::write_biases() {
         std::cout << "Network biases: ";
 
-        if(!this->neuron_biases.empty()) {
-            for(unsigned int i = 0; i < this->neuron_biases.size() - 1; i++) {
+        if (!this->neuron_biases.empty()) {
+            for (unsigned int i = 0; i < this->neuron_biases.size() - 1; i++) {
                 std::cout << this->neuron_biases.at(i) << ", ";
             }
             std::cout << this->neuron_biases.at(this->neuron_biases.size() - 1) << std::endl;
@@ -587,14 +632,14 @@ namespace lib4neuro {
     void NeuralNetwork::write_biases(std::string file_path) {
         std::ofstream ofs(file_path);
 
-        if(!ofs.is_open()) {
+        if (!ofs.is_open()) {
             THROW_RUNTIME_ERROR("File " + file_path + " can not be opened!");
         }
 
         ofs << "Network biases: ";
 
-        if(!this->neuron_biases.empty()) {
-            for(unsigned int i = 0; i < this->neuron_biases.size() - 1; i++) {
+        if (!this->neuron_biases.empty()) {
+            for (unsigned int i = 0; i < this->neuron_biases.size() - 1; i++) {
                 ofs << this->neuron_biases.at(i) << ", ";
             }
             ofs << this->neuron_biases.at(this->neuron_biases.size() - 1) << std::endl;
@@ -604,8 +649,8 @@ namespace lib4neuro {
     void NeuralNetwork::write_biases(std::ofstream* file_path) {
         *file_path << "Network biases: ";
 
-        if(!this->neuron_biases.empty()) {
-            for(unsigned int i = 0; i < this->neuron_biases.size() - 1; i++) {
+        if (!this->neuron_biases.empty()) {
+            for (unsigned int i = 0; i < this->neuron_biases.size() - 1; i++) {
                 *file_path << this->neuron_biases.at(i) << ", ";
             }
             *file_path << this->neuron_biases.at(this->neuron_biases.size() - 1) << std::endl;
@@ -619,7 +664,7 @@ namespace lib4neuro {
                     << "Number of active weights: " << this->connection_weights.size() << ::std::endl
                     << "Number of active biases: " << this->neuron_biases.size() << ::std::endl;
 
-        if(this->normalization_strategy) {
+        if (this->normalization_strategy) {
             ::std::cout << std::flush
                         << "Normalization strategy maximum value: "
                         << this->normalization_strategy->get_max_value() << std::endl
@@ -632,7 +677,7 @@ namespace lib4neuro {
     void NeuralNetwork::write_stats(std::string file_path) {
         std::ofstream ofs(file_path);
 
-        if(!ofs.is_open()) {
+        if (!ofs.is_open()) {
             THROW_RUNTIME_ERROR("File " + file_path + " can not be opened!");
         }
 
@@ -641,7 +686,7 @@ namespace lib4neuro {
             << "Number of active weights: " << this->connection_weights.size() << ::std::endl
             << "Number of active biases: " << this->neuron_biases.size() << ::std::endl;
 
-        if(this->normalization_strategy) {
+        if (this->normalization_strategy) {
             ofs << "Normalization strategy maximum value: "
                 << this->normalization_strategy->get_max_value() << std::endl
                 << "Normalization strategy minimum value: "
@@ -658,7 +703,7 @@ namespace lib4neuro {
                    << "Number of active weights: " << this->connection_weights.size() << ::std::endl
                    << "Number of active biases: " << this->neuron_biases.size() << ::std::endl;
 
-        if(this->normalization_strategy) {
+        if (this->normalization_strategy) {
             *file_path << "Normalization strategy maximum value: "
                        << this->normalization_strategy->get_max_value() << std::endl
                        << "Normalization strategy minimum value: "
@@ -681,18 +726,24 @@ namespace lib4neuro {
         return this->connection_list.size() - 1;
     }
 
-    void NeuralNetwork::add_inward_connection(size_t s, size_t t, size_t con_idx) {
+    void NeuralNetwork::add_inward_connection(size_t s,
+                                              size_t t,
+                                              size_t con_idx) {
         if (!this->inward_adjacency.at(s)) {
             this->inward_adjacency.at(s) = std::make_shared<std::vector<std::pair<size_t, size_t>>>(::std::vector<std::pair<size_t, size_t>>(0));
         }
-        this->inward_adjacency.at(s)->push_back(std::pair<size_t, size_t>(t, con_idx));
+        this->inward_adjacency.at(s)->push_back(std::pair<size_t, size_t>(t,
+                                                                          con_idx));
     }
 
-    void NeuralNetwork::add_outward_connection(size_t s, size_t t, size_t con_idx) {
+    void NeuralNetwork::add_outward_connection(size_t s,
+                                               size_t t,
+                                               size_t con_idx) {
         if (!this->outward_adjacency.at(s)) {
             this->outward_adjacency.at(s) = std::make_shared<std::vector<std::pair<size_t, size_t>>>(::std::vector<std::pair<size_t, size_t>>(0));
         }
-        this->outward_adjacency.at(s)->push_back(std::pair<size_t, size_t>(t, con_idx));
+        this->outward_adjacency.at(s)->push_back(std::pair<size_t, size_t>(t,
+                                                                           con_idx));
     }
 
     void NeuralNetwork::analyze_layer_structure() {
@@ -709,15 +760,17 @@ namespace lib4neuro {
         this->neuron_layers_feedforward.clear();
 
 
-
-
         auto n = this->neurons.size();
 
         /* helpful counters */
         ::std::vector<size_t> inward_saturation(n);
         ::std::vector<size_t> outward_saturation(n);
-        ::std::fill(inward_saturation.begin(), inward_saturation.end(), 0);
-        ::std::fill(outward_saturation.begin(), outward_saturation.end(), 0);
+        ::std::fill(inward_saturation.begin(),
+                    inward_saturation.end(),
+                    0);
+        ::std::fill(outward_saturation.begin(),
+                    outward_saturation.end(),
+                    0);
         for (unsigned int i = 0; i < n; ++i) {
             if (this->inward_adjacency.at(i)) {
                 inward_saturation[i] = this->inward_adjacency.at(i)->size();
@@ -777,7 +830,6 @@ namespace lib4neuro {
         }
 
 
-
         this->layers_analyzed = true;
     }
 
@@ -794,8 +846,8 @@ namespace lib4neuro {
         return this->normalization_strategy;
     }
 
-    void NeuralNetwork::set_normalization_strategy_instance(NormalizationStrategy *ns) {
-        if(!ns) {
+    void NeuralNetwork::set_normalization_strategy_instance(NormalizationStrategy* ns) {
+        if (!ns) {
             THROW_RUNTIME_ERROR("Argument 'ns' is not initialized!");
         }
         this->normalization_strategy = ns;
@@ -806,30 +858,32 @@ namespace lib4neuro {
                                          std::ofstream* ofs) : NeuralNetwork() {
         std::vector<NEURON_TYPE> tmp;
 
-        for(auto i = 0; i < neuron_numbers->size(); i++) {
+        for (size_t i = 0; i < neuron_numbers->size(); i++) {
             tmp.emplace_back(hidden_layer_neuron_type);
         }
 
-        this->init(neuron_numbers, &tmp, ofs);
+        this->init(neuron_numbers,
+                   &tmp,
+                   ofs);
     }
 
     FullyConnectedFFN::FullyConnectedFFN(std::vector<unsigned int>* neuron_numbers,
                                          std::vector<lib4neuro::NEURON_TYPE>* hidden_layer_neuron_types,
                                          std::ofstream* ofs) : NeuralNetwork() {
-        this->init(neuron_numbers, hidden_layer_neuron_types, ofs);
+        this->init(neuron_numbers,
+                   hidden_layer_neuron_types,
+                   ofs);
     }
 
     void FullyConnectedFFN::init(std::vector<unsigned int>* neuron_numbers,
                                  std::vector<NEURON_TYPE>* hidden_layer_neuron_types,
                                  std::ofstream* ofs) {
-        if(neuron_numbers->size() < 2) {
+        if (neuron_numbers->size() < 2) {
             THROW_INVALID_ARGUMENT_ERROR("Parameter 'neuron_numbers' specifying numbers of neurons in network's layers "
                                          "doesn't specify input and output layers, which are compulsory!");
         }
 
 
-
-
         this->delete_weights = true;
         this->delete_biases = true;
         this->layers_analyzed = false;
@@ -841,9 +895,12 @@ namespace lib4neuro {
         COUT_DEBUG("# of inputs: " << inp_dim << std::endl);
         COUT_DEBUG("# of outputs: " << out_dim << std::endl);
 
-        WRITE_TO_OFS_DEBUG(ofs, "Fully connected feed-forward network being constructed:" << std::endl
-                                                                                          << "# of inputs: " << inp_dim << std::endl
-                                                                                          << "# of outputs: " << out_dim << std::endl);
+        WRITE_TO_OFS_DEBUG(ofs,
+                           "Fully connected feed-forward network being constructed:" << std::endl
+                                                                                     << "# of inputs: " << inp_dim
+                                                                                     << std::endl
+                                                                                     << "# of outputs: " << out_dim
+                                                                                     << std::endl);
 
         std::vector<size_t> input_layer_neuron_indices;
         std::vector<size_t> previous_layer_neuron_indices;
@@ -852,35 +909,39 @@ namespace lib4neuro {
         /* Creation of INPUT layer neurons */
         current_layer_neuron_indices.reserve(inp_dim);
         input_layer_neuron_indices.reserve(inp_dim);
-        for(unsigned int i = 0; i < inp_dim; i++) {
+        for (unsigned int i = 0; i < inp_dim; i++) {
             std::shared_ptr<Neuron> new_neuron;
             new_neuron.reset(new NeuronLinear());
-            size_t neuron_id = this->add_neuron(new_neuron, BIAS_TYPE::NO_BIAS);
+            size_t neuron_id = this->add_neuron(new_neuron,
+                                                BIAS_TYPE::NO_BIAS);
             current_layer_neuron_indices.emplace_back(neuron_id);
         }
         input_layer_neuron_indices = current_layer_neuron_indices;
 
         /* Creation of HIDDEN layers */
-        for(unsigned int i = 1; i <= neuron_numbers->size()-2; i++) {
+        for (unsigned int i = 1; i <= neuron_numbers->size() - 2; i++) {
             COUT_DEBUG("Hidden layer #" << i << ": " << neuron_numbers->at(i) << " neurons" << std::endl);
-            WRITE_TO_OFS_DEBUG(ofs, "Hidden layer #" << i << ": " << neuron_numbers->at(i) << " neurons" << std::endl);
-            previous_layer_neuron_indices.reserve(neuron_numbers->at(i-1));
+            WRITE_TO_OFS_DEBUG(ofs,
+                               "Hidden layer #" << i << ": " << neuron_numbers->at(i) << " neurons" << std::endl);
+            previous_layer_neuron_indices.reserve(neuron_numbers->at(i - 1));
             previous_layer_neuron_indices = current_layer_neuron_indices;
             current_layer_neuron_indices.clear();
             current_layer_neuron_indices.reserve(neuron_numbers->at(i));
 
             /* Creation of one single hidden layer */
-            for(unsigned int j = 0; j < neuron_numbers->at(i); j++) {
+            for (unsigned int j = 0; j < neuron_numbers->at(i); j++) {
                 size_t neuron_id;
 
                 /* Create new hidden neuron */
-                                switch (hidden_layer_neuron_types->at(i-1)) {
+                switch (hidden_layer_neuron_types->at(i - 1)) {
                     case NEURON_TYPE::BINARY: {
                         std::shared_ptr<Neuron> new_neuron;
                         new_neuron.reset(new NeuronBinary());
-                        neuron_id = this->add_neuron(new_neuron, BIAS_TYPE::NEXT_BIAS);
+                        neuron_id = this->add_neuron(new_neuron,
+                                                     BIAS_TYPE::NEXT_BIAS);
                         COUT_DEBUG("Added BINARY neuron." << std::endl);
-                        WRITE_TO_OFS_DEBUG(ofs, "Added BINARY neuron." << std::endl);
+                        WRITE_TO_OFS_DEBUG(ofs,
+                                           "Added BINARY neuron." << std::endl);
                         break;
                     }
 
@@ -892,18 +953,22 @@ namespace lib4neuro {
                     case NEURON_TYPE::LINEAR: {
                         std::shared_ptr<Neuron> new_neuron;
                         new_neuron.reset(new NeuronLinear());
-                        neuron_id = this->add_neuron(new_neuron, BIAS_TYPE::NEXT_BIAS);
+                        neuron_id = this->add_neuron(new_neuron,
+                                                     BIAS_TYPE::NEXT_BIAS);
                         COUT_DEBUG("Added LINEAR neuron." << std::endl);
-                        WRITE_TO_OFS_DEBUG(ofs, "Added LINEAR neuron." << std::endl);
+                        WRITE_TO_OFS_DEBUG(ofs,
+                                           "Added LINEAR neuron." << std::endl);
                         break;
                     }
 
                     case NEURON_TYPE::LOGISTIC: {
                         std::shared_ptr<Neuron> new_neuron;
                         new_neuron.reset(new NeuronLogistic());
-                        neuron_id = this->add_neuron(new_neuron, BIAS_TYPE::NEXT_BIAS);
+                        neuron_id = this->add_neuron(new_neuron,
+                                                     BIAS_TYPE::NEXT_BIAS);
                         COUT_DEBUG("Added LOGISTIC neuron." << std::endl);
-                        WRITE_TO_OFS_DEBUG(ofs, "Added LINEAR neuron." << std::endl);
+                        WRITE_TO_OFS_DEBUG(ofs,
+                                           "Added LOGISTIC neuron." << std::endl);
                         break;
                     }
                 }
@@ -911,27 +976,32 @@ namespace lib4neuro {
                 current_layer_neuron_indices.emplace_back(neuron_id);
 
                 /* Connect new neuron with all neurons from the previous layer */
-                for(auto ind : previous_layer_neuron_indices) {
-                    this->add_connection_simple(ind, neuron_id, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
+                for (auto ind : previous_layer_neuron_indices) {
+                    this->add_connection_simple(ind,
+                                                neuron_id,
+                                                l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
                 }
             }
         }
 
-        previous_layer_neuron_indices.reserve(neuron_numbers->back()-1);
+        previous_layer_neuron_indices.reserve(neuron_numbers->back() - 1);
         previous_layer_neuron_indices = current_layer_neuron_indices;
         current_layer_neuron_indices.clear();
         current_layer_neuron_indices.reserve(out_dim);
 
         /* Creation of OUTPUT layer neurons */
-        for(unsigned int i = 0; i < out_dim; i++) {
+        for (unsigned int i = 0; i < out_dim; i++) {
             std::shared_ptr<Neuron> new_neuron;
             new_neuron.reset(new NeuronLinear());
-            size_t neuron_id = this->add_neuron(new_neuron, BIAS_TYPE::NO_BIAS);
+            size_t neuron_id = this->add_neuron(new_neuron,
+                                                BIAS_TYPE::NO_BIAS);
             current_layer_neuron_indices.emplace_back(neuron_id);
 
             /* Connect new neuron with all neurons from the previous layer */
-            for(auto ind : previous_layer_neuron_indices) {
-                this->add_connection_simple(ind, neuron_id, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
+            for (auto ind : previous_layer_neuron_indices) {
+                this->add_connection_simple(ind,
+                                            neuron_id,
+                                            l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
             }
         }
 
@@ -943,25 +1013,35 @@ namespace lib4neuro {
         this->analyze_layer_structure();
     }
 
-    void NeuralNetwork::get_jacobian(std::vector<std::vector<double>> &jacobian, std::pair<std::vector<double>, std::vector<double>> &data, std::vector<double> &error) {
+    void NeuralNetwork::get_jacobian(std::vector<std::vector<double>>& jacobian,
+                                     std::pair<std::vector<double>, std::vector<double>>& data,
+                                     std::vector<double>& error) {
 
         std::vector<double> fv(this->get_n_outputs());
 
         jacobian.resize(this->get_n_outputs());
         error.resize(this->get_n_outputs());
-        for(size_t i = 0; i < this->get_n_outputs(); ++i){
+        for (size_t i = 0; i < this->get_n_outputs(); ++i) {
             jacobian[i].resize(this->get_n_weights() + this->get_n_biases());
-            std::fill(jacobian[i].begin(), jacobian[i].end(), 0);
+            std::fill(jacobian[i].begin(),
+                      jacobian[i].end(),
+                      0);
         }
 
-        this->eval_single( data.first, fv );
+        this->eval_single(data.first,
+                          fv);
 
         std::vector<double> error_partial(this->get_n_outputs());
-        std::fill(error_partial.begin(), error_partial.end(), 0.0);
+        std::fill(error_partial.begin(),
+                  error_partial.end(),
+                  0.0);
 
-        for( size_t i = 0; i < this->get_n_outputs(); ++i){
+        for (size_t i = 0; i < this->get_n_outputs(); ++i) {
             error_partial[i] = 1;
-            this->add_to_gradient_single(data.first, error_partial, 1.0, jacobian[i]);
+            this->add_to_gradient_single(data.first,
+                                         error_partial,
+                                         1.0,
+                                         jacobian[i]);
             error[i] = data.second[i] - fv[i];
             error_partial[i] = 0;
         }
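
> Editor's note: a usage sketch for the reworked `get_jacobian()` (the wrapper function is hypothetical). Row i holds ∂output_i/∂p for every weight and bias p, produced by backpropagating a unit error derivative e_i; `error[i]` is the residual target − prediction for the same sample:

```cpp
#include <utility>
#include <vector>

void jacobian_demo(lib4neuro::NeuralNetwork& net,
                   std::vector<double>& x,    // one input sample
                   std::vector<double>& y) {  // its desired output
    std::vector<std::vector<double>> jacobian;
    std::vector<double> error;
    auto sample = std::make_pair(x, y);

    net.get_jacobian(jacobian, sample, error);

    // jacobian.size()    == net.get_n_outputs()
    // jacobian[i].size() == net.get_n_weights() + net.get_n_biases()
    // error[i]           == y[i] - (network output)[i]
}
```
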
diff --git a/src/Network/NeuralNetwork.h b/src/Network/NeuralNetwork.h
index a8531d7b60b612d102a9e375550c215305e4de61..dab5e133f2579e558cae7d125916e9bf206e2956 100644
--- a/src/Network/NeuralNetwork.h
+++ b/src/Network/NeuralNetwork.h
@@ -146,7 +146,9 @@ namespace lib4neuro {
          * @param t Index of the target neuron
          * @param con_idx Index of the connection representing the edge
          */
-        void add_outward_connection(size_t s, size_t t, size_t con_idx);
+        void add_outward_connection(size_t s,
+                                    size_t t,
+                                    size_t con_idx);
 
         /**
          * Adds a new entry (oriented edge s <- t) to the adjacency list of this network
@@ -154,7 +156,9 @@ namespace lib4neuro {
          * @param t Index of the target neuron
          * @param con_idx Index of the connection representing the edge
          */
-        void add_inward_connection(size_t s, size_t t, size_t con_idx);
+        void add_inward_connection(size_t s,
+                                   size_t t,
+                                   size_t con_idx);
 
         /**
          * Performs one feedforward pass and feedbackward pass during which determines the layers of this neural network
@@ -172,7 +176,9 @@ namespace lib4neuro {
          * @param[out] error
          */
         LIB4NEURO_API virtual void
-        get_jacobian(std::vector<std::vector<double>> &jacobian, std::pair<std::vector<double>, std::vector<double>> &data, std::vector<double> &error);
+        get_jacobian(std::vector<std::vector<double>>& jacobian,
+                     std::pair<std::vector<double>, std::vector<double>>& data,
+                     std::vector<double>& error);
 
 
 
@@ -182,7 +188,8 @@ namespace lib4neuro {
         * @param output
         * @param custom_weights_and_biases
         */
-        LIB4NEURO_API virtual void eval_single_debug(std::vector<double> &input, std::vector<double> &output,
+        LIB4NEURO_API virtual void eval_single_debug(std::vector<double>& input,
+                                                     std::vector<double>& output,
                                                      std::vector<double>* custom_weights_and_biases = nullptr);
 
 
@@ -192,8 +199,10 @@ namespace lib4neuro {
           * @param gradient
           */
         LIB4NEURO_API virtual void
-        add_to_gradient_single_debug(std::vector<double> &input, std::vector<double> &error_derivative, double error_scaling,
-                                     std::vector<double> &gradient);
+        add_to_gradient_single_debug(std::vector<double>& input,
+                                     std::vector<double>& error_derivative,
+                                     double error_scaling,
+                                     std::vector<double>& gradient);
 
         /**
          * Struct used to access private properties from
@@ -224,8 +233,9 @@ namespace lib4neuro {
          * @param output_neuron_indices
          * @return
          */
-        LIB4NEURO_API NeuralNetwork *
-        get_subnet(std::vector<size_t> &input_neuron_indices, std::vector<size_t> &output_neuron_indices);
+        LIB4NEURO_API NeuralNetwork*
+        get_subnet(std::vector<size_t>& input_neuron_indices,
+                   std::vector<size_t>& output_neuron_indices);
 
         /**
          * Replaces the values in @{this->connection_weights} and @{this->neuron_biases} by the provided values
@@ -238,7 +248,7 @@ namespace lib4neuro {
          * flags to not delete the vectors in this object
          * @param parent_network
          */
-        LIB4NEURO_API virtual void set_parameter_space_pointers(NeuralNetwork &parent_network);
+        LIB4NEURO_API virtual void set_parameter_space_pointers(NeuralNetwork& parent_network);
 
         /**
          *
@@ -246,7 +256,8 @@ namespace lib4neuro {
          * @param output
          * @param custom_weights_and_biases
          */
-        LIB4NEURO_API virtual void eval_single(std::vector<double> &input, std::vector<double> &output,
+        LIB4NEURO_API virtual void eval_single(std::vector<double>& input,
+                                               std::vector<double>& output,
                                                std::vector<double>* custom_weights_and_biases = nullptr);
 
         /**
@@ -255,15 +266,19 @@ namespace lib4neuro {
          * @param gradient
          */
         LIB4NEURO_API virtual void
-        add_to_gradient_single(std::vector<double> &input, std::vector<double> &error_derivative, double error_scaling,
-                               std::vector<double> &gradient);
+        add_to_gradient_single(std::vector<double>& input,
+                               std::vector<double>& error_derivative,
+                               double error_scaling,
+                               std::vector<double>& gradient);
 
         /**
          * Adds a new neuron to the list of neurons. Also assigns a valid bias value to its activation function
          * @param[in] n
          * @return
          */
-        LIB4NEURO_API size_t add_neuron(std::shared_ptr<Neuron> n, BIAS_TYPE bt = BIAS_TYPE::NEXT_BIAS, size_t bias_idx = 0);
+        LIB4NEURO_API size_t add_neuron(std::shared_ptr<Neuron> n,
+                                        BIAS_TYPE bt = BIAS_TYPE::NEXT_BIAS,
+                                        size_t bias_idx = 0);
 
         /**
          *
@@ -271,7 +286,8 @@ namespace lib4neuro {
          * @param n2_idx
          * @return
          */
-        LIB4NEURO_API size_t add_connection_simple(size_t n1_idx, size_t n2_idx,
+        LIB4NEURO_API size_t add_connection_simple(size_t n1_idx,
+                                                   size_t n2_idx,
                                                    SIMPLE_CONNECTION_TYPE sct = SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT,
                                                    size_t weight_idx = 0);
 
@@ -281,7 +297,9 @@ namespace lib4neuro {
          * @param n2_idx
          * @param weight
          */
-        LIB4NEURO_API size_t add_connection_constant(size_t n1_idx, size_t n2_idx, double weight);
+        LIB4NEURO_API size_t add_connection_constant(size_t n1_idx,
+                                                     size_t n2_idx,
+                                                     double weight);
 
         /**
         * Takes the existing connection with index 'connection_idx' in 'parent_network' and adds it to the structure of this
@@ -292,7 +310,10 @@ namespace lib4neuro {
          * @param parent_network
          */
         LIB4NEURO_API void
-        add_existing_connection(size_t n1_idx, size_t n2_idx, size_t connection_idx, NeuralNetwork &parent_network);
+        add_existing_connection(size_t n1_idx,
+                                size_t n2_idx,
+                                size_t connection_idx,
+                                NeuralNetwork& parent_network);
 
         /**
          *
@@ -362,13 +383,13 @@ namespace lib4neuro {
          *
          * @param input_neurons_indices
          */
-        LIB4NEURO_API void specify_input_neurons(std::vector<size_t> &input_neurons_indices);
+        LIB4NEURO_API void specify_input_neurons(std::vector<size_t>& input_neurons_indices);
 
         /**
          *
          * @param output_neurons_indices
          */
-        LIB4NEURO_API void specify_output_neurons(std::vector<size_t> &output_neurons_indices);
+        LIB4NEURO_API void specify_output_neurons(std::vector<size_t>& output_neurons_indices);
 
         /**
          *
@@ -424,13 +445,13 @@ namespace lib4neuro {
          *
          * @return
          */
-        LIB4NEURO_API virtual std::vector<double> *get_parameter_ptr_weights();
+        LIB4NEURO_API virtual std::vector<double>* get_parameter_ptr_weights();
 
         /**
          *
          * @return
          */
-        LIB4NEURO_API virtual std::vector<double> *get_parameter_ptr_biases();
+        LIB4NEURO_API virtual std::vector<double>* get_parameter_ptr_biases();
 
         /**
          *
@@ -452,7 +473,7 @@ namespace lib4neuro {
 
     }; // class NeuralNetwork
 
-    class FullyConnectedFFN: public NeuralNetwork {
+    class FullyConnectedFFN : public NeuralNetwork {
     public:
 
         /**
@@ -474,7 +495,6 @@ namespace lib4neuro {
                                                  std::ofstream* ofs = nullptr);
 
 
-
     private:
         void init(std::vector<unsigned int>* neuron_numbers,
                   std::vector<NEURON_TYPE>* hidden_layer_neuron_types,
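
> Editor's note: a construction sketch for the `FullyConnectedFFN` declared above, with hypothetical layer sizes (2 inputs, one hidden layer of 4 logistic neurons, 1 output), assuming the library's public headers are on the include path and the `randomize_*` helpers are publicly accessible:

```cpp
#include <vector>
// assumes the aggregate public header, e.g. #include "4neuro.h"

int main() {
    std::vector<unsigned int> layers{2, 4, 1};
    lib4neuro::FullyConnectedFFN net(&layers, lib4neuro::NEURON_TYPE::LOGISTIC);

    net.randomize_weights();
    net.randomize_biases();

    std::vector<double> input{0.25, -1.0};
    std::vector<double> output(1);
    net.eval_single(input, output);   // forward pass with the random parameters
    return 0;
}
```
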
diff --git a/src/Network/NeuralNetworkSerialization.h b/src/Network/NeuralNetworkSerialization.h
index 028899676562a0e5f8ce732b77c2c62e4d4ab72a..6c834fbe557b80131f3511e00cbfb654f36d2ca5 100644
--- a/src/Network/NeuralNetworkSerialization.h
+++ b/src/Network/NeuralNetworkSerialization.h
@@ -25,7 +25,9 @@
 namespace lib4neuro {
     struct NeuralNetwork::access {
         template<class Archive>
-        static void serialize(Archive &ar, NeuralNetwork &nn, const unsigned int version) {
+        static void serialize(Archive& ar,
+                              NeuralNetwork& nn,
+                              const unsigned int version) {
             ar & nn.neurons;
             ar & nn.input_neuron_indices;
             ar & nn.output_neuron_indices;
@@ -57,9 +59,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, lib4neuro::NeuralNetwork & nn, const unsigned int version)
-        {
-            lib4neuro::NeuralNetwork::access::serialize(ar, nn, version);
+        void serialize(Archive& ar,
+                       lib4neuro::NeuralNetwork& nn,
+                       const unsigned int version) {
+            lib4neuro::NeuralNetwork::access::serialize(ar,
+                                                        nn,
+                                                        version);
         }
 
     } // namespace serialization
diff --git a/src/Network/NeuralNetworkSum.cpp b/src/Network/NeuralNetworkSum.cpp
index 2fc57ef9e7e23f7a2dee81708f4a7456d98bcb60..c9c0b6165c307402e59a73e1738be1b52a984bff 100644
--- a/src/Network/NeuralNetworkSum.cpp
+++ b/src/Network/NeuralNetworkSum.cpp
@@ -36,30 +36,36 @@ namespace lib4neuro {
         }
     }
 
-    void NeuralNetworkSum::add_network(NeuralNetwork *net, std::string expression_string) {
+    void NeuralNetworkSum::add_network(NeuralNetwork* net,
+                                       std::string expression_string) {
         if (!this->summand) {
-            this->summand = new std::vector<NeuralNetwork *>(0);
+            this->summand = new std::vector<NeuralNetwork*>(0);
         }
         this->summand->push_back(net);
 
         if (!this->summand_coefficient) {
-            this->summand_coefficient = new std::vector<ExprtkWrapper *>(0);
+            this->summand_coefficient = new std::vector<ExprtkWrapper*>(0);
         }
         this->summand_coefficient->push_back(new ExprtkWrapper(expression_string));
     }
 
-    void NeuralNetworkSum::eval_single(std::vector<double> &input, std::vector<double> &output,
+    void NeuralNetworkSum::eval_single(std::vector<double>& input,
+                                       std::vector<double>& output,
                                        std::vector<double>* custom_weights_and_biases) {
         std::vector<double> mem_output(output.size());
-        std::fill(output.begin(), output.end(), 0.0);
+        std::fill(output.begin(),
+                  output.end(),
+                  0.0);
 
-        NeuralNetwork *SUM;
+        NeuralNetwork* SUM;
 
         for (size_t ni = 0; ni < this->summand->size(); ++ni) {
             SUM = this->summand->at(ni);
 
             if (SUM) {
-                this->summand->at(ni)->eval_single(input, mem_output, custom_weights_and_biases);
+                this->summand->at(ni)->eval_single(input,
+                                                   mem_output,
+                                                   custom_weights_and_biases);
 
                 double alpha = this->summand_coefficient->at(ni)->eval(input);
 
@@ -78,17 +84,22 @@ namespace lib4neuro {
 
     }
 
-    void NeuralNetworkSum::add_to_gradient_single(std::vector<double> &input, std::vector<double> &error_derivative,
-                                                  double error_scaling, std::vector<double> &gradient) {
+    void NeuralNetworkSum::add_to_gradient_single(std::vector<double>& input,
+                                                  std::vector<double>& error_derivative,
+                                                  double error_scaling,
+                                                  std::vector<double>& gradient) {
 
-        NeuralNetwork *SUM;
+        NeuralNetwork* SUM;
 
         for (size_t ni = 0; ni < this->summand->size(); ++ni) {
             SUM = this->summand->at(ni);
 
             if (SUM) {
                 double alpha = this->summand_coefficient->at(ni)->eval(input);
-                SUM->add_to_gradient_single(input, error_derivative, alpha * error_scaling, gradient);
+                SUM->add_to_gradient_single(input,
+                                            error_derivative,
+                                            alpha * error_scaling,
+                                            gradient);
             }
         }
     }
@@ -138,7 +149,7 @@ namespace lib4neuro {
         return 0;
     }
 
-    std::vector<double> *NeuralNetworkSum::get_parameter_ptr_weights() {
+    std::vector<double>* NeuralNetworkSum::get_parameter_ptr_weights() {
         if (this->summand) {
             return this->summand->at(0)->get_parameter_ptr_weights();
         }
@@ -146,7 +157,7 @@ namespace lib4neuro {
         return nullptr;
     }
 
-    std::vector<double> *NeuralNetworkSum::get_parameter_ptr_biases() {
+    std::vector<double>* NeuralNetworkSum::get_parameter_ptr_biases() {
         if (this->summand) {
             return this->summand->at(0)->get_parameter_ptr_biases();
         }
@@ -154,18 +165,23 @@ namespace lib4neuro {
         return nullptr;
     }
 
-    void NeuralNetworkSum::eval_single_debug(std::vector<double> &input, std::vector<double> &output,
+    void NeuralNetworkSum::eval_single_debug(std::vector<double>& input,
+                                             std::vector<double>& output,
                                              std::vector<double>* custom_weights_and_biases) {
         std::vector<double> mem_output(output.size());
-        std::fill(output.begin(), output.end(), 0.0);
+        std::fill(output.begin(),
+                  output.end(),
+                  0.0);
 
-        NeuralNetwork *SUM;
+        NeuralNetwork* SUM;
 
         for (size_t ni = 0; ni < this->summand->size(); ++ni) {
             SUM = this->summand->at(ni);
 
             if (SUM) {
-                this->summand->at(ni)->eval_single_debug(input, mem_output, custom_weights_and_biases);
+                this->summand->at(ni)->eval_single_debug(input,
+                                                         mem_output,
+                                                         custom_weights_and_biases);
 
                 double alpha = this->summand_coefficient->at(ni)->eval(input);
 
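
To make the scheme above explicit: eval_single() accumulates output += alpha_ni(input) * net_ni(input) over all summands, and add_to_gradient_single() pushes the same alpha into the error scaling, which is exactly the chain rule for this weighted sum. A standalone sketch of the arithmetic, with std::function standing in for NeuralNetwork and ExprtkWrapper:

#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

int main() {
    using Net  = std::function<std::vector<double>(const std::vector<double>&)>;
    using Coef = std::function<double(const std::vector<double>&)>;

    /* Two toy "networks" and their coefficient expressions. */
    std::vector<Net> summand = {
        [](const std::vector<double>& x) { return std::vector<double>{x[0] + 1.0}; },
        [](const std::vector<double>& x) { return std::vector<double>{2.0 * x[0]}; }};
    std::vector<Coef> summand_coefficient = {
        [](const std::vector<double>&)   { return 1.0; },   /* alpha_0 = 1 */
        [](const std::vector<double>& x) { return x[0]; }}; /* alpha_1 = x */

    std::vector<double> input{3.0};
    std::vector<double> output(1, 0.0);

    for (std::size_t ni = 0; ni < summand.size(); ++ni) {
        std::vector<double> mem_output = summand[ni](input);
        double alpha = summand_coefficient[ni](input);
        for (std::size_t i = 0; i < output.size(); ++i) {
            output[i] += alpha * mem_output[i];
        }
    }
    std::cout << output[0] << std::endl;  /* 1*(3+1) + 3*(2*3) = 22 */
}
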
diff --git a/src/Network/NeuralNetworkSum.h b/src/Network/NeuralNetworkSum.h
index fc6736079f09a8885a1c2cc53e85d5df09e7f009..0c9e55d7ce70ffcd5d7e8fe1ee7b66ca5cdbb687 100644
--- a/src/Network/NeuralNetworkSum.h
+++ b/src/Network/NeuralNetworkSum.h
@@ -17,8 +17,8 @@ namespace lib4neuro {
 
     class NeuralNetworkSum : public NeuralNetwork {
     private:
-        std::vector<NeuralNetwork *> *summand;
-        std::vector<ExprtkWrapper *> *summand_coefficient;
+        std::vector<NeuralNetwork*>* summand;
+        std::vector<ExprtkWrapper*>* summand_coefficient;
 
 
     public:
@@ -32,7 +32,8 @@ namespace lib4neuro {
 
         LIB4NEURO_API virtual ~NeuralNetworkSum();
 
-        LIB4NEURO_API void add_network(NeuralNetwork *net, std::string expression_string);
+        LIB4NEURO_API void add_network(NeuralNetwork* net,
+                                       std::string expression_string);
 
         /**
          *
@@ -40,7 +41,8 @@ namespace lib4neuro {
          * @param output
          * @param custom_weights_and_biases
          */
-        LIB4NEURO_API void eval_single(std::vector<double> &input, std::vector<double> &output,
+        LIB4NEURO_API void eval_single(std::vector<double>& input,
+                                       std::vector<double>& output,
                                        std::vector<double>* custom_weights_and_biases = nullptr) override;
 
         /**
@@ -49,7 +51,8 @@ namespace lib4neuro {
          * @param output
          * @param custom_weights_and_biases
          */
-        LIB4NEURO_API void eval_single_debug(std::vector<double> &input, std::vector<double> &output,
+        LIB4NEURO_API void eval_single_debug(std::vector<double>& input,
+                                             std::vector<double>& output,
                                              std::vector<double>* custom_weights_and_biases = nullptr) override;
 
 
@@ -59,8 +62,10 @@ namespace lib4neuro {
          * @param gradient
          */
         LIB4NEURO_API void
-        add_to_gradient_single(std::vector<double> &input, std::vector<double> &error_derivative, double error_scaling,
-                               std::vector<double> &gradient) override;
+        add_to_gradient_single(std::vector<double>& input,
+                               std::vector<double>& error_derivative,
+                               double error_scaling,
+                               std::vector<double>& gradient) override;
 
         /**
          *
@@ -96,14 +101,14 @@ namespace lib4neuro {
          * @return
          */
         //TODO only works if all the networks share the same parameters
-        LIB4NEURO_API std::vector<double> *get_parameter_ptr_weights() override;
+        LIB4NEURO_API std::vector<double>* get_parameter_ptr_weights() override;
 
         /**
          *
          * @return
          */
         //TODO only works if all the networks share the same parameters
-        LIB4NEURO_API std::vector<double> *get_parameter_ptr_biases() override;
+        LIB4NEURO_API std::vector<double>* get_parameter_ptr_biases() override;
     };
 
 }
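
A short usage sketch for this interface (not part of the patch): two already-built single-input, single-output networks f and g combined as f(x) + sin(x)*g(x). The coefficient strings are parsed by ExprtkWrapper; the variable-naming convention inside those expressions is an assumption here.

#include <vector>
#include "NeuralNetworkSum.h"

/* f and g are assumed to be already-built networks with one input and
 * one output neuron each. */
double weighted_pair_eval(lib4neuro::NeuralNetwork& f,
                          lib4neuro::NeuralNetwork& g,
                          double x) {
    lib4neuro::NeuralNetworkSum sum;
    sum.add_network(&f, "1.0");      /* constant coefficient */
    sum.add_network(&g, "sin(x)");   /* input-dependent coefficient (ExprTk) */

    std::vector<double> in{x};
    std::vector<double> out(1);
    sum.eval_single(in, out);        /* out[0] = f(x) + sin(x) * g(x) */
    return out[0];
}
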
diff --git a/src/Network/NeuralNetworkSumSerialization.h b/src/Network/NeuralNetworkSumSerialization.h
index 5867fc2a5fd33af49798170007e4918a02a4fda8..beb6fe9ad44d0c05f48263c64f557c9fcfee9fbd 100644
--- a/src/Network/NeuralNetworkSumSerialization.h
+++ b/src/Network/NeuralNetworkSumSerialization.h
@@ -15,7 +15,9 @@ BOOST_CLASS_EXPORT_KEY(lib4neuro::NeuralNetworkSum);
 namespace lib4neuro {
     struct NeuralNetworkSum::access {
         template<class Archive>
-        static void serialize(Archive &ar, NeuralNetworkSum &n, const unsigned int version) {
+        static void serialize(Archive& ar,
+                              NeuralNetworkSum& n,
+                              const unsigned int version) {
             ar & boost::serialization::base_object<NeuralNetwork>(n);
             ar & n.summand;
             ar & n.summand_coefficient;
@@ -34,9 +36,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, lib4neuro::NeuralNetworkSum& n, const unsigned int version)
-        {
-            lib4neuro::NeuralNetworkSum::access::serialize(ar, n, version);
+        void serialize(Archive& ar,
+                       lib4neuro::NeuralNetworkSum& n,
+                       const unsigned int version) {
+            lib4neuro::NeuralNetworkSum::access::serialize(ar,
+                                                           n,
+                                                           version);
         }
 
     } // namespace serialization
diff --git a/src/Neuron/Neuron.h b/src/Neuron/Neuron.h
index cebea7010b5a72a3e35b7efe3767b953788dff8c..f3689d671af1074b9735d5dace40c4a06f1481fc 100644
--- a/src/Neuron/Neuron.h
+++ b/src/Neuron/Neuron.h
@@ -54,13 +54,14 @@ namespace lib4neuro {
         /**
          * Evaluates the activation function and returns the result
          */
-        LIB4NEURO_API virtual double activate(double x, double b) = 0;
+        LIB4NEURO_API virtual double activate(double x,
+                                              double b) = 0;
 
         /**
          * Returns the most recent output value of this neuron's activation function
          * @return
          */
-        LIB4NEURO_API virtual double get_last_activation_value( );
+        LIB4NEURO_API virtual double get_last_activation_value();
 
     }; /* end of Neuron class */
 
@@ -83,21 +84,23 @@ namespace lib4neuro {
          * Calculates the derivative with respect to the argument, i.e. the 'potential'
          * @return f'(x), where 'f(x)' is the activation function and 'x' = 'potential'
          */
-        virtual double activation_function_eval_derivative(double x, double b) = 0;
+        virtual double activation_function_eval_derivative(double x,
+                                                           double b) = 0;
 
         /**
          * Calculates the derivative with respect to the bias
          * @return d/db f'(x), where 'f(x)' is the activation function, 'x' is the 'potential'
          * and 'b' is the bias
          */
-        virtual double activation_function_eval_derivative_bias(double x, double b) = 0;
+        virtual double activation_function_eval_derivative_bias(double x,
+                                                                double b) = 0;
 
         /**
          * Returns a Neuron pointer object with activation function being the partial derivative of
          * the activation function of this Neuron object with respect to the argument, i.e. 'potential'
          * @return
          */
-        virtual Neuron *get_derivative() = 0;
+        virtual Neuron* get_derivative() = 0;
     };
 
 }
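
The interface above is all a user-defined activation needs. A minimal sketch (not part of the patch), following the pattern of the concrete neurons in the files below: f(x, b) = e^(x + b), whose derivative is itself, so get_derivative() can return a fresh instance of the same class. Serialization support and the LIB4NEURO_API export macro are omitted; the class name is hypothetical.

#include <cmath>
#include "Neuron.h"

namespace lib4neuro {

    /* Hypothetical example class; not part of the library. */
    class NeuronExp : public NeuronDifferentiable {
    public:
        double activate(double x,
                        double b) override {
            this->activation_val = std::exp(x + b);
            return this->activation_val;
        }

        double activation_function_eval_derivative(double x,
                                                   double b) override {
            return std::exp(x + b);  /* d/dx e^(x + b) */
        }

        double activation_function_eval_derivative_bias(double x,
                                                        double b) override {
            return std::exp(x + b);  /* d/db e^(x + b) */
        }

        Neuron* get_derivative() override {
            return new NeuronExp();  /* (e^(x + b))' = e^(x + b) */
        }
    };

}
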
diff --git a/src/Neuron/NeuronBiased.cpp b/src/Neuron/NeuronBiased.cpp
index fa99938e37cfe4751dec896d863943c8df98912d..834c99a8118f9640ac71fcf7c3246c511e12191b 100644
--- a/src/Neuron/NeuronBiased.cpp
+++ b/src/Neuron/NeuronBiased.cpp
@@ -14,26 +14,29 @@
 
 BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::NeuronBiased);
 
-namespace lib4neuro{
+namespace lib4neuro {
 
     NeuronBiased::NeuronBiased(double b) {
         this->bias = b;
     }
 
-    double NeuronBiased::activate(double x, double b) {
+    double NeuronBiased::activate(double x,
+                                  double b) {
         return x + this->bias;
     }
 
-    double NeuronBiased::activation_function_eval_derivative(double x, double b) {
+    double NeuronBiased::activation_function_eval_derivative(double x,
+                                                             double b) {
         return 1.0;
     }
 
-    double NeuronBiased::activation_function_eval_derivative_bias(double x, double b) {
+    double NeuronBiased::activation_function_eval_derivative_bias(double x,
+                                                                  double b) {
         return 0.0;
     }
 
     Neuron* NeuronBiased::get_derivative() {
-        NeuronConstant *output = new NeuronConstant(1.0);
+        NeuronConstant* output = new NeuronConstant(1.0);
         return output;
     }
 
diff --git a/src/Neuron/NeuronBiased.h b/src/Neuron/NeuronBiased.h
index a8e10422ec4bf9159da8ad90365e85af84aecec5..6525f71b148952e463ee3b952a53a3a7a34873e9 100644
--- a/src/Neuron/NeuronBiased.h
+++ b/src/Neuron/NeuronBiased.h
@@ -11,7 +11,7 @@
 #include "Neuron.h"
 
 namespace lib4neuro {
-    class NeuronBiased: public NeuronDifferentiable {
+    class NeuronBiased : public NeuronDifferentiable {
 
     private:
 
@@ -30,12 +30,13 @@ namespace lib4neuro {
          * f(x) = x + b
          * @param[in] b Bias
          */
-        LIB4NEURO_API explicit NeuronBiased( double b = 0 );
+        LIB4NEURO_API explicit NeuronBiased(double b = 0);
 
         /**
          * Evaluates 'x + this->bias' and stores the result into the 'state' property
          */
-        LIB4NEURO_API double activate( double x, double b ) override;
+        LIB4NEURO_API double activate(double x,
+                                      double b) override;
 
         /**
          * Calculates the partial derivative of the activation function
@@ -43,19 +44,21 @@ namespace lib4neuro {
          * @return Partial derivative of the activation function according to the
          * 'bias' parameter. Returns 0.0
          */
-        LIB4NEURO_API double activation_function_eval_derivative_bias( double x, double b ) override;
+        LIB4NEURO_API double activation_function_eval_derivative_bias(double x,
+                                                                      double b) override;
 
         /**
          * Calculates d/dx of (x + this->bias) at point x
          * @return 1.0
          */
-        LIB4NEURO_API double activation_function_eval_derivative( double x, double b ) override;
+        LIB4NEURO_API double activation_function_eval_derivative(double x,
+                                                                 double b) override;
 
         /**
          * Returns a pointer to a Neuron with derivative as its activation function
          * @return
          */
-        LIB4NEURO_API Neuron* get_derivative( ) override;
+        LIB4NEURO_API Neuron* get_derivative() override;
     };
 
 }//end of namespace lib4neuro
diff --git a/src/Neuron/NeuronBiasedSerialization.h b/src/Neuron/NeuronBiasedSerialization.h
index 2cafc3d6e8740934090961c9b9e3888e7ad7255b..291047ff766223bfb759beaa715b5331e6b05b0d 100644
--- a/src/Neuron/NeuronBiasedSerialization.h
+++ b/src/Neuron/NeuronBiasedSerialization.h
@@ -11,9 +11,11 @@
 
 BOOST_CLASS_EXPORT_KEY(lib4neuro::NeuronBiased);
 
-struct lib4neuro :: NeuronBiased :: access {
-    template <class Archive>
-    static void serialize(Archive &ar, lib4neuro::NeuronBiased& n, const unsigned int version) {
+struct lib4neuro::NeuronBiased::access {
+    template<class Archive>
+    static void serialize(Archive& ar,
+                          lib4neuro::NeuronBiased& n,
+                          const unsigned int version) {
         ar & boost::serialization::base_object<lib4neuro::Neuron>(n);
         ar & n.bias;
     }
@@ -30,9 +32,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, lib4neuro::NeuronBiased& n, const unsigned int version)
-        {
-            lib4neuro::NeuronBiased::access::serialize(ar, n, version);
+        void serialize(Archive& ar,
+                       lib4neuro::NeuronBiased& n,
+                       const unsigned int version) {
+            lib4neuro::NeuronBiased::access::serialize(ar,
+                                                       n,
+                                                       version);
         }
 
     } // namespace serialization
diff --git a/src/Neuron/NeuronBinary.cpp b/src/Neuron/NeuronBinary.cpp
index b421e1f57f4f6cbc0e8b1a0e984e8d31e901278b..a862499b1bdcdcfc11bfb508627fcdbd51d0970d 100644
--- a/src/Neuron/NeuronBinary.cpp
+++ b/src/Neuron/NeuronBinary.cpp
@@ -9,7 +9,8 @@ BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::NeuronBinary);
 namespace lib4neuro {
     NeuronBinary::NeuronBinary() {}
 
-    double NeuronBinary::activate(double x, double b) {
+    double NeuronBinary::activate(double x,
+                                  double b) {
 
         if (x >= b) {
             this->activation_val = 1.0;
diff --git a/src/Neuron/NeuronBinary.h b/src/Neuron/NeuronBinary.h
index 1992b03fa3495787b9da990ebe54139d1b59c6f6..dac7f6e896d71c2ac9e2cb2e7410e5ea7517a49e 100644
--- a/src/Neuron/NeuronBinary.h
+++ b/src/Neuron/NeuronBinary.h
@@ -37,7 +37,8 @@ namespace lib4neuro {
         /**
          * Evaluates the activation function and stores the result into the 'state' property
          */
-        LIB4NEURO_API double activate(double x, double b) override;
+        LIB4NEURO_API double activate(double x,
+                                      double b) override;
 
     };
 
diff --git a/src/Neuron/NeuronBinarySerialization.h b/src/Neuron/NeuronBinarySerialization.h
index 4e3191e1ec2a82e14d14a4065aee72f3409946d6..3506f03903ecf774129a66aa15b598032bec8341 100644
--- a/src/Neuron/NeuronBinarySerialization.h
+++ b/src/Neuron/NeuronBinarySerialization.h
@@ -12,9 +12,11 @@
 
 BOOST_CLASS_EXPORT_KEY(lib4neuro::NeuronBinary);
 
-struct lib4neuro :: NeuronBinary :: access {
-    template <class Archive>
-    static void serialize(Archive &ar, lib4neuro::NeuronBinary& n, const unsigned int version) {
+struct lib4neuro::NeuronBinary::access {
+    template<class Archive>
+    static void serialize(Archive& ar,
+                          lib4neuro::NeuronBinary& n,
+                          const unsigned int version) {
         ar & boost::serialization::base_object<lib4neuro::Neuron>(n);
     }
 };
@@ -30,9 +32,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, lib4neuro::NeuronBinary& n, const unsigned int version)
-        {
-            lib4neuro::NeuronBinary::access::serialize(ar, n, version);
+        void serialize(Archive& ar,
+                       lib4neuro::NeuronBinary& n,
+                       const unsigned int version) {
+            lib4neuro::NeuronBinary::access::serialize(ar,
+                                                       n,
+                                                       version);
         }
 
     } // namespace serialization
diff --git a/src/Neuron/NeuronConstant.cpp b/src/Neuron/NeuronConstant.cpp
index b2965c45023c0de135340c1ce74c7b7b940f6e29..f342d532018f3715f226f097568af16f2fb11c5f 100644
--- a/src/Neuron/NeuronConstant.cpp
+++ b/src/Neuron/NeuronConstant.cpp
@@ -18,21 +18,24 @@ namespace lib4neuro {
         this->p = c;
     }
 
-    double NeuronConstant::activate(double x, double b) {
+    double NeuronConstant::activate(double x,
+                                    double b) {
         this->activation_val = this->p;
         return this->activation_val;
     }
 
-    double NeuronConstant::activation_function_eval_derivative_bias(double x, double b) {
+    double NeuronConstant::activation_function_eval_derivative_bias(double x,
+                                                                    double b) {
         return 0.0;
     }
 
-    double NeuronConstant::activation_function_eval_derivative(double x, double b) {
+    double NeuronConstant::activation_function_eval_derivative(double x,
+                                                               double b) {
         return 0.0;
     }
 
-    Neuron *NeuronConstant::get_derivative() {
-        NeuronConstant *output = new NeuronConstant();
+    Neuron* NeuronConstant::get_derivative() {
+        NeuronConstant* output = new NeuronConstant();
         return output;
     }
 
diff --git a/src/Neuron/NeuronConstant.h b/src/Neuron/NeuronConstant.h
index 71e43013630f8d5f6bf1caded440113468719505..fa745fa079557ba6eba580731c843282c7965578 100644
--- a/src/Neuron/NeuronConstant.h
+++ b/src/Neuron/NeuronConstant.h
@@ -34,7 +34,8 @@ namespace lib4neuro {
         /**
          * Evaluates and returns 'c'
          */
-        LIB4NEURO_API double activate(double x, double b) override;
+        LIB4NEURO_API double activate(double x,
+                                      double b) override;
 
         /**
          * Calculates the partial derivative of the activation function
@@ -42,19 +43,21 @@ namespace lib4neuro {
          * @return Partial derivative of the activation function according to the
          * 'bias' parameter. Returns 0.0
          */
-        LIB4NEURO_API double activation_function_eval_derivative_bias(double x, double b) override;
+        LIB4NEURO_API double activation_function_eval_derivative_bias(double x,
+                                                                      double b) override;
 
         /**
          * Calculates d/dx of (c) at point x
          * @return 0.0
          */
-        LIB4NEURO_API double activation_function_eval_derivative(double x, double b) override;
+        LIB4NEURO_API double activation_function_eval_derivative(double x,
+                                                                 double b) override;
 
         /**
          * Returns a pointer to a Neuron with derivative as its activation function
          * @return
          */
-        LIB4NEURO_API Neuron *get_derivative() override;
+        LIB4NEURO_API Neuron* get_derivative() override;
     };
 
 }
diff --git a/src/Neuron/NeuronConstantSerialization.h b/src/Neuron/NeuronConstantSerialization.h
index 6b658f479173de3f2bebebd2c63e3e94b17e05e9..d9d52bf97431545c4baab3f5ef1d94ec4f0b4047 100644
--- a/src/Neuron/NeuronConstantSerialization.h
+++ b/src/Neuron/NeuronConstantSerialization.h
@@ -15,7 +15,9 @@ BOOST_CLASS_EXPORT_KEY(lib4neuro::NeuronConstant);
 namespace lib4neuro {
     struct NeuronConstant::access {
         template<class Archive>
-        static void serialize(Archive &ar, NeuronConstant &n, const unsigned int version) {
+        static void serialize(Archive& ar,
+                              NeuronConstant& n,
+                              const unsigned int version) {
             ar & boost::serialization::base_object<Neuron>(n);
             ar & n.p;
         }
@@ -33,9 +35,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, lib4neuro::NeuronConstant& n, const unsigned int version)
-        {
-            lib4neuro::NeuronConstant::access::serialize(ar, n, version);
+        void serialize(Archive& ar,
+                       lib4neuro::NeuronConstant& n,
+                       const unsigned int version) {
+            lib4neuro::NeuronConstant::access::serialize(ar,
+                                                         n,
+                                                         version);
         }
 
     } // namespace serialization
diff --git a/src/Neuron/NeuronLinear.cpp b/src/Neuron/NeuronLinear.cpp
index fbcb64f37fa106647fa17bb353f70cfa71c44bca..7b35d66aec0c8e6f0aff5460c55850d38c6e2819 100644
--- a/src/Neuron/NeuronLinear.cpp
+++ b/src/Neuron/NeuronLinear.cpp
@@ -11,21 +11,24 @@ BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::NeuronLinear);
 namespace lib4neuro {
     NeuronLinear::NeuronLinear() {}
 
-    double NeuronLinear::activate(double x, double b) {
+    double NeuronLinear::activate(double x,
+                                  double b) {
         this->activation_val = x + b;
         return this->activation_val;
     }
 
-    double NeuronLinear::activation_function_eval_derivative_bias(double x, double b) {
+    double NeuronLinear::activation_function_eval_derivative_bias(double x,
+                                                                  double b) {
         return 1.0;
     }
 
-    double NeuronLinear::activation_function_eval_derivative(double x, double b) {
+    double NeuronLinear::activation_function_eval_derivative(double x,
+                                                             double b) {
         return 1.0;
     }
 
-    Neuron *NeuronLinear::get_derivative() {
-        NeuronConstant *output = new NeuronConstant(1.0);
+    Neuron* NeuronLinear::get_derivative() {
+        NeuronConstant* output = new NeuronConstant(1.0);
         return output;
     }
 
diff --git a/src/Neuron/NeuronLinear.h b/src/Neuron/NeuronLinear.h
index d7043f932180790965b2a55b4ba8285ff949e469..c6ef7db7dfe4f1e0b9cd6f5a5bdf68b8535a3671 100644
--- a/src/Neuron/NeuronLinear.h
+++ b/src/Neuron/NeuronLinear.h
@@ -18,7 +18,7 @@ namespace lib4neuro {
      * Linear neuron class - uses the activation function f(x) = x + b,
      * 'x' being the neuron's potential
      */
-    class NeuronLinear:public NeuronDifferentiable {
+    class NeuronLinear : public NeuronDifferentiable {
 
     public:
 
@@ -33,12 +33,13 @@ namespace lib4neuro {
          * f(x) = x + b
          * @param[in] b Bias
          */
-        LIB4NEURO_API explicit NeuronLinear( );
+        LIB4NEURO_API explicit NeuronLinear();
 
         /**
          * Evaluates 'x + b' and stores the result into the 'state' property
          */
-        LIB4NEURO_API double activate( double x, double b ) override;
+        LIB4NEURO_API double activate(double x,
+                                      double b) override;
 
         /**
          * Calculates the partial derivative of the activation function
@@ -46,19 +47,21 @@ namespace lib4neuro {
          * @return Partial derivative of the activation function according to the
          * 'bias' parameter. Returns 1.0
          */
-        LIB4NEURO_API double activation_function_eval_derivative_bias( double x, double b ) override;
+        LIB4NEURO_API double activation_function_eval_derivative_bias(double x,
+                                                                      double b) override;
 
         /**
          * Calculates d/dx of (x + b) at point x
          * @return 1.0
          */
-        LIB4NEURO_API double activation_function_eval_derivative( double x, double b ) override;
+        LIB4NEURO_API double activation_function_eval_derivative(double x,
+                                                                 double b) override;
 
         /**
          * Returns a pointer to a Neuron with derivative as its activation function
          * @return
          */
-        LIB4NEURO_API Neuron* get_derivative( ) override;
+        LIB4NEURO_API Neuron* get_derivative() override;
 
     };
 
diff --git a/src/Neuron/NeuronLinearSerialization.h b/src/Neuron/NeuronLinearSerialization.h
index 8f3fc12a6948fdb03ba3194caccfd75353dc8c9d..5d3cbef507e7cb06f88e564dab28e6629e55e634 100644
--- a/src/Neuron/NeuronLinearSerialization.h
+++ b/src/Neuron/NeuronLinearSerialization.h
@@ -15,7 +15,9 @@ BOOST_CLASS_EXPORT_KEY(lib4neuro::NeuronLinear);
 namespace lib4neuro {
     struct NeuronLinear::access {
         template<class Archive>
-        static void serialize(Archive &ar, NeuronLinear &n, const unsigned int version) {
+        static void serialize(Archive& ar,
+                              NeuronLinear& n,
+                              const unsigned int version) {
             ar & boost::serialization::base_object<Neuron>(n);
         }
     };
@@ -32,9 +34,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, lib4neuro::NeuronLinear& n, const unsigned int version)
-        {
-            lib4neuro::NeuronLinear::access::serialize(ar, n, version);
+        void serialize(Archive& ar,
+                       lib4neuro::NeuronLinear& n,
+                       const unsigned int version) {
+            lib4neuro::NeuronLinear::access::serialize(ar,
+                                                       n,
+                                                       version);
         }
 
     } // namespace serialization
diff --git a/src/Neuron/NeuronLogistic.cpp b/src/Neuron/NeuronLogistic.cpp
index 22443e2a6821f65e4886c78be91d0d743a154b91..b8b296f2d0a6112d8c925563f55395b2420f882e 100644
--- a/src/Neuron/NeuronLogistic.cpp
+++ b/src/Neuron/NeuronLogistic.cpp
@@ -13,34 +13,42 @@ namespace lib4neuro {
 
     NeuronLogistic_d2::NeuronLogistic_d2() {}
 
-    double NeuronLogistic_d2::activate(double x, double b) {
+    double NeuronLogistic_d2::activate(double x,
+                                       double b) {
         //(e^(b + x) (e^b - e^x))/(e^b + e^x)^3
 
-        double ex = std::pow(lib4neuro::E, x);
-        double eb = std::pow(E, b);
+        double ex = std::pow(lib4neuro::E,
+                             x);
+        double eb = std::pow(E,
+                             b);
         double denom = (eb + ex);
 
         this->activation_val = (eb * ex * (eb - ex)) / (denom * denom * denom);
         return this->activation_val;
     }
 
-    double NeuronLogistic_d2::activation_function_eval_derivative_bias(double x, double b) {
+    double NeuronLogistic_d2::activation_function_eval_derivative_bias(double x,
+                                                                       double b) {
         //-(e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
 
-        double eb = std::pow(E, b);
-        double ex = std::pow(E, x);
+        double eb = std::pow(E,
+                             b);
+        double ex = std::pow(E,
+                             x);
         double ebex = eb * ex;
         double denom = (eb + ex);
 
         return -(ebex * (-4 * ebex + eb * eb + ex * ex)) / (denom * denom * denom * denom);
     }
 
-    double NeuronLogistic_d2::activation_function_eval_derivative(double x, double b) {
+    double NeuronLogistic_d2::activation_function_eval_derivative(double x,
+                                                                  double b) {
         //(e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
-        return -this->activation_function_eval_derivative_bias(x, b);
+        return -this->activation_function_eval_derivative_bias(x,
+                                                               b);
     }
 
-    NeuronLogistic *NeuronLogistic_d2::get_derivative() {
+    NeuronLogistic* NeuronLogistic_d2::get_derivative() {
         //TODO maybe not the best way
         return nullptr;
     }
@@ -48,11 +56,14 @@ namespace lib4neuro {
     NeuronLogistic_d1::NeuronLogistic_d1() {}
 
 
-    double NeuronLogistic_d1::activate(double x, double b) {
+    double NeuronLogistic_d1::activate(double x,
+                                       double b) {
         //e^(b - x)/(e^(b - x) + 1)^2
 
-        double ex = std::pow(E, x);
-        double eb = std::pow(E, b);
+        double ex = std::pow(E,
+                             x);
+        double eb = std::pow(E,
+                             b);
         double d = (eb / ex);
         double denom = (d + 1);
 
@@ -60,24 +71,29 @@ namespace lib4neuro {
         return this->activation_val;
     }
 
-    double NeuronLogistic_d1::activation_function_eval_derivative_bias(double x, double b) {
+    double NeuronLogistic_d1::activation_function_eval_derivative_bias(double x,
+                                                                       double b) {
         //(e^(b + x) (e^x - e^b))/(e^b + e^x)^3
 
-        double ex = std::pow(E, x);
-        double eb = std::pow(E, b);
+        double ex = std::pow(E,
+                             x);
+        double eb = std::pow(E,
+                             b);
         double denom = (eb + ex);
 
         return (eb * ex * (ex - eb)) / (denom * denom * denom);
     }
 
-    double NeuronLogistic_d1::activation_function_eval_derivative(double x, double b) {
+    double NeuronLogistic_d1::activation_function_eval_derivative(double x,
+                                                                  double b) {
         //(e^(b + x) (e^b - e^x))/(e^b + e^x)^3
-        return -this->activation_function_eval_derivative_bias(x, b);
+        return -this->activation_function_eval_derivative_bias(x,
+                                                               b);
     }
 
-    NeuronLogistic *NeuronLogistic_d1::get_derivative() {
+    NeuronLogistic* NeuronLogistic_d1::get_derivative() {
         //(e^(b + x) (e^b - e^x))/(e^b + e^x)^3
-        NeuronLogistic_d2 *output = nullptr;
+        NeuronLogistic_d2* output = nullptr;
 
         output = new NeuronLogistic_d2();
 
@@ -86,17 +102,21 @@ namespace lib4neuro {
 
     NeuronLogistic::NeuronLogistic() {}
 
-    double NeuronLogistic::activate(double x, double b) {
+    double NeuronLogistic::activate(double x,
+                                    double b) {
         //(1 + e^(-x + b))^(-1)
 
-        double ex = std::pow(E, b - x);
+        double ex = std::pow(E,
+                             b - x);
 
         this->activation_val = 1.0 / (1.0 + ex);
         return this->activation_val;
     }
 
-    double NeuronLogistic::activation_function_eval_derivative_bias(double x, double b) {
-        double ex = std::pow(E, b - x);
+    double NeuronLogistic::activation_function_eval_derivative_bias(double x,
+                                                                    double b) {
+        double ex = std::pow(E,
+                             b - x);
         double denom = (ex + 1);
         double res = -ex / (denom * denom);
 
@@ -104,15 +124,17 @@ namespace lib4neuro {
     }
 
 
-    double NeuronLogistic::activation_function_eval_derivative(double x, double b) {
+    double NeuronLogistic::activation_function_eval_derivative(double x,
+                                                               double b) {
         //e^(b - x)/(e^(b - x) + 1)^2
-        return -this->activation_function_eval_derivative_bias(x, b);
+        return -this->activation_function_eval_derivative_bias(x,
+                                                               b);
 
     }
 
-    NeuronLogistic *NeuronLogistic::get_derivative() {
+    NeuronLogistic* NeuronLogistic::get_derivative() {
 
-        NeuronLogistic_d1 *output = nullptr;
+        NeuronLogistic_d1* output = nullptr;
         output = new NeuronLogistic_d1();
 
         return output;
diff --git a/src/Neuron/NeuronLogistic.h b/src/Neuron/NeuronLogistic.h
index 9daf384bf05def06eec5038880371d23cf175d39..ca9386891c283b577154dec146d4fb9f9354099a 100644
--- a/src/Neuron/NeuronLogistic.h
+++ b/src/Neuron/NeuronLogistic.h
@@ -35,7 +35,8 @@ namespace lib4neuro {
         /**
          * Evaluates '(1 + e^(-x + b))^(-1)' and stores the result into the 'state' property
          */
-        LIB4NEURO_API virtual double activate(double x, double b) override;
+        LIB4NEURO_API virtual double activate(double x,
+                                              double b) override;
 
         /**
          * Calculates the partial derivative of the activation function
@@ -43,18 +44,20 @@ namespace lib4neuro {
          * @return Partial derivative of the activation function according to the
          * bias, returns: -e^(b - x)/(e^(b - x) + 1)^2
          */
-        LIB4NEURO_API virtual double activation_function_eval_derivative_bias(double x, double b) override;
+        LIB4NEURO_API virtual double activation_function_eval_derivative_bias(double x,
+                                                                              double b) override;
         /**
          * Calculates d/dx of (1 + e^(-x + b))^(-1)
          * @return e^(b - x)/(e^(b - x) + 1)^2
          */
-        LIB4NEURO_API virtual double activation_function_eval_derivative(double x, double b) override;
+        LIB4NEURO_API virtual double activation_function_eval_derivative(double x,
+                                                                         double b) override;
 
         /**
          * Returns a pointer to a Neuron with derivative as its activation function
          * @return
          */
-        LIB4NEURO_API virtual NeuronLogistic *get_derivative() override;
+        LIB4NEURO_API virtual NeuronLogistic* get_derivative() override;
     };
 
 
@@ -78,7 +81,8 @@ namespace lib4neuro {
         /**
          * Evaluates 'e^(b - x)/(e^(b - x) + 1)^2' and returns the result
          */
-        LIB4NEURO_API virtual double activate(double x, double b) override;
+        LIB4NEURO_API virtual double activate(double x,
+                                              double b) override;
 
         /**
          * Calculates the partial derivative of the activation function
@@ -86,19 +90,21 @@ namespace lib4neuro {
          * @return Partial derivative of the activation function according to the
          * bias, returns: (e^(b + x) (e^x - e^b))/(e^b + e^x)^3
          */
-        LIB4NEURO_API virtual double activation_function_eval_derivative_bias(double x, double b) override;
+        LIB4NEURO_API virtual double activation_function_eval_derivative_bias(double x,
+                                                                              double b) override;
 
         /**
          * Calculates d/dx of e^(b - x)*(1 + e^(b - x))^(-2)
          * @return  (e^(b + x) (e^b - e^x))/(e^b + e^x)^3
          */
-        LIB4NEURO_API virtual double activation_function_eval_derivative(double x, double b) override;
+        LIB4NEURO_API virtual double activation_function_eval_derivative(double x,
+                                                                         double b) override;
 
         /**
          * Returns a pointer to a Neuron with derivative as its activation function
          * @return
          */
-        LIB4NEURO_API virtual NeuronLogistic *get_derivative() override;
+        LIB4NEURO_API virtual NeuronLogistic* get_derivative() override;
     };
 
 
@@ -121,7 +127,8 @@ namespace lib4neuro {
         /**
          * Evaluates '(e^(b + x) (e^b - e^x))/(e^b + e^x)^3' and returns the result
          */
-        LIB4NEURO_API virtual double activate(double x, double b) override;
+        LIB4NEURO_API virtual double activate(double x,
+                                              double b) override;
 
         /**
          * Calculates the partial derivative of the activation function
@@ -129,19 +136,21 @@ namespace lib4neuro {
          * @return Partial derivative of the activation function according to the
          * bias, returns: -(e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
          */
-        LIB4NEURO_API virtual double activation_function_eval_derivative_bias(double x, double b) override;
+        LIB4NEURO_API virtual double activation_function_eval_derivative_bias(double x,
+                                                                              double b) override;
 
         /**
          * Calculates d/dx of (e^(b + x) (e^b - e^x))/(e^b + e^x)^3
          * @return (e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
          */
-        LIB4NEURO_API virtual double activation_function_eval_derivative(double x, double b) override;
+        LIB4NEURO_API virtual double activation_function_eval_derivative(double x,
+                                                                         double b) override;
 
         /**
          *
          * @return
          */
-        LIB4NEURO_API virtual NeuronLogistic *get_derivative() override;
+        LIB4NEURO_API virtual NeuronLogistic* get_derivative() override;
 
     };
 
diff --git a/src/Neuron/NeuronLogisticSerialization.h b/src/Neuron/NeuronLogisticSerialization.h
index ea9ffb85b6bb35e41ef4b16289826fc2fc025172..0e5f8cb9ce6f991f9c7b6a35443405510d578551 100644
--- a/src/Neuron/NeuronLogisticSerialization.h
+++ b/src/Neuron/NeuronLogisticSerialization.h
@@ -18,21 +18,27 @@ BOOST_CLASS_EXPORT_KEY(lib4neuro::NeuronLogistic_d2);
 namespace lib4neuro {
     struct NeuronLogistic::access {
         template<class Archive>
-        static void serialize(Archive &ar, NeuronLogistic &n, const unsigned int version) {
+        static void serialize(Archive& ar,
+                              NeuronLogistic& n,
+                              const unsigned int version) {
             ar & boost::serialization::base_object<Neuron>(n);
         }
     };
 
     struct NeuronLogistic_d1::access {
         template<class Archive>
-        static void serialize(Archive &ar, NeuronLogistic_d1 &n, const unsigned int version) {
+        static void serialize(Archive& ar,
+                              NeuronLogistic_d1& n,
+                              const unsigned int version) {
             ar & boost::serialization::base_object<NeuronLogistic>(n);
         }
     };
 
     struct NeuronLogistic_d2::access {
         template<class Archive>
-        static void serialize(Archive &ar, NeuronLogistic_d2 &n, const unsigned int version) {
+        static void serialize(Archive& ar,
+                              NeuronLogistic_d2& n,
+                              const unsigned int version) {
             ar & boost::serialization::base_object<NeuronLogistic_d1>(n);
         }
     };
@@ -50,8 +56,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive &ar, lib4neuro::NeuronLogistic &n, const unsigned int version) {
-            lib4neuro::NeuronLogistic::access::serialize(ar, n, version);
+        void serialize(Archive& ar,
+                       lib4neuro::NeuronLogistic& n,
+                       const unsigned int version) {
+            lib4neuro::NeuronLogistic::access::serialize(ar,
+                                                         n,
+                                                         version);
         }
 
         /**
@@ -62,8 +72,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive &ar, lib4neuro::NeuronLogistic_d1 &n, const unsigned int version) {
-            lib4neuro::NeuronLogistic_d1::access::serialize(ar, n, version);
+        void serialize(Archive& ar,
+                       lib4neuro::NeuronLogistic_d1& n,
+                       const unsigned int version) {
+            lib4neuro::NeuronLogistic_d1::access::serialize(ar,
+                                                            n,
+                                                            version);
         }
 
         /**
@@ -74,8 +88,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive &ar, lib4neuro::NeuronLogistic_d2 &n, const unsigned int version) {
-            lib4neuro::NeuronLogistic_d2::access::serialize(ar, n, version);
+        void serialize(Archive& ar,
+                       lib4neuro::NeuronLogistic_d2& n,
+                       const unsigned int version) {
+            lib4neuro::NeuronLogistic_d2::access::serialize(ar,
+                                                            n,
+                                                            version);
         }
 
     } // namespace serialization
diff --git a/src/Neuron/NeuronSerialization.h b/src/Neuron/NeuronSerialization.h
index 51260f3fb2e76c385120a9a8541b4491debcecd7..53ff06ab622bd0628ed698229a38d9d3ee01e6bb 100644
--- a/src/Neuron/NeuronSerialization.h
+++ b/src/Neuron/NeuronSerialization.h
@@ -44,9 +44,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, lib4neuro::Neuron& n, const unsigned int version)
-        {
-            lib4neuro::Neuron::access::serialize(ar, n, version);
+        void serialize(Archive& ar,
+                       lib4neuro::Neuron& n,
+                       const unsigned int version) {
+            lib4neuro::Neuron::access::serialize(ar,
+                                                 n,
+                                                 version);
         }
 
         /**
@@ -57,9 +60,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, lib4neuro::NeuronDifferentiable& n, const unsigned int version)
-        {
-            lib4neuro::NeuronDifferentiable::access::serialize(ar, n, version);
+        void serialize(Archive& ar,
+                       lib4neuro::NeuronDifferentiable& n,
+                       const unsigned int version) {
+            lib4neuro::NeuronDifferentiable::access::serialize(ar,
+                                                               n,
+                                                               version);
         }
 
     } // namespace serialization
diff --git a/src/NormalizationStrategy/NormalizationStrategy.cpp b/src/NormalizationStrategy/NormalizationStrategy.cpp
index 66c13b7815912bfe31a431070f45618cb2861070..059ac8c5a380bbb1dfe9751aa5741d3fe4165c26 100644
--- a/src/NormalizationStrategy/NormalizationStrategy.cpp
+++ b/src/NormalizationStrategy/NormalizationStrategy.cpp
@@ -20,8 +20,10 @@ double NormalizationStrategy::get_min_value() {
 
 DoubleUnitStrategy::DoubleUnitStrategy() {}
 
-double DoubleUnitStrategy::normalize(double n, double max, double min) {
-    if(this->max_min_inp_val.empty()) {
+double DoubleUnitStrategy::normalize(double n,
+                                     double max,
+                                     double min) {
+    if (this->max_min_inp_val.empty()) {
         this->max_min_inp_val.emplace_back(max);
         this->max_min_inp_val.emplace_back(min);
     } else {
@@ -29,11 +31,11 @@ double DoubleUnitStrategy::normalize(double n, double max, double min) {
         this->max_min_inp_val.at(1) = min;
     }
 
-    return 2*(n - min)/(max - min) - 1;
+    return 2 * (n - min) / (max - min) - 1;
 }
 
 double DoubleUnitStrategy::de_normalize(double n) {
-    if(this->max_min_inp_val.empty()) {
+    if (this->max_min_inp_val.empty()) {
         THROW_RUNTIME_ERROR("Data were not normalized, so de-normalization cannot proceed!");
     }
 
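
The mapping above sends [min, max] linearly onto [-1, 1]; de_normalize() must therefore invert it as n = (y + 1)(max - min)/2 + min. A standalone round-trip check of that arithmetic (not library code):

#include <cstdio>

int main() {
    const double min = 10.0, max = 30.0, n = 25.0;
    double y  = 2.0 * (n - min) / (max - min) - 1.0;   /* normalize: 0.5   */
    double n2 = (y + 1.0) * (max - min) / 2.0 + min;   /* de-normalize: 25 */
    std::printf("normalized = %g, round-trip = %g\n", y, n2);
    return 0;
}
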
diff --git a/src/NormalizationStrategy/NormalizationStrategy.h b/src/NormalizationStrategy/NormalizationStrategy.h
index f1ab46e831d690a76441e8c635f4632d3b2dea30..cd722aa91320ddc3e68c9bb1c85037f28eedb4e7 100644
--- a/src/NormalizationStrategy/NormalizationStrategy.h
+++ b/src/NormalizationStrategy/NormalizationStrategy.h
@@ -30,7 +30,9 @@ public:
      * @param min
      * @return
      */
-    virtual double normalize(double n, double max, double min) = 0;
+    virtual double normalize(double n,
+                             double max,
+                             double min) = 0;
 
     /**
      *
@@ -78,7 +80,9 @@ public:
      * @param min
      * @return
      */
-    double normalize(double n, double max, double min) override;
+    double normalize(double n,
+                     double max,
+                     double min) override;
 
     /**
      *
diff --git a/src/NormalizationStrategy/NormalizationStrategySerialization.h b/src/NormalizationStrategy/NormalizationStrategySerialization.h
index f42c09fd2221bb5688e4e70469b85dcf7f0314a9..9c44748383794d65f4a757d15d5350a52124d5b6 100644
--- a/src/NormalizationStrategy/NormalizationStrategySerialization.h
+++ b/src/NormalizationStrategy/NormalizationStrategySerialization.h
@@ -16,14 +16,18 @@ BOOST_CLASS_EXPORT_KEY(DoubleUnitStrategy);
 
 struct NormalizationStrategy::access {
     template<class Archive>
-    static void serialize(Archive &ar, NormalizationStrategy& ns, const unsigned int version) {
+    static void serialize(Archive& ar,
+                          NormalizationStrategy& ns,
+                          const unsigned int version) {
         ar & ns.max_min_inp_val;
     }
 };
 
 struct DoubleUnitStrategy::access {
     template<class Archive>
-    static void serialize(Archive &ar, DoubleUnitStrategy &s, const unsigned int version) {
+    static void serialize(Archive& ar,
+                          DoubleUnitStrategy& s,
+                          const unsigned int version) {
         ar & boost::serialization::base_object<NormalizationStrategy>(s);
     }
 };
@@ -39,9 +43,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, NormalizationStrategy& ns, const unsigned int version)
-        {
-            NormalizationStrategy::access::serialize(ar, ns, version);
+        void serialize(Archive& ar,
+                       NormalizationStrategy& ns,
+                       const unsigned int version) {
+            NormalizationStrategy::access::serialize(ar,
+                                                     ns,
+                                                     version);
         }
 
         /**
@@ -52,9 +59,12 @@ namespace boost {
          * @param version Boost parameter - filled automatically during serialization!
          */
         template<class Archive>
-        void serialize(Archive & ar, DoubleUnitStrategy& s, const unsigned int version)
-        {
-            DoubleUnitStrategy::access::serialize(ar, s, version);
+        void serialize(Archive& ar,
+                       DoubleUnitStrategy& s,
+                       const unsigned int version) {
+            DoubleUnitStrategy::access::serialize(ar,
+                                                  s,
+                                                  version);
         }
     } // namespace serialization
 } // namespace boost
diff --git a/src/Solvers/DESolver.cpp b/src/Solvers/DESolver.cpp
index d8d9fa0309acc99e43fa5926473ca8e89f452406..3a82547c833731ea4576e4e4dba3bcb81246a4cf 100644
--- a/src/Solvers/DESolver.cpp
+++ b/src/Solvers/DESolver.cpp
@@ -17,18 +17,21 @@ namespace lib4neuro {
     MultiIndex::MultiIndex(size_t dimension) {
         this->dim = dimension;
         this->partial_derivatives_degrees.resize(this->dim);
-        std::fill(this->partial_derivatives_degrees.begin(), this->partial_derivatives_degrees.end(), 0);
+        std::fill(this->partial_derivatives_degrees.begin(),
+                  this->partial_derivatives_degrees.end(),
+                  0);
     }
 
-    void MultiIndex::set_partial_derivative(size_t index, size_t value) {
+    void MultiIndex::set_partial_derivative(size_t index,
+                                            size_t value) {
         this->partial_derivatives_degrees.at(index) = value;
     }
 
-    std::vector<size_t> *MultiIndex::get_partial_derivatives_degrees() {
+    std::vector<size_t>* MultiIndex::get_partial_derivatives_degrees() {
         return &this->partial_derivatives_degrees;
     }
 
-    bool MultiIndex::operator<(const MultiIndex &rhs) const {
+    bool MultiIndex::operator<(const MultiIndex& rhs) const {
         if (dim < rhs.dim) { return true; }
         else if (dim > rhs.dim) { return false; }
 
@@ -47,10 +50,14 @@ namespace lib4neuro {
         char buff[255];
 
         for (size_t i = 0; i < this->dim - 1; ++i) {
-            sprintf(buff, "%d, ", (int) this->partial_derivatives_degrees[i]);
+            sprintf(buff,
+                    "%d, ",
+                    (int) this->partial_derivatives_degrees[i]);
             output.append(buff);
         }
-        sprintf(buff, "%d", (int) this->partial_derivatives_degrees[this->dim - 1]);
+        sprintf(buff,
+                "%d",
+                (int) this->partial_derivatives_degrees[this->dim - 1]);
         output.append(buff);
 
         return output;
@@ -67,7 +74,9 @@ namespace lib4neuro {
     }
 
 
-    DESolver::DESolver(size_t n_equations, size_t n_inputs, size_t m) {
+    DESolver::DESolver(size_t n_equations,
+                       size_t n_inputs,
+                       size_t m) {
 
         if (m <= 0 || n_inputs <= 0 || n_equations <= 0) {
             THROW_INVALID_ARGUMENT_ERROR("Parameters 'm', 'n_equations' and 'n_inputs' must be greater than zero!");
@@ -76,7 +85,8 @@ namespace lib4neuro {
                (int) n_equations);
 
         printf("Constructing NN structure representing the solution [%d input neurons][%d inner neurons]...\n",
-               (int) n_inputs, (int) m);
+               (int) n_inputs,
+               (int) m);
 
         this->dim_i = n_inputs;
         this->dim_inn = m;
@@ -90,7 +100,8 @@ namespace lib4neuro {
         for (size_t i = 0; i < this->dim_i; ++i) {
             std::shared_ptr<Neuron> new_neuron;
             new_neuron.reset(new NeuronLinear());
-            idx = this->solution->add_neuron(new_neuron, BIAS_TYPE::NO_BIAS);
+            idx = this->solution->add_neuron(new_neuron,
+                                             BIAS_TYPE::NO_BIAS);
             input_set[i] = idx;
         }
         this->solution->specify_input_neurons(input_set);
@@ -100,7 +111,8 @@ namespace lib4neuro {
         std::vector<size_t> output_set(1);
         std::shared_ptr<Neuron> new_neuron;
         new_neuron.reset(new NeuronLinear());
-        idx = this->solution->add_neuron(new_neuron, BIAS_TYPE::NO_BIAS);//f(x) = x
+        idx = this->solution->add_neuron(new_neuron,
+                                         BIAS_TYPE::NO_BIAS);//f(x) = x
         output_set[0] = idx;
         this->solution->specify_output_neurons(output_set);
         size_t first_output_neuron = idx;
@@ -111,7 +123,8 @@ namespace lib4neuro {
             std::shared_ptr<NeuronLogistic> new_neuron2;
             new_neuron2.reset(new NeuronLogistic());
             this->solution_inner_neurons.push_back(new_neuron2);
-            idx = this->solution->add_neuron(new_neuron2, BIAS_TYPE::NEXT_BIAS);
+            idx = this->solution->add_neuron(new_neuron2,
+                                             BIAS_TYPE::NEXT_BIAS);
 
             if (i == 0) {
                 first_inner_neuron = idx;
@@ -122,20 +135,29 @@ namespace lib4neuro {
         size_t weight_idx;
         for (size_t i = 0; i < this->dim_i; ++i) {
             for (size_t j = 0; j < this->dim_inn; ++j) {
-                weight_idx = this->solution->add_connection_simple(first_input_neuron + i, first_inner_neuron + j,
+                weight_idx = this->solution->add_connection_simple(first_input_neuron + i,
+                                                                   first_inner_neuron + j,
                                                                    SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
                 printf("  adding a connection between input neuron %2d[%2d] and inner neuron %2d[%2d], weight index %3d\n",
-                       (int) i, (int) (first_input_neuron + i), (int) j, (int) (first_inner_neuron + j),
+                       (int) i,
+                       (int) (first_input_neuron + i),
+                       (int) j,
+                       (int) (first_inner_neuron + j),
                        (int) weight_idx);
             }
         }
 
         /* connections between inner neurons and output neurons */
         for (size_t i = 0; i < this->dim_inn; ++i) {
-            weight_idx = this->solution->add_connection_simple(first_inner_neuron + i, first_output_neuron,
+            weight_idx = this->solution->add_connection_simple(first_inner_neuron + i,
+                                                               first_output_neuron,
                                                                SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
             printf("  adding a connection between inner neuron %2d[%2d] and output neuron %2d[%2d], weight index %3d\n",
-                   (int) i, (int) (first_inner_neuron + i), 0, (int) (first_output_neuron), (int) weight_idx);
+                   (int) i,
+                   (int) (first_inner_neuron + i),
+                   0,
+                   (int) (first_output_neuron),
+                   (int) weight_idx);
         }
 
         MultiIndex initial_mi(this->dim_i);
@@ -157,11 +179,12 @@ namespace lib4neuro {
     DESolver::~DESolver() {
 
 
-
     }
 
 //TODO more efficient representation of the functions (large portion of the structure is the same for all partial derivatives)
-    void DESolver::add_to_differential_equation(size_t equation_idx, MultiIndex &alpha, std::string expression_string) {
+    void DESolver::add_to_differential_equation(size_t equation_idx,
+                                                MultiIndex& alpha,
+                                                std::string expression_string) {
 
         if (equation_idx >= this->n_equations) {
             THROW_INVALID_ARGUMENT_ERROR("The provided equation index is too large!");
@@ -191,13 +214,18 @@ namespace lib4neuro {
         /* we check whether the new multi-index is already present */
         if (map_multiindices2nn.find(alpha) != map_multiindices2nn.end()) {
             new_net = map_multiindices2nn[alpha];
-            this->differential_equations.at(equation_idx)->add_network(new_net.get(), expression_string);
+            this->differential_equations.at(equation_idx)->add_network(new_net.get(),
+                                                                       expression_string);
             printf("\nAdding an existing partial derivative (multi-index: %s) to equation %d with coefficient %s\n",
-                   alpha.to_string().c_str(), (int) equation_idx, expression_string.c_str());
+                   alpha.to_string().c_str(),
+                   (int) equation_idx,
+                   expression_string.c_str());
             return;
         }
         printf("\nAdding a new partial derivative (multi-index: %s) to equation %d with coefficient %s\n",
-               alpha.to_string().c_str(), (int) equation_idx, expression_string.c_str());
+               alpha.to_string().c_str(),
+               (int) equation_idx,
+               expression_string.c_str());
 
         /* we need to construct a new neural network */
         new_net.reset(new NeuralNetwork());
@@ -209,7 +237,8 @@ namespace lib4neuro {
         for (size_t i = 0; i < this->dim_i; ++i) {
             std::shared_ptr<Neuron> new_neuron;
             new_neuron.reset(new NeuronLinear());
-            idx = new_net->add_neuron(new_neuron, BIAS_TYPE::NO_BIAS);
+            idx = new_net->add_neuron(new_neuron,
+                                      BIAS_TYPE::NO_BIAS);
             input_set[i] = idx;
         }
         new_net->specify_input_neurons(input_set);
@@ -220,7 +249,8 @@ namespace lib4neuro {
         std::vector<size_t> output_set(1);
         std::shared_ptr<Neuron> new_neuron;
         new_neuron.reset(new NeuronLinear());
-        idx = new_net->add_neuron(new_neuron, BIAS_TYPE::NO_BIAS);//f(x) = x
+        idx = new_net->add_neuron(new_neuron,
+                                  BIAS_TYPE::NO_BIAS); //f(x) = x
         output_set[0] = idx;
         new_net->specify_output_neurons(output_set);
         size_t first_output_neuron = idx;
@@ -238,7 +268,8 @@ namespace lib4neuro {
                 n_ptr = std::shared_ptr<NeuronLogistic>(n_ptr->get_derivative());
 
             }
-            idx = new_net->add_neuron(n_ptr, BIAS_TYPE::EXISTING_BIAS,
+            idx = new_net->add_neuron(n_ptr,
+                                      BIAS_TYPE::EXISTING_BIAS,
                                       this->solution->get_neuron_bias_index(i + this->dim_i + 1));
 
             if (i == 0) {
@@ -251,7 +282,8 @@ namespace lib4neuro {
         for (size_t i = 0; i < derivative_degree * this->dim_inn; ++i) {
             std::shared_ptr<Neuron> new_neuron;
             new_neuron.reset(new NeuronLinear());
-            idx = new_net->add_neuron(new_neuron, BIAS_TYPE::NO_BIAS); //f(x) = x
+            idx = new_net->add_neuron(new_neuron,
+                                      BIAS_TYPE::NO_BIAS); //f(x) = x
         }
 
         /* connections between input neurons and inner neurons */
@@ -259,9 +291,14 @@ namespace lib4neuro {
         for (size_t i = 0; i < this->dim_i; ++i) {
             for (size_t j = 0; j < this->dim_inn; ++j) {
                 printf("  adding a connection between input neuron %2d[%2d] and inner neuron  %2d[%2d], connection index: %3d\n",
-                       (int) i, (int) (first_input_neuron + i), (int) j, (int) (first_inner_neuron + j),
+                       (int) i,
+                       (int) (first_input_neuron + i),
+                       (int) j,
+                       (int) (first_inner_neuron + j),
                        (int) connection_idx);
-                new_net->add_existing_connection(first_input_neuron + i, first_inner_neuron + j, connection_idx,
+                new_net->add_existing_connection(first_input_neuron + i,
+                                                 first_inner_neuron + j,
+                                                 connection_idx,
                                                  *this->solution);
                 connection_idx++;
             }
@@ -271,9 +308,14 @@ namespace lib4neuro {
         /* connections between inner neurons and the first set of 'glueing' neurons */
         for (size_t i = 0; i < this->dim_inn; ++i) {
             printf("  adding a connection between inner neuron %2d[%2d] and glue neuron   %2d[%2d], connection index: %3d\n",
-                   (int) i, (int) (first_inner_neuron + i), (int) i, (int) (first_glue_neuron + i),
+                   (int) i,
+                   (int) (first_inner_neuron + i),
+                   (int) i,
+                   (int) (first_glue_neuron + i),
                    (int) connection_idx);
-            new_net->add_existing_connection(first_inner_neuron + i, first_glue_neuron + i, connection_idx,
+            new_net->add_existing_connection(first_inner_neuron + i,
+                                             first_glue_neuron + i,
+                                             connection_idx,
                                              *this->solution);
             connection_idx++;
         }
@@ -286,11 +328,14 @@ namespace lib4neuro {
             for (size_t i = 0; i < this->dim_inn; ++i) {
                 connection_idx = pd_idx * this->dim_inn + i;
                 printf("  adding a connection between glue neuron  %2d[%2d] and glue neuron   %2d[%2d], connection index: %3d\n",
-                       (int) (i + (di) * this->dim_inn), (int) (first_glue_neuron + i + (di) * this->dim_inn),
-                       (int) (i + (di + 1) * this->dim_inn), (int) (first_glue_neuron + i + (di + 1) * this->dim_inn),
+                       (int) (i + (di) * this->dim_inn),
+                       (int) (first_glue_neuron + i + (di) * this->dim_inn),
+                       (int) (i + (di + 1) * this->dim_inn),
+                       (int) (first_glue_neuron + i + (di + 1) * this->dim_inn),
                        (int) connection_idx);
                 new_net->add_existing_connection(first_glue_neuron + i + (di) * this->dim_inn,
-                                                 first_glue_neuron + i + (di + 1) * this->dim_inn, connection_idx,
+                                                 first_glue_neuron + i + (di + 1) * this->dim_inn,
+                                                 connection_idx,
                                                  *this->solution);
             }
         }
@@ -302,27 +347,38 @@ namespace lib4neuro {
             connection_idx = pd_idx * this->dim_inn + i;
             printf("  adding a connection between glue neuron %2d[%2d] and output neuron  %2d[%2d], connection index: %3d\n",
                    (int) (i + (derivative_degree - 1) * this->dim_inn),
-                   (int) (first_glue_neuron + i + (derivative_degree - 1) * this->dim_inn), 0,
-                   (int) (first_output_neuron), (int) connection_idx);
+                   (int) (first_glue_neuron + i + (derivative_degree - 1) * this->dim_inn),
+                   0,
+                   (int) (first_output_neuron),
+                   (int) connection_idx);
             new_net->add_existing_connection(first_glue_neuron + i + (derivative_degree - 1) * this->dim_inn,
-                                             first_output_neuron, connection_idx, *this->solution);
+                                             first_output_neuron,
+                                             connection_idx,
+                                             *this->solution);
         }
 
         map_multiindices2nn[alpha] = new_net;
 
-        this->differential_equations.at(equation_idx)->add_network(new_net.get(), expression_string);
+        this->differential_equations.at(equation_idx)->add_network(new_net.get(),
+                                                                   expression_string);
     }
 
 
-    void DESolver::add_to_differential_equation(size_t equation_idx, std::string expression_string) {
+    void DESolver::add_to_differential_equation(size_t equation_idx,
+                                                std::string expression_string) {
 
-        printf("Adding a known function '%s' to equation %d\n", expression_string.c_str(), (int) equation_idx);
-        this->differential_equations.at(equation_idx)->add_network(nullptr, expression_string);
+        printf("Adding a known function '%s' to equation %d\n",
+               expression_string.c_str(),
+               (int) equation_idx);
+        this->differential_equations.at(equation_idx)->add_network(nullptr,
+                                                                   expression_string);
 
     }
 
 
-    void DESolver::set_error_function(size_t equation_idx, ErrorFunctionType F, DataSet *conditions) {
+    void DESolver::set_error_function(size_t equation_idx,
+                                      ErrorFunctionType F,
+                                      DataSet* conditions) {
         if (equation_idx >= this->n_equations) {
             THROW_INVALID_ARGUMENT_ERROR("The parameter 'equation_idx' is too large! It exceeds the number of differential equations.");
         }
@@ -335,7 +391,7 @@ namespace lib4neuro {
     }
 
 //TODO instead use general method with Optimizer as its argument (create hierarchy of optimizers)
-    void DESolver::solve(LearningMethod &learning_method) {
+    void DESolver::solve(LearningMethod& learning_method) {
 
 
         std::shared_ptr<NeuralNetwork> nn;
@@ -348,23 +404,30 @@ namespace lib4neuro {
             ds = this->errors_functions_data_sets.at(i);
             if (ds) {
                 if (this->errors_functions_types.at(i) == ErrorFunctionType::ErrorFuncMSE) {
-                    total_error.add_error_function(new MSE(nn.get(), ds.get()), 1.0);
+                    total_error.add_error_function(new MSE(nn.get(),
+                                                           ds.get()),
+                                                   1.0);
                 } else {
                     //default
-                    total_error.add_error_function(new MSE(nn.get(), ds.get()), 1.0);
+                    total_error.add_error_function(new MSE(nn.get(),
+                                                           ds.get()),
+                                                   1.0);
                 }
             } else {
-                total_error.add_error_function(nullptr, 1.0);
+                total_error.add_error_function(nullptr,
+                                               1.0);
             }
         }
 
-        printf("error before optimization: %f\n", total_error.eval(nullptr));
+        printf("error before optimization: %f\n",
+               total_error.eval(nullptr));
 
         learning_method.optimize(total_error);
         std::vector<double> params = *learning_method.get_parameters();
         this->solution->copy_parameter_space(&params);
 
-        printf("error after optimization: %f\n", total_error.eval(nullptr));
+        printf("error after optimization: %f\n",
+               total_error.eval(nullptr));
     }
 
     void DESolver::randomize_parameters() {
@@ -374,48 +437,55 @@ namespace lib4neuro {
 
     }
 
-    NeuralNetwork *DESolver::get_solution(MultiIndex &alpha) {
+    NeuralNetwork* DESolver::get_solution(MultiIndex& alpha) {
         return this->map_multiindices2nn[alpha].get();
     }
 
     double
-    DESolver::eval_equation(size_t equation_idx, std::shared_ptr<std::vector<double>>weight_and_biases, std::vector<double> &input) {
+    DESolver::eval_equation(size_t equation_idx,
+                            std::shared_ptr<std::vector<double>> weights_and_biases,
+                            std::vector<double>& input) {
         std::vector<double> output(1);
 
-        this->differential_equations.at(equation_idx)->eval_single(input, output, weight_and_biases.get());
+        this->differential_equations.at(equation_idx)->eval_single(input,
+                                                                   output,
+                                                                   weights_and_biases.get());
 
 
         return output[0];
     }
 
-    double DESolver::eval_total_error(std::vector<double> &weights_and_biases) {
+    double DESolver::eval_total_error(std::vector<double>& weights_and_biases) {
 
 
         std::shared_ptr<NeuralNetwork> nn;
         std::shared_ptr<DataSet> ds;
 
         ///* DEFINITION OF THE PARTIAL ERROR FUNCTIONS */
-        std::vector<ErrorFunction *> error_functions(this->n_equations);
+        std::vector<ErrorFunction*> error_functions(this->n_equations);
         for (size_t i = 0; i < this->n_equations; ++i) {
             nn = this->differential_equations.at(i);
             ds = this->errors_functions_data_sets.at(i);
 
             if (this->errors_functions_types.at(i) == ErrorFunctionType::ErrorFuncMSE) {
-                error_functions[i] = new MSE(nn.get(), ds.get());
+                error_functions[i] = new MSE(nn.get(),
+                                             ds.get());
             } else {
                 //default
-                error_functions[i] = new MSE(nn.get(), ds.get());
+                error_functions[i] = new MSE(nn.get(),
+                                             ds.get());
             }
         }
 
         /* DEFINITION OF THE GLOBAL ERROR FUNCTION */
         ErrorSum total_error;
         for (size_t i = 0; i < this->n_equations; ++i) {
-            total_error.add_error_function(error_functions[i], 1.0);
+            total_error.add_error_function(error_functions[i],
+                                           1.0);
         }
 
         //return total_error.eval(&weights_and_biases);
-		return 64;
+        return 64; // placeholder for testing only; the intended value is the commented-out eval above
     }
 
 }
\ No newline at end of file
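
For orientation, the DESolver interface reformatted above is used by composing a differential equation term by term and then training the embedded network. A minimal usage sketch follows, assuming only the public header "4neuro.h" and the signatures visible in this patch; the equation and condition values are illustrative, not part of the patch:

    #include <utility>
    #include <vector>

    #include "4neuro.h"

    int main() {
        /* one equation, one input variable, three inner neurons */
        l4n::DESolver solver(1, 1, 3);

        /* multi-indices select the partial derivative: alpha_0 ~ y(x), alpha_2 ~ y''(x) */
        l4n::MultiIndex alpha_0(1);
        l4n::MultiIndex alpha_2(1);
        alpha_2.set_partial_derivative(0, 2);

        /* assemble y''(x) + y(x) = 0; coefficients are expression strings */
        solver.add_to_differential_equation(0, alpha_2, "1.0");
        solver.add_to_differential_equation(0, alpha_0, "1.0");

        /* conditions enter as a data set paired with an error-function type */
        std::vector<std::pair<std::vector<double>, std::vector<double>>> cond;
        cond.emplace_back(std::make_pair(std::vector<double>{0.0}, std::vector<double>{1.0}));
        l4n::DataSet ds(&cond);
        solver.set_error_function(0, l4n::ErrorFunctionType::ErrorFuncMSE, &ds);

        /* randomize, then train via gradient descent as in the examples below */
        l4n::GradientDescent gd(1e-5, 1000);
        solver.randomize_parameters();
        solver.solve(gd);
        return 0;
    }
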
diff --git a/src/Solvers/DESolver.h b/src/Solvers/DESolver.h
index c0b106186d498074b0a86e1682ce24306bc1d8a5..08c9247f9bd1d1bb0cf93c34567f1a9f9e458fbc 100644
--- a/src/Solvers/DESolver.h
+++ b/src/Solvers/DESolver.h
@@ -5,10 +5,10 @@
  * @date 22.7.18 -
  */
 
- //TODO incorporate uncertainities as coefficients in NeuralNetworkSum or ErrorSum
- //TODO add support for multiple unknown functions to be found
- //TODO add export capability?
- //TODO restructure of the learning methods to have a common parent to be used as a parameter in the solvers
+//TODO incorporate uncertainties as coefficients in NeuralNetworkSum or ErrorSum
+//TODO add support for multiple unknown functions to be found
+//TODO add export capability?
+//TODO restructure the learning methods to share a common parent to be used as a parameter in the solvers
 
 #ifndef INC_4NEURO_PDESOLVER_H
 #define INC_4NEURO_PDESOLVER_H
@@ -55,13 +55,14 @@ namespace lib4neuro {
          * @param index
          * @param value
          */
-        LIB4NEURO_API void set_partial_derivative(size_t index, size_t value);
+        LIB4NEURO_API void set_partial_derivative(size_t index,
+                                                  size_t value);
 
         /**
          *
          * @return
          */
-        LIB4NEURO_API std::vector<size_t> *get_partial_derivatives_degrees();
+        LIB4NEURO_API std::vector<size_t>* get_partial_derivatives_degrees();
 
 
         /**
@@ -69,7 +70,7 @@ namespace lib4neuro {
          * @param rhs
          * @return
          */
-        LIB4NEURO_API bool operator<(const MultiIndex &rhs) const;
+        LIB4NEURO_API bool operator<(const MultiIndex& rhs) const;
 
         /**
          *
@@ -113,7 +114,9 @@ namespace lib4neuro {
          * @param n_inputs
          * @param m
          */
-        LIB4NEURO_API DESolver(size_t n_equations, size_t n_inputs, size_t m);
+        LIB4NEURO_API DESolver(size_t n_equations,
+                               size_t n_inputs,
+                               size_t m);
 
         /**
          * default destructor
@@ -127,7 +130,9 @@ namespace lib4neuro {
          * @param beta
          */
         LIB4NEURO_API void
-        add_to_differential_equation(size_t equation_idx, MultiIndex &alpha, std::string expression_string);
+        add_to_differential_equation(size_t equation_idx,
+                                     MultiIndex& alpha,
+                                     std::string expression_string);
 
 
         /**
@@ -135,7 +140,8 @@ namespace lib4neuro {
          * @param equation_idx
          * @param expression_string
          */
-        LIB4NEURO_API void add_to_differential_equation(size_t equation_idx, std::string expression_string);
+        LIB4NEURO_API void add_to_differential_equation(size_t equation_idx,
+                                                        std::string expression_string);
 
         /**
          * Sets the error function for the differential equation with the corresponding index
@@ -143,13 +149,15 @@ namespace lib4neuro {
          * @param F
          * @param conditions
          */
-        LIB4NEURO_API void set_error_function(size_t equation_idx, ErrorFunctionType F, DataSet *conditions);
+        LIB4NEURO_API void set_error_function(size_t equation_idx,
+                                              ErrorFunctionType F,
+                                              DataSet* conditions);
 
         /**
          *
          * @param learning_method
          */
-        LIB4NEURO_API void solve(LearningMethod &learning_method);
+        LIB4NEURO_API void solve(LearningMethod& learning_method);
 
         /**
          *
@@ -160,19 +168,21 @@ namespace lib4neuro {
          * returns the pointer to the object representing the given partial derivative of the solution
          * @return
          */
-        LIB4NEURO_API NeuralNetwork *get_solution(MultiIndex &alpha);
+        LIB4NEURO_API NeuralNetwork* get_solution(MultiIndex& alpha);
 
         /**
          * For testing purposes only
          */
         LIB4NEURO_API double
-        eval_equation(size_t equation_idx, std::shared_ptr<std::vector<double>>weights_and_biases, std::vector<double> &input);
+        eval_equation(size_t equation_idx,
+                      std::shared_ptr<std::vector<double>> weights_and_biases,
+                      std::vector<double>& input);
 
         /**
          * For testing purposes only
          * @return
          */
-        LIB4NEURO_API double eval_total_error(std::vector<double> &weights_and_biases);
+        LIB4NEURO_API double eval_total_error(std::vector<double>& weights_and_biases);
     };
 
 }
diff --git a/src/boost_test_lib_dummy.cpp b/src/boost_test_lib_dummy.cpp
index f8ea847f04537e4daece460ef0c59f910f37e2c9..e43ead867903467f44029e23e46b0b77e034e18f 100644
--- a/src/boost_test_lib_dummy.cpp
+++ b/src/boost_test_lib_dummy.cpp
@@ -1,7 +1,9 @@
 
 
 #ifndef BOOST_TEST_MODULE
-	#define BOOST_TEST_MODULE unit_test 
-	#include <boost/test/included/unit_test.hpp>
+#define BOOST_TEST_MODULE unit_test
+
+#include <boost/test/included/unit_test.hpp>
+
 #endif
 
diff --git a/src/examples/CMakeLists.txt b/src/examples/CMakeLists.txt
index fe2cfea26f41fa6cc379e8d889148d0f7265aca7..11c8b0f2bf51679511a6840e0f930b759b7d9546 100644
--- a/src/examples/CMakeLists.txt
+++ b/src/examples/CMakeLists.txt
@@ -39,20 +39,20 @@ add_executable(x2_fitting x2_fitting.cpp)
 target_link_libraries(x2_fitting PUBLIC lib4neuro)
 
 set_target_properties(
-    test_cases
-    net_test_1
-    net_test_2
-    net_test_3
-    net_test_ode_1
-    net_test_pde_1
-    network_serialization
-    test_harmonic_oscilator
-    seminar
-    simulator_1_1
-    simulator_1_2
-    x2_fitting
-
-    PROPERTIES
+        test_cases
+        net_test_1
+        net_test_2
+        net_test_3
+        net_test_ode_1
+        net_test_pde_1
+        network_serialization
+        test_harmonic_oscilator
+        seminar
+        simulator_1_1
+        simulator_1_2
+        x2_fitting
+
+        PROPERTIES
         ARCHIVE_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib/"
         LIBRARY_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib"
         RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/bin/examples"
diff --git a/src/examples/main.cpp b/src/examples/main.cpp
index ddcf0c82e5fddd130a95124e01acfa3bb112ff48..86d052844f9e939d323e9dae872134249c5e46d4 100644
--- a/src/examples/main.cpp
+++ b/src/examples/main.cpp
@@ -15,7 +15,8 @@
 
 #include "4neuro.h"
 
-int main(int argc, char** argv){
+int main(int argc,
+         char** argv) {
 
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
     std::vector<double> inp, out;
@@ -25,7 +26,8 @@ int main(int argc, char** argv){
         out.push_back(i + 4);
     }
 
-    data_vec.emplace_back(std::make_pair(inp, out));
+    data_vec.emplace_back(std::make_pair(inp,
+                                         out));
 
     lib4neuro::DataSet DataSet(&data_vec);
     int elements = DataSet.get_n_elements();
diff --git a/src/examples/net_test_1.cpp b/src/examples/net_test_1.cpp
index 73c7ff5251b8885145687bcdedd0d542185d9ea9..f659c71281d8e73a79a7cd4561ed31e04d2615ea 100644
--- a/src/examples/net_test_1.cpp
+++ b/src/examples/net_test_1.cpp
@@ -8,12 +8,13 @@
 
 #include "4neuro.h"
 
-void optimize_via_particle_swarm( l4n::NeuralNetwork &net, l4n::ErrorFunction &ef ){
+void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
+                                 l4n::ErrorFunction& ef) {
 
     /* TRAINING METHOD SETUP */
     std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
 
-    for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
+    for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
         domain_bounds[2 * i] = -10;
         domain_bounds[2 * i + 1] = 10;
     }
@@ -44,36 +45,48 @@ void optimize_via_particle_swarm( l4n::NeuralNetwork &net, l4n::ErrorFunction &e
             n_particles,
             iter_max
     );
-    swarm_01.optimize( ef );
+    swarm_01.optimize(ef);
 
     net.copy_parameter_space(swarm_01.get_parameters());
 
     /* ERROR CALCULATION */
-    std::cout << "Run finished! Error of the network[Particle swarm]: " << ef.eval( nullptr ) << std::endl;
-    std::cout << "***********************************************************************************************************************" <<std::endl;
+    std::cout << "Run finished! Error of the network[Particle swarm]: " << ef.eval(nullptr) << std::endl;
+    std::cout
+            << "***********************************************************************************************************************"
+            << std::endl;
 }
 
-void optimize_via_gradient_descent( l4n::NeuralNetwork &net, l4n::ErrorFunction &ef ){
+void optimize_via_gradient_descent(l4n::NeuralNetwork& net,
+                                   l4n::ErrorFunction& ef) {
 
-    std::cout << "***********************************************************************************************************************" <<std::endl;
-    l4n::GradientDescentBB gd( 1e-6, 1000 );
+    std::cout
+            << "***********************************************************************************************************************"
+            << std::endl;
+    l4n::GradientDescentBB gd(1e-6,
+                              1000);
 
-    gd.optimize( ef );
+    gd.optimize(ef);
 
     net.copy_parameter_space(gd.get_parameters());
 
     /* ERROR CALCULATION */
-    std::cout << "Run finished! Error of the network[Gradient descent]: " << ef.eval( nullptr ) << std::endl;
+    std::cout << "Run finished! Error of the network[Gradient descent]: " << ef.eval(nullptr) << std::endl;
 }
 
 int main() {
 
-    std::cout << "Running lib4neuro example   1: Basic use of the particle swarm or gradient method to train a simple network with few linear neurons" << std::endl;
-    std::cout << "***********************************************************************************************************************" <<std::endl;
+    std::cout
+            << "Running lib4neuro example   1: Basic use of the particle swarm or gradient method to train a simple network with few linear neurons"
+            << std::endl;
+    std::cout
+            << "***********************************************************************************************************************"
+            << std::endl;
     std::cout << "The code attempts to find an approximate solution to the system of equations below:" << std::endl;
     std::cout << "0 * w1 + 1 * w2 = 0.50" << std::endl;
     std::cout << "1 * w1 + 0.5*w2 = 0.75" << std::endl;
-    std::cout << "***********************************************************************************************************************" <<std::endl;
+    std::cout
+            << "***********************************************************************************************************************"
+            << std::endl;
 
     /* TRAIN DATA DEFINITION */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
@@ -81,11 +94,13 @@ int main() {
 
     inp = {0, 1};
     out = {0.5};
-    data_vec.emplace_back(std::make_pair(inp, out));
+    data_vec.emplace_back(std::make_pair(inp,
+                                         out));
 
     inp = {1, 0.5};
     out = {0.75};
-    data_vec.emplace_back(std::make_pair(inp, out));
+    data_vec.emplace_back(std::make_pair(inp,
+                                         out));
 
     l4n::DataSet ds(&data_vec);
 
@@ -100,13 +115,20 @@ int main() {
     std::shared_ptr<l4n::NeuronLinear> o1 = std::make_shared<l4n::NeuronLinear>();
 
     /* Adding neurons to the net */
-    size_t idx1 = net.add_neuron(i1, l4n::BIAS_TYPE::NO_BIAS);
-    size_t idx2 = net.add_neuron(i2, l4n::BIAS_TYPE::NO_BIAS);
-    size_t idx3 = net.add_neuron(o1, l4n::BIAS_TYPE::NO_BIAS);
+    size_t idx1 = net.add_neuron(i1,
+                                 l4n::BIAS_TYPE::NO_BIAS);
+    size_t idx2 = net.add_neuron(i2,
+                                 l4n::BIAS_TYPE::NO_BIAS);
+    size_t idx3 = net.add_neuron(o1,
+                                 l4n::BIAS_TYPE::NO_BIAS);
 
     /* Adding connections */
-    net.add_connection_simple(idx1, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
-    net.add_connection_simple(idx2, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
+    net.add_connection_simple(idx1,
+                              idx3,
+                              l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
+    net.add_connection_simple(idx2,
+                              idx3,
+                              l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
 
 
 
@@ -122,21 +144,25 @@ int main() {
     net.specify_output_neurons(net_output_neurons_indices);
 
     /* ERROR FUNCTION SPECIFICATION */
-    l4n::MSE mse(&net, &ds);
+    l4n::MSE mse(&net,
+                 &ds);
 
     /* PARTICLE SWARM LEARNING */
     net.randomize_parameters();
-    optimize_via_particle_swarm( net, mse );
+    optimize_via_particle_swarm(net,
+                                mse);
 
 
     /* GRADIENT DESCENT LEARNING */
     net.randomize_parameters();
-    optimize_via_gradient_descent( net, mse );
+    optimize_via_gradient_descent(net,
+                                  mse);
 
     /* Normalize data to prevent 'nan' results */
     ds.normalize();
     net.randomize_parameters();
-    optimize_via_gradient_descent(net, mse);
+    optimize_via_gradient_descent(net,
+                                  mse);
 
     return 0;
 }
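
A note on the particle-swarm setup above: domain_bounds interleaves the lower and upper bound of every trainable parameter, i.e. [lo_0, hi_0, lo_1, hi_1, ...], which is why the loops write domain_bounds[2 * i] and domain_bounds[2 * i + 1]. A small hypothetical helper (not part of lib4neuro) that makes the convention explicit:

    #include <cstddef>
    #include <vector>

    /* hypothetical helper: interleaved [lo, hi] pairs, one pair per parameter,
       matching the domain_bounds[2 * i] / domain_bounds[2 * i + 1] pattern
       used throughout the examples */
    std::vector<double> make_domain_bounds(std::size_t n_parameters, double lo, double hi) {
        std::vector<double> bounds(2 * n_parameters);
        for (std::size_t i = 0; i < n_parameters; ++i) {
            bounds[2 * i] = lo;      /* lower bound of parameter i */
            bounds[2 * i + 1] = hi;  /* upper bound of parameter i */
        }
        return bounds;
    }
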
diff --git a/src/examples/net_test_2.cpp b/src/examples/net_test_2.cpp
index c8fe35e3c8154b9db139c326ae86e5b60f4b02c8..7d9825b696abb9a649ced056a061f8e147dfa28f 100644
--- a/src/examples/net_test_2.cpp
+++ b/src/examples/net_test_2.cpp
@@ -7,12 +7,13 @@
 
 #include "4neuro.h"
 
-void optimize_via_particle_swarm( l4n::NeuralNetwork &net, l4n::ErrorFunction &ef ){
+void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
+                                 l4n::ErrorFunction& ef) {
 
     /* TRAINING METHOD SETUP */
     std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
 
-    for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
+    for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
         domain_bounds[2 * i] = -10;
         domain_bounds[2 * i + 1] = 10;
     }
@@ -43,35 +44,47 @@ void optimize_via_particle_swarm( l4n::NeuralNetwork &net, l4n::ErrorFunction &e
             n_particles,
             iter_max
     );
-    swarm_01.optimize( ef );
+    swarm_01.optimize(ef);
 
     net.copy_parameter_space(swarm_01.get_parameters());
 
-    std::cout << "Run finished! Error of the network[Particle swarm]: " << ef.eval( nullptr ) << std::endl;
-    std::cout << "***********************************************************************************************************************" <<std::endl;
+    std::cout << "Run finished! Error of the network[Particle swarm]: " << ef.eval(nullptr) << std::endl;
+    std::cout
+            << "***********************************************************************************************************************"
+            << std::endl;
 }
 
-void optimize_via_gradient_descent( l4n::NeuralNetwork &net, l4n::ErrorFunction &ef ){
+void optimize_via_gradient_descent(l4n::NeuralNetwork& net,
+                                   l4n::ErrorFunction& ef) {
 
-    l4n::GradientDescentBB gd( 1e-6, 1000 );
+    l4n::GradientDescentBB gd(1e-6,
+                              1000);
 
-    gd.optimize( ef );
+    gd.optimize(ef);
 
     net.copy_parameter_space(gd.get_parameters());
 
     /* ERROR CALCULATION */
-    std::cout << "Run finished! Error of the network[Gradient descent]: " << ef.eval( nullptr )<< std::endl;
-    std::cout << "***********************************************************************************************************************" <<std::endl;
+    std::cout << "Run finished! Error of the network[Gradient descent]: " << ef.eval(nullptr) << std::endl;
+    std::cout
+            << "***********************************************************************************************************************"
+            << std::endl;
 }
 
 int main() {
-    std::cout << "Running lib4neuro example   2: Basic use of the particle swarm method to train a network with five linear neurons and repeating edge weights" << std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    std::cout
+            << "Running lib4neuro example   2: Basic use of the particle swarm method to train a network with five linear neurons and repeating edge weights"
+            << std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
     std::cout << "The code attempts to find an approximate solution to the system of equations below:" << std::endl;
     std::cout << " 0 * w1 + 1 * w2 = 0.50 + b1" << std::endl;
     std::cout << " 1 * w1 + 0.5*w2 = 0.75 + b1" << std::endl;
     std::cout << "(1.25 + b2) * w2 = 0.63 + b3" << std::endl;
-    std::cout << "***********************************************************************************************************************" <<std::endl;
+    std::cout
+            << "***********************************************************************************************************************"
+            << std::endl;
 
     /* TRAIN DATA DEFINITION */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
@@ -79,15 +92,18 @@ int main() {
 
     inp = {0, 1, 0};
     out = {0.5, 0};
-    data_vec.emplace_back(std::make_pair(inp, out));
+    data_vec.emplace_back(std::make_pair(inp,
+                                         out));
 
     inp = {1, 0.5, 0};
     out = {0.75, 0};
-    data_vec.emplace_back(std::make_pair(inp, out));
+    data_vec.emplace_back(std::make_pair(inp,
+                                         out));
 
     inp = {0, 0, 1.25};
     out = {0, 0.63};
-    data_vec.emplace_back(std::make_pair(inp, out));
+    data_vec.emplace_back(std::make_pair(inp,
+                                         out));
     l4n::DataSet ds(&data_vec);
 
     /* NETWORK DEFINITION */
@@ -104,16 +120,28 @@ int main() {
     std::shared_ptr<l4n::NeuronLinear> o2 = std::make_shared<l4n::NeuronLinear>();
 
     /* Adding neurons to the nets */
-    size_t idx1 = net.add_neuron(i1, l4n::BIAS_TYPE::NO_BIAS);
-    size_t idx2 = net.add_neuron(i2, l4n::BIAS_TYPE::NO_BIAS);
-    size_t idx3 = net.add_neuron(o1, l4n::BIAS_TYPE::NEXT_BIAS);
-    size_t idx4 = net.add_neuron(i3, l4n::BIAS_TYPE::NEXT_BIAS);
-    size_t idx5 = net.add_neuron(o2, l4n::BIAS_TYPE::NEXT_BIAS);
+    size_t idx1 = net.add_neuron(i1,
+                                 l4n::BIAS_TYPE::NO_BIAS);
+    size_t idx2 = net.add_neuron(i2,
+                                 l4n::BIAS_TYPE::NO_BIAS);
+    size_t idx3 = net.add_neuron(o1,
+                                 l4n::BIAS_TYPE::NEXT_BIAS);
+    size_t idx4 = net.add_neuron(i3,
+                                 l4n::BIAS_TYPE::NEXT_BIAS);
+    size_t idx5 = net.add_neuron(o2,
+                                 l4n::BIAS_TYPE::NEXT_BIAS);
 
     /* Adding connections */
-    net.add_connection_simple(idx1, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 0
-    net.add_connection_simple(idx2, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 1
-    net.add_connection_simple(idx4, idx5, l4n::SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT, 0); // AGAIN weight index 0 - same weight!
+    net.add_connection_simple(idx1,
+                              idx3,
+                              l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 0
+    net.add_connection_simple(idx2,
+                              idx3,
+                              l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 1
+    net.add_connection_simple(idx4,
+                              idx5,
+                              l4n::SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT,
+                              0); // AGAIN weight index 0 - same weight!
 
     /* specification of the input/output neurons */
     std::vector<size_t> net_input_neurons_indices(3);
@@ -129,16 +157,19 @@ int main() {
     net.specify_output_neurons(net_output_neurons_indices);
 
     /* COMPLEX ERROR FUNCTION SPECIFICATION */
-    l4n::MSE mse(&net, &ds);
+    l4n::MSE mse(&net,
+                 &ds);
 
     /* PARTICLE SWARM LEARNING */
     net.randomize_weights();
-    optimize_via_particle_swarm( net, mse );
+    optimize_via_particle_swarm(net,
+                                mse);
 
 
     /* GRADIENT DESCENT LEARNING */
     net.randomize_weights();
-    optimize_via_gradient_descent( net, mse );
+    optimize_via_gradient_descent(net,
+                                  mse);
 
     return 0;
 }
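
The net_test_2 example above depends on weight sharing: SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT allocates a fresh weight index, while EXISTING_WEIGHT with an explicit index reuses one that already exists, so several edges are trained as a single parameter. A condensed, self-contained sketch of just that pattern, assuming the same public API as the examples:

    #include <cstddef>
    #include <memory>

    #include "4neuro.h"

    int main() {
        l4n::NeuralNetwork net;
        std::shared_ptr<l4n::NeuronLinear> a = std::make_shared<l4n::NeuronLinear>();
        std::shared_ptr<l4n::NeuronLinear> b = std::make_shared<l4n::NeuronLinear>();
        std::shared_ptr<l4n::NeuronLinear> c = std::make_shared<l4n::NeuronLinear>();

        std::size_t ia = net.add_neuron(a, l4n::BIAS_TYPE::NO_BIAS);
        std::size_t ib = net.add_neuron(b, l4n::BIAS_TYPE::NO_BIAS);
        std::size_t ic = net.add_neuron(c, l4n::BIAS_TYPE::NO_BIAS);

        /* NEXT_WEIGHT allocates weight index 0 for the edge a -> b ... */
        net.add_connection_simple(ia, ib, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
        /* ... and EXISTING_WEIGHT reuses index 0, so b -> c shares the same parameter */
        net.add_connection_simple(ib, ic, l4n::SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT, 0);
        return 0;
    }
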
diff --git a/src/examples/net_test_3.cpp b/src/examples/net_test_3.cpp
index c278101f8b379bf05c1d357be115495066b3ae36..3911bc0f33bae58f29ae9e32190d49a30907f913 100644
--- a/src/examples/net_test_3.cpp
+++ b/src/examples/net_test_3.cpp
@@ -18,14 +18,15 @@
 #include <boost/random/uniform_real_distribution.hpp>
 
 
-double get_difference(std::vector<double> &a, std::vector<double> &b){
+double get_difference(std::vector<double>& a,
+                      std::vector<double>& b) {
 
     double out = 0.0, m;
 
-    for( size_t i = 0; i < a.size(); ++i ){
+    for (size_t i = 0; i < a.size(); ++i) {
 
 
-        m = a[i]-b[i];
+        m = a[i] - b[i];
         out += m * m;
     }
 
@@ -34,22 +35,28 @@ double get_difference(std::vector<double> &a, std::vector<double> &b){
 }
 
 
-void calculate_gradient_analytical(std::vector<double> &input, std::vector<double> &parameter_biases, std::vector<double> &parameter_weights, size_t n_hidden_neurons, std::vector<double> &gradient_analytical ){
+void calculate_gradient_analytical(std::vector<double>& input,
+                                   std::vector<double>& parameter_biases,
+                                   std::vector<double>& parameter_weights,
+                                   size_t n_hidden_neurons,
+                                   std::vector<double>& gradient_analytical) {
 
     double a, b, y, x = input[0];
-    for( size_t i = 0; i < n_hidden_neurons; ++i ){
+    for (size_t i = 0; i < n_hidden_neurons; ++i) {
         a = parameter_weights[i];
         b = parameter_biases[i];
         y = parameter_weights[n_hidden_neurons + i];
 
-        gradient_analytical[i] += y * x * std::exp(b - a * x) / ((1+std::exp(b - a * x))*(1+std::exp(b - a * x)));
-        gradient_analytical[n_hidden_neurons + i] += 1.0 / ((1+std::exp(b - a * x)));
-        gradient_analytical[2*n_hidden_neurons + i] -= y * std::exp(b - a * x) / ((1+std::exp(b - a * x))*(1+std::exp(b - a * x)));
+        gradient_analytical[i] += y * x * std::exp(b - a * x) / ((1 + std::exp(b - a * x)) * (1 + std::exp(b - a * x)));
+        gradient_analytical[n_hidden_neurons + i] += 1.0 / ((1 + std::exp(b - a * x)));
+        gradient_analytical[2 * n_hidden_neurons + i] -=
+                y * std::exp(b - a * x) / ((1 + std::exp(b - a * x)) * (1 + std::exp(b - a * x)));
     }
 
 }
 
-int main(int argc, char** argv) {
+int main(int argc,
+         char** argv) {
 
     int n_tests = 10000;
     int n_hidden_neurons = 20;
@@ -61,23 +68,27 @@ int main(int argc, char** argv) {
 
         /* Fully connected feed-forward network with linear activation functions for input and output */
         /* layers and the specified activation fns for the hidden ones (each entry = layer)*/
-        std::vector<l4n::NEURON_TYPE> hidden_type_v = { l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC }; // hidden_type_v = {l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LINEAR}
-        l4n::FullyConnectedFFN nn1(&neuron_numbers_in_layers, &hidden_type_v);
+        std::vector<l4n::NEURON_TYPE> hidden_type_v = {l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC,
+                                                       l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC,
+                                                       l4n::NEURON_TYPE::LOGISTIC}; // hidden_type_v = {l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LINEAR}
+        l4n::FullyConnectedFFN nn1(&neuron_numbers_in_layers,
+                                   &hidden_type_v);
         nn1.randomize_parameters();
 
         boost::random::mt19937 gen(std::time(0));
-        boost::random::uniform_real_distribution<> dist(-1, 1);
+        boost::random::uniform_real_distribution<> dist(-1,
+                                                        1);
 
         size_t n_parameters = nn1.get_n_weights() + nn1.get_n_biases();
         std::vector<double> gradient_backprogation(n_parameters);
         std::vector<double> gradient_analytical(n_parameters);
-        std::vector<double> *parameter_biases = nn1.get_parameter_ptr_biases();
-        std::vector<double> *parameter_weights = nn1.get_parameter_ptr_weights();
+        std::vector<double>* parameter_biases = nn1.get_parameter_ptr_biases();
+        std::vector<double>* parameter_weights = nn1.get_parameter_ptr_weights();
         std::vector<double> error_derivative = {1};
 
         size_t n_good = 0, n_bad = 0;
 
-        for(int i = 0; i < n_tests; ++i){
+        for (int i = 0; i < n_tests; ++i) {
 
             std::vector<double> input(1);
             std::vector<double> output(1);
@@ -86,20 +97,32 @@ int main(int argc, char** argv) {
             output[0] = 0;
 
 
-            std::fill(gradient_backprogation.begin(), gradient_backprogation.end(), 0);
-            std::fill(gradient_analytical.begin(), gradient_analytical.end(), 0);
+            std::fill(gradient_backprogation.begin(),
+                      gradient_backprogation.end(),
+                      0);
+            std::fill(gradient_analytical.begin(),
+                      gradient_analytical.end(),
+                      0);
 
-            nn1.eval_single(input, output);
+            nn1.eval_single(input,
+                            output);
 
-            calculate_gradient_analytical(input, *parameter_biases, *parameter_weights, n_hidden_neurons, gradient_analytical );
-            nn1.add_to_gradient_single(input, error_derivative, 1, gradient_backprogation);
+            calculate_gradient_analytical(input,
+                                          *parameter_biases,
+                                          *parameter_weights,
+                                          n_hidden_neurons,
+                                          gradient_analytical);
+            nn1.add_to_gradient_single(input,
+                                       error_derivative,
+                                       1,
+                                       gradient_backprogation);
 
-            double diff = get_difference(gradient_backprogation, gradient_analytical);
+            double diff = get_difference(gradient_backprogation,
+                                         gradient_analytical);
 
-            if ( diff < 1e-6 ){
+            if (diff < 1e-6) {
                 n_good++;
-            }
-            else{
+            } else {
                 n_bad++;
             }
         }
@@ -107,8 +130,6 @@ int main(int argc, char** argv) {
         std::cout << "Good gradients: " << n_good << ", Bad gradients: " << n_bad << std::endl;
 
 
-
-
         return 0;
 
     }
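
The analytic gradient in calculate_gradient_analytical follows from the model tested by net_test_3: a single input x feeding n logistic hidden neurons with input weights a_i, biases b_i, and output weights y_i. Writing the network output as a sum of logistic terms (notation added here for reference, not taken from the source):

    f(x) = \sum_{i=1}^{n} \frac{y_i}{1 + e^{b_i - a_i x}}

    \frac{\partial f}{\partial a_i} = \frac{y_i x e^{b_i - a_i x}}{(1 + e^{b_i - a_i x})^2}, \quad
    \frac{\partial f}{\partial y_i} = \frac{1}{1 + e^{b_i - a_i x}}, \quad
    \frac{\partial f}{\partial b_i} = -\frac{y_i e^{b_i - a_i x}}{(1 + e^{b_i - a_i x})^2}

These are exactly the three accumulation lines in the loop: the a_i, y_i, and b_i derivatives land in gradient_analytical[i], [n + i], and [2n + i], respectively.
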
diff --git a/src/examples/net_test_harmonic_oscilator.cpp b/src/examples/net_test_harmonic_oscilator.cpp
index 98cb82583a0d4deec92d33f88c77886977e43f65..374e4a2387b8ee9531baea2ce353e0129dd8c593 100644
--- a/src/examples/net_test_harmonic_oscilator.cpp
+++ b/src/examples/net_test_harmonic_oscilator.cpp
@@ -13,40 +13,59 @@
 
 #include "4neuro.h"
 
-void export_solution( size_t n_test_points, double te, double ts, l4n::DESolver &solver, l4n::MultiIndex &alpha, const std::string prefix ){
-    l4n::NeuralNetwork *solution = solver.get_solution( alpha );
+void export_solution(size_t n_test_points,
+                     double te,
+                     double ts,
+                     l4n::DESolver& solver,
+                     l4n::MultiIndex& alpha,
+                     const std::string prefix) {
+    l4n::NeuralNetwork* solution = solver.get_solution(alpha);
 
     char buff[256];
-    sprintf( buff, "%sdata_1d_osc.txt", prefix.c_str() );
-    std::string final_fn( buff );
-
-    std::ofstream ofs(final_fn, std::ofstream::out);
-    printf("Exporting files '%s': %7.3f%%\r", final_fn.c_str(), 0.0);
+    sprintf(buff,
+            "%sdata_1d_osc.txt",
+            prefix.c_str());
+    std::string final_fn(buff);
+
+    std::ofstream ofs(final_fn,
+                      std::ofstream::out);
+    printf("Exporting files '%s': %7.3f%%\r",
+           final_fn.c_str(),
+           0.0);
     double frac = (te - ts) / (n_test_points - 1), x;
 
     std::vector<double> inp(1), out(1);
 
-    for(size_t i = 0; i < n_test_points; ++i){
+    for (size_t i = 0; i < n_test_points; ++i) {
         x = frac * i + ts;
 
         inp[0] = x;
-        solution->eval_single(inp, out);
+        solution->eval_single(inp,
+                              out);
         ofs << i + 1 << " " << x << " " << out[0] << " " << std::endl;
 
-        printf("Exporting files '%s': %7.3f%%\r", final_fn.c_str(), (100.0 * i) / (n_test_points - 1));
+        printf("Exporting files '%s': %7.3f%%\r",
+               final_fn.c_str(),
+               (100.0 * i) / (n_test_points - 1));
         std::cout.flush();
     }
-    printf("Exporting files '%s': %7.3f%%\n", final_fn.c_str(), 100.0);
+    printf("Exporting files '%s': %7.3f%%\n",
+           final_fn.c_str(),
+           100.0);
     std::cout.flush();
     ofs.close();
 }
 
-void optimize_via_particle_swarm( l4n::DESolver &solver, l4n::MultiIndex &alpha, size_t  max_iters, size_t n_particles ){
+void optimize_via_particle_swarm(l4n::DESolver& solver,
+                                 l4n::MultiIndex& alpha,
+                                 size_t max_iters,
+                                 size_t n_particles) {
 
     printf("Solution via the particle swarm optimization!\n");
-    std::vector<double> domain_bounds(2 * (solver.get_solution( alpha )->get_n_biases() + solver.get_solution( alpha )->get_n_weights()));
+    std::vector<double> domain_bounds(
+            2 * (solver.get_solution(alpha)->get_n_biases() + solver.get_solution(alpha)->get_n_weights()));
 
-    for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
+    for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
         domain_bounds[2 * i] = -10;
         domain_bounds[2 * i + 1] = 10;
     }
@@ -76,39 +95,64 @@ void optimize_via_particle_swarm( l4n::DESolver &solver, l4n::MultiIndex &alpha,
             max_iters
     );
 
-    solver.solve( swarm );
+    solver.solve(swarm);
 }
 
-void optimize_via_gradient_descent( l4n::DESolver &solver, double accuracy ){
+void optimize_via_gradient_descent(l4n::DESolver& solver,
+                                   double accuracy) {
 
     printf("Solution via a gradient descent method!\n");
-    l4n::GradientDescent gd( accuracy, 1000 );
+    l4n::GradientDescent gd(accuracy,
+                            1000);
 
-    solver.randomize_parameters( );
-    solver.solve( gd );
+    solver.randomize_parameters();
+    solver.solve(gd);
 }
 
-void test_harmonic_oscilator_fixed_E(double EE, double accuracy, size_t n_inner_neurons, size_t train_size, double ds, double de, size_t n_test_points, double ts, double te, size_t max_iters, size_t n_particles){
+void test_harmonic_oscilator_fixed_E(double EE,
+                                     double accuracy,
+                                     size_t n_inner_neurons,
+                                     size_t train_size,
+                                     double ds,
+                                     double de,
+                                     size_t n_test_points,
+                                     double ts,
+                                     double te,
+                                     size_t max_iters,
+                                     size_t n_particles) {
     std::cout << "Finding a solution via the Particle Swarm Optimization" << std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
 
     /* SOLVER SETUP */
     size_t n_inputs = 1;
     size_t n_equations = 1;
-    l4n::DESolver solver( n_equations, n_inputs, n_inner_neurons );
+    l4n::DESolver solver(n_equations,
+                         n_inputs,
+                         n_inner_neurons);
 
     /* SETUP OF THE EQUATIONS */
-    l4n::MultiIndex alpha_0( n_inputs );
-    l4n::MultiIndex alpha_2( n_inputs );
-    alpha_2.set_partial_derivative(0, 2);
+    l4n::MultiIndex alpha_0(n_inputs);
+    l4n::MultiIndex alpha_2(n_inputs);
+    alpha_2.set_partial_derivative(0,
+                                   2);
 
     /* the governing differential equation */
     char buff[255];
-    std::sprintf(buff, "%f", -EE);
+    std::sprintf(buff,
+                 "%f",
+                 -EE);
     std::string eigenvalue(buff);
-    solver.add_to_differential_equation( 0, alpha_2, "-1.0" );
-    solver.add_to_differential_equation( 0, alpha_0, "x^2" );
-    solver.add_to_differential_equation( 0, alpha_0, eigenvalue );
+    solver.add_to_differential_equation(0,
+                                        alpha_2,
+                                        "-1.0");
+    solver.add_to_differential_equation(0,
+                                        alpha_0,
+                                        "x^2");
+    solver.add_to_differential_equation(0,
+                                        alpha_0,
+                                        eigenvalue);
 
     /* SETUP OF THE TRAINING DATA */
     std::vector<double> inp, out;
@@ -121,34 +165,52 @@ void test_harmonic_oscilator_fixed_E(double EE, double accuracy, size_t n_inner_
 
     /* ISOTROPIC TRAIN SET */
     frac = (d1_e - d1_s) / (train_size - 1);
-    for(unsigned int i = 0; i < train_size; ++i){
+    for (unsigned int i = 0; i < train_size; ++i) {
         inp = {frac * i + d1_s};
         out = {0.0};
-        data_vec_g.emplace_back(std::make_pair(inp, out));
+        data_vec_g.emplace_back(std::make_pair(inp,
+                                               out));
     }
     inp = {0.0};
     out = {1.0};
-    data_vec_g.emplace_back(std::make_pair(inp, out));
+    data_vec_g.emplace_back(std::make_pair(inp,
+                                           out));
 
     l4n::DataSet ds_00(&data_vec_g);
 
     /* Placing the conditions into the solver */
-    solver.set_error_function( 0, l4n::ErrorFunctionType::ErrorFuncMSE, &ds_00 );
+    solver.set_error_function(0,
+                              l4n::ErrorFunctionType::ErrorFuncMSE,
+                              &ds_00);
 
     /* PARTICLE SWARM TRAINING METHOD SETUP */
     size_t total_dim = (2 + n_inputs) * n_inner_neurons;
 
-    optimize_via_gradient_descent( solver, accuracy );
-    export_solution( n_test_points, te, ts, solver, alpha_0, "gradient_" );
+    optimize_via_gradient_descent(solver,
+                                  accuracy);
+    export_solution(n_test_points,
+                    te,
+                    ts,
+                    solver,
+                    alpha_0,
+                    "gradient_");
 }
 
 int main() {
     std::cout << "Running lib4neuro harmonic Oscilator example   1" << std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
     std::cout << "          Governing equation: -y''(x) + x^2 * y(x) = E * y(x)" << std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
-    std::cout << "Expressing solution as y(x) = sum over [a_i / (1 + exp(bi - wxi*x ))], i in [1, n], where n is the number of hidden neurons" <<std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
+    std::cout
+            << "Expressing solution as y(x) = sum over [a_i / (1 + exp(bi - wxi*x ))], i in [1, n], where n is the number of hidden neurons"
+            << std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
 
     double EE = -1.0;
     unsigned int n_inner_neurons = 2;
@@ -163,7 +225,17 @@ int main() {
 
     size_t particle_swarm_max_iters = 1000;
     size_t n_particles = 100;
-    test_harmonic_oscilator_fixed_E(EE, accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te, particle_swarm_max_iters, n_particles);
+    test_harmonic_oscilator_fixed_E(EE,
+                                    accuracy,
+                                    n_inner_neurons,
+                                    train_size,
+                                    ds,
+                                    de,
+                                    test_size,
+                                    ts,
+                                    te,
+                                    particle_swarm_max_iters,
+                                    n_particles);
 
     return 0;
 }
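
The three add_to_differential_equation calls above encode the governing equation term by term. With alpha_0 the zeroth derivative (y itself) and alpha_2 the second derivative, the decomposition reads (a restatement of the calls already in the patch):

    -y''(x) + x^2 y(x) - E y(x) = 0
    \quad\Longleftrightarrow\quad
    \underbrace{(-1.0)}_{\alpha_2} y'' + \underbrace{x^2}_{\alpha_0} y + \underbrace{(-E)}_{\alpha_0} y = 0

where the constant -E is rendered into the string eigenvalue by the sprintf call.
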
diff --git a/src/examples/net_test_ode_1.cpp b/src/examples/net_test_ode_1.cpp
index 8d0cff53dd84db50eee708eb13d6660f08ba8b27..e431de035b4d5569d783e20a02a2c5ff4d66895a 100644
--- a/src/examples/net_test_ode_1.cpp
+++ b/src/examples/net_test_ode_1.cpp
@@ -21,12 +21,16 @@
 #include <chrono>
 #include "4neuro.h"
 
-void optimize_via_particle_swarm( l4n::DESolver &solver, l4n::MultiIndex &alpha, size_t  max_iters, size_t n_particles ){
+void optimize_via_particle_swarm(l4n::DESolver& solver,
+                                 l4n::MultiIndex& alpha,
+                                 size_t max_iters,
+                                 size_t n_particles) {
 
     printf("Solution via the particle swarm optimization!\n");
-    std::vector<double> domain_bounds(2 * (solver.get_solution( alpha )->get_n_biases() + solver.get_solution( alpha )->get_n_weights()));
+    std::vector<double> domain_bounds(
+            2 * (solver.get_solution(alpha)->get_n_biases() + solver.get_solution(alpha)->get_n_weights()));
 
-    for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
+    for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
         domain_bounds[2 * i] = -10;
         domain_bounds[2 * i + 1] = 10;
     }
@@ -56,91 +60,143 @@ void optimize_via_particle_swarm( l4n::DESolver &solver, l4n::MultiIndex &alpha,
             max_iters
     );
 
-    solver.solve( swarm );
+    solver.solve(swarm);
 
 }
 
-void optimize_via_gradient_descent(l4n::DESolver &solver, double accuracy ){
+void optimize_via_gradient_descent(l4n::DESolver& solver,
+                                   double accuracy) {
     printf("Solution via a gradient descent method!\n");
-    l4n::GradientDescent gd( accuracy, 1000 , 500000);
+    l4n::GradientDescent gd(accuracy,
+                            1000,
+                            500000);
 
-    solver.randomize_parameters( );
-    solver.solve( gd );
+    solver.randomize_parameters();
+    solver.solve(gd);
 }
 
-void export_solution( size_t n_test_points, double te, double ts,l4n::DESolver &solver, l4n::MultiIndex &alpha_0, l4n::MultiIndex &alpha_1, l4n::MultiIndex &alpha_2, const std::string prefix ){
-    l4n::NeuralNetwork *solution = solver.get_solution( alpha_0 );
-    l4n::NeuralNetwork *solution_d = solver.get_solution( alpha_1 );
-    l4n::NeuralNetwork *solution_dd = solver.get_solution( alpha_2 );
+void export_solution(size_t n_test_points,
+                     double te,
+                     double ts,
+                     l4n::DESolver& solver,
+                     l4n::MultiIndex& alpha_0,
+                     l4n::MultiIndex& alpha_1,
+                     l4n::MultiIndex& alpha_2,
+                     const std::string prefix) {
+    l4n::NeuralNetwork* solution = solver.get_solution(alpha_0);
+    l4n::NeuralNetwork* solution_d = solver.get_solution(alpha_1);
+    l4n::NeuralNetwork* solution_dd = solver.get_solution(alpha_2);
 
     /* ISOTROPIC TEST SET FOR BOUNDARY CONDITIONS */
     /* first boundary condition & its error */
 
     char buff[256];
-    sprintf( buff, "%sdata_1d_ode1.txt", prefix.c_str() );
-    std::string final_fn( buff );
-
-    std::ofstream ofs(final_fn, std::ofstream::out);
-    printf("Exporting files '%s': %7.3f%%\r", final_fn.c_str(), 0.0);
+    sprintf(buff,
+            "%sdata_1d_ode1.txt",
+            prefix.c_str());
+    std::string final_fn(buff);
+
+    std::ofstream ofs(final_fn,
+                      std::ofstream::out);
+    printf("Exporting files '%s': %7.3f%%\r",
+           final_fn.c_str(),
+           0.0);
     double frac = (te - ts) / (n_test_points - 1);
 
     std::vector<double> inp(1), out(1);
 
-    for(size_t i = 0; i < n_test_points; ++i){
+    for (size_t i = 0; i < n_test_points; ++i) {
         double x = frac * i + ts;
         inp[0] = x;
 
-        solution->eval_single(inp, out);
+        solution->eval_single(inp,
+                              out);
         double F = out[0];
 
-        solution_d->eval_single( inp, out);
+        solution_d->eval_single(inp,
+                                out);
         double DF = out[0];
 
-        solution_dd->eval_single( inp, out);
+        solution_dd->eval_single(inp,
+                                 out);
         double DDF = out[0];
 
-        ofs << i + 1 << " " << x << " " << std::pow(l4n::E, -2*x) * (3*x + 1)<< " " << F << " "
-            << std::pow(l4n::E, -2*x) * (1 - 6*x)<< " " << DF << " " << 4 * std::pow(l4n::E, -2*x) * (3*x - 2)
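+        /* columns: index, x, analytic y = (3x+1)e^(-2x), network y, analytic y', network y', analytic y'', network y'' */
+        /* ((3x+1)e^(-2x) solves y'' + 4y' + 4y = 0 with y(0) = y'(0) = 1) */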
+        ofs << i + 1 << " " << x << " " << std::pow(l4n::E,
+                                                    -2 * x) * (3 * x + 1) << " " << F << " "
+            << std::pow(l4n::E,
+                        -2 * x) * (1 - 6 * x) << " " << DF << " " << 4 * std::pow(l4n::E,
+                                                                                  -2 * x) * (3 * x - 2)
             << " " << DDF << std::endl;
 
-        printf("Exporting files '%s': %7.3f%%\r", final_fn.c_str(), (100.0 * i) / (n_test_points - 1));
+        printf("Exporting files '%s': %7.3f%%\r",
+               final_fn.c_str(),
+               (100.0 * i) / (n_test_points - 1));
         std::cout.flush();
     }
-    printf("Exporting files '%s': %7.3f%%\r", final_fn.c_str(), 100.0);
+    printf("Exporting files '%s': %7.3f%%\r",
+           final_fn.c_str(),
+           100.0);
     std::cout.flush();
     ofs.close();
 
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
 
 }
 
-void test_ode(double accuracy, size_t n_inner_neurons, size_t train_size, double ds, double de, size_t n_test_points, double ts, double te, size_t max_iters, size_t n_particles){
+void test_ode(double accuracy,
+              size_t n_inner_neurons,
+              size_t train_size,
+              double ds,
+              double de,
+              size_t n_test_points,
+              double ts,
+              double te,
+              size_t max_iters,
+              size_t n_particles) {
 
     std::cout << "Finding a solution via the Particle Swarm Optimization and Gradient descent method!" << std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
 
     /* SOLVER SETUP */
     size_t n_inputs = 1;
     size_t n_equations = 3;
-   l4n::DESolver solver_01( n_equations, n_inputs, n_inner_neurons );
+    l4n::DESolver solver_01(n_equations,
+                            n_inputs,
+                            n_inner_neurons);
 
     /* SETUP OF THE EQUATIONS */
-    l4n::MultiIndex alpha_0( n_inputs );
-    l4n::MultiIndex alpha_1( n_inputs );
-    l4n::MultiIndex alpha_2( n_inputs );
-    alpha_2.set_partial_derivative(0, 2);
-    alpha_1.set_partial_derivative(0, 1);
+    l4n::MultiIndex alpha_0(n_inputs);
+    l4n::MultiIndex alpha_1(n_inputs);
+    l4n::MultiIndex alpha_2(n_inputs);
+    alpha_2.set_partial_derivative(0,
+                                   2);
+    alpha_1.set_partial_derivative(0,
+                                   1);
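+    /* alpha_0 selects the solution y itself, alpha_1 its first and alpha_2 its second derivative */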
 
     /* the governing differential equation */
-    solver_01.add_to_differential_equation( 0, alpha_0, "4.0" );
-    solver_01.add_to_differential_equation( 0, alpha_1, "4.0" );
-    solver_01.add_to_differential_equation( 0, alpha_2, "1.0" );
+    solver_01.add_to_differential_equation(0,
+                                           alpha_0,
+                                           "4.0");
+    solver_01.add_to_differential_equation(0,
+                                           alpha_1,
+                                           "4.0");
+    solver_01.add_to_differential_equation(0,
+                                           alpha_2,
+                                           "1.0");
 
     /* dirichlet boundary condition */
-    solver_01.add_to_differential_equation( 1, alpha_0, "1.0" );
+    solver_01.add_to_differential_equation(1,
+                                           alpha_0,
+                                           "1.0");
 
     /* neumann boundary condition */
-    solver_01.add_to_differential_equation( 2, alpha_1, "1.0" );
+    solver_01.add_to_differential_equation(2,
+                                           alpha_1,
+                                           "1.0");
 
     /* SETUP OF THE TRAINING DATA */
     std::vector<double> inp, out;
@@ -154,58 +210,83 @@ void test_ode(double accuracy, size_t n_inner_neurons, size_t train_size, double
 
     /* ISOTROPIC TRAIN SET */
     frac = (d1_e - d1_s) / (train_size - 1);
-    for(unsigned int i = 0; i < train_size; ++i){
+    for (unsigned int i = 0; i < train_size; ++i) {
         inp = {frac * i};
         out = {0.0};
-        data_vec_g.emplace_back(std::make_pair(inp, out));
+        data_vec_g.emplace_back(std::make_pair(inp,
+                                               out));
 
         test_points[i] = inp[0];
     }
 
     /* CHEBYSHEV TRAIN SET */
-   l4n::DataSet ds_00(&data_vec_g);
+    l4n::DataSet ds_00(&data_vec_g);
 
     /* TRAIN DATA FOR DIRICHLET BC */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_y;
     inp = {0.0};
     out = {1.0};
-    data_vec_y.emplace_back(std::make_pair(inp, out));
-   l4n::DataSet ds_01(&data_vec_y);
+    data_vec_y.emplace_back(std::make_pair(inp,
+                                           out));
+    l4n::DataSet ds_01(&data_vec_y);
 
     /* TRAIN DATA FOR NEUMANN BC */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_dy;
     inp = {0.0};
     out = {1.0};
-    data_vec_dy.emplace_back(std::make_pair(inp, out));
-   l4n::DataSet ds_02(&data_vec_dy);
+    data_vec_dy.emplace_back(std::make_pair(inp,
+                                            out));
+    l4n::DataSet ds_02(&data_vec_dy);
 
     /* Placing the conditions into the solver */
-    solver_01.set_error_function( 0, l4n::ErrorFunctionType::ErrorFuncMSE, &ds_00 );
-    solver_01.set_error_function( 1, l4n::ErrorFunctionType::ErrorFuncMSE, &ds_01 );
-    solver_01.set_error_function( 2, l4n::ErrorFunctionType::ErrorFuncMSE, &ds_02 );
+    solver_01.set_error_function(0,
+                                 l4n::ErrorFunctionType::ErrorFuncMSE,
+                                 &ds_00);
+    solver_01.set_error_function(1,
+                                 l4n::ErrorFunctionType::ErrorFuncMSE,
+                                 &ds_01);
+    solver_01.set_error_function(2,
+                                 l4n::ErrorFunctionType::ErrorFuncMSE,
+                                 &ds_02);
 
     /* TRAINING METHOD SETUP */
-  /*  optimize_via_particle_swarm( solver_01, alpha_0, max_iters, n_particles );
-    export_solution( n_test_points, te, ts, solver_01 , alpha_0, alpha_1, alpha_2, "particle_" );*/
-	auto start = std::chrono::system_clock::now();
-
-    optimize_via_gradient_descent( solver_01, accuracy );
-    export_solution( n_test_points, te, ts, solver_01 , alpha_0, alpha_1, alpha_2, "gradient_" );
-
-	auto end = std::chrono::system_clock::now();
-	std::chrono::duration<double> elapsed_seconds = end - start;
-	std::cout << "elapsed time: " << elapsed_seconds.count() << std::endl;
+    /*  optimize_via_particle_swarm( solver_01, alpha_0, max_iters, n_particles );
+      export_solution( n_test_points, te, ts, solver_01 , alpha_0, alpha_1, alpha_2, "particle_" );*/
+    auto start = std::chrono::system_clock::now();
+
+    optimize_via_gradient_descent(solver_01,
+                                  accuracy);
+    export_solution(n_test_points,
+                    te,
+                    ts,
+                    solver_01,
+                    alpha_0,
+                    alpha_1,
+                    alpha_2,
+                    "gradient_");
+
+    auto end = std::chrono::system_clock::now();
+    std::chrono::duration<double> elapsed_seconds = end - start;
+    std::cout << "elapsed time: " << elapsed_seconds.count() << std::endl;
 }
 
 int main() {
     std::cout << "Running lib4neuro Ordinary Differential Equation example   1" << std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
     std::cout << "          Governing equation: y''(x) + 4y'(x) + 4y(x) = 0.0, for x in [0, 4]" << std::endl;
     std::cout << "Dirichlet boundary condition:                  y(0.0) = 1.0" << std::endl;
     std::cout << "  Neumann boundary condition:                 y'(0.0) = 1.0" << std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
-    std::cout << "Expressing solution as y(x) = sum over [a_i / (1 + exp(bi - wxi*x ))], i in [1, n], where n is the number of hidden neurons" <<std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
+    std::cout
+            << "Expressing solution as y(x) = sum over [a_i / (1 + exp(bi - wxi*x ))], i in [1, n], where n is the number of hidden neurons"
+            << std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
 
     unsigned int n_inner_neurons = 2;
     unsigned int train_size = 10;
@@ -220,7 +301,16 @@ int main() {
     size_t particle_swarm_max_iters = 1000;
     size_t n_particles = 100;
 
-    test_ode(accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te, particle_swarm_max_iters, n_particles);
+    test_ode(accuracy,
+             n_inner_neurons,
+             train_size,
+             ds,
+             de,
+             test_size,
+             ts,
+             te,
+             particle_swarm_max_iters,
+             n_particles);
 
 
     return 0;
diff --git a/src/examples/net_test_pde_1.cpp b/src/examples/net_test_pde_1.cpp
index 667db01dbf1fc7068d7c6d59e667ea44d91c76f4..4614467783aa2b560d76184e6c6bdfcd1508fd3c 100644
--- a/src/examples/net_test_pde_1.cpp
+++ b/src/examples/net_test_pde_1.cpp
@@ -24,12 +24,16 @@
 
 #include "4neuro.h"
 
-void optimize_via_particle_swarm(l4n::DESolver &solver,l4n::MultiIndex &alpha, size_t  max_iters, size_t n_particles ){
+void optimize_via_particle_swarm(l4n::DESolver& solver,
+                                 l4n::MultiIndex& alpha,
+                                 size_t max_iters,
+                                 size_t n_particles) {
 
     printf("Solution via the particle swarm optimization!\n");
-    std::vector<double> domain_bounds(2 * (solver.get_solution( alpha )->get_n_biases() + solver.get_solution( alpha )->get_n_weights()));
+    std::vector<double> domain_bounds(
+            2 * (solver.get_solution(alpha)->get_n_biases() + solver.get_solution(alpha)->get_n_weights()));
 
-    for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
+    for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
         domain_bounds[2 * i] = -10;
         domain_bounds[2 * i + 1] = 10;
     }
@@ -59,21 +63,30 @@ void optimize_via_particle_swarm(l4n::DESolver &solver,l4n::MultiIndex &alpha, s
             max_iters
     );
 
-    solver.solve( swarm );
+    solver.solve(swarm);
 }
 
-void optimize_via_gradient_descent(l4n::DESolver &solver, double accuracy ){
+void optimize_via_gradient_descent(l4n::DESolver& solver,
+                                   double accuracy) {
     printf("Solution via a gradient descent method!\n");
-    l4n::GradientDescent gd( accuracy, 1000 );
+    l4n::GradientDescent gd(accuracy,
+                            1000);
 
-    solver.randomize_parameters( );
-    solver.solve( gd );
+    solver.randomize_parameters();
+    solver.solve(gd);
 }
 
-void export_solution( size_t n_test_points, double te, double ts,l4n::DESolver &solver,l4n::MultiIndex &alpha_00,l4n::MultiIndex &alpha_01,l4n::MultiIndex &alpha_20, const std::string prefix ){
-    l4n::NeuralNetwork *solution = solver.get_solution( alpha_00 );
-    l4n::NeuralNetwork *solution_t = solver.get_solution( alpha_01 );
-    l4n::NeuralNetwork *solution_xx = solver.get_solution( alpha_20 );
+void export_solution(size_t n_test_points,
+                     double te,
+                     double ts,
+                     l4n::DESolver& solver,
+                     l4n::MultiIndex& alpha_00,
+                     l4n::MultiIndex& alpha_01,
+                     l4n::MultiIndex& alpha_20,
+                     const std::string prefix) {
+    l4n::NeuralNetwork* solution = solver.get_solution(alpha_00);
+    l4n::NeuralNetwork* solution_t = solver.get_solution(alpha_01);
+    l4n::NeuralNetwork* solution_xx = solver.get_solution(alpha_20);
 
     size_t i, j;
     double x, t;
@@ -81,118 +94,181 @@ void export_solution( size_t n_test_points, double te, double ts,l4n::DESolver &
     /* first boundary condition & its error */
 
     char buff[256];
-    sprintf( buff, "%sdata_2d_pde1_y.txt", prefix.c_str() );
-    std::string final_fn( buff );
-
-    printf("Exporting file '%s' : %7.3f%%\r", final_fn.c_str( ), 0.0 );
+    sprintf(buff,
+            "%sdata_2d_pde1_y.txt",
+            prefix.c_str());
+    std::string final_fn(buff);
+
+    printf("Exporting file '%s' : %7.3f%%\r",
+           final_fn.c_str(),
+           0.0);
     std::cout.flush();
 
     std::vector<double> input(2), output(1), output_t(1), output_xx(1);
-    std::ofstream ofs(final_fn, std::ofstream::out);
+    std::ofstream ofs(final_fn,
+                      std::ofstream::out);
     double frac = (te - ts) / (n_test_points - 1);
-    for(i = 0; i < n_test_points; ++i){
+    for (i = 0; i < n_test_points; ++i) {
         x = i * frac + ts;
-        for(j = 0; j < n_test_points; ++j){
+        for (j = 0; j < n_test_points; ++j) {
             t = j * frac + ts;
             input = {x, t};
 
-            solution->eval_single( input, output );
+            solution->eval_single(input,
+                                  output);
 
             ofs << x << " " << t << " " << output[0] << std::endl;
-            printf("Exporting file '%s' : %7.3f%%\r", final_fn.c_str(), (100.0 * (j + i * n_test_points)) / (n_test_points * n_test_points - 1));
+            printf("Exporting file '%s' : %7.3f%%\r",
+                   final_fn.c_str(),
+                   (100.0 * (j + i * n_test_points)) / (n_test_points * n_test_points - 1));
             std::cout.flush();
         }
     }
-    printf("Exporting file '%s' : %7.3f%%\n", final_fn.c_str(), 100.0);
+    printf("Exporting file '%s' : %7.3f%%\n",
+           final_fn.c_str(),
+           100.0);
     std::cout.flush();
     ofs.close();
 
     /* governing equation error */
-    sprintf( buff, "%sdata_2d_pde1_first_equation_error.txt", prefix.c_str() );
-    final_fn = std::string( buff );
-
-    ofs = std::ofstream(final_fn, std::ofstream::out);
-    printf("Exporting file '%s' : %7.3f%%\r", final_fn.c_str(), 0.0);
-    for(i = 0; i < n_test_points; ++i){
+    sprintf(buff,
+            "%sdata_2d_pde1_first_equation_error.txt",
+            prefix.c_str());
+    final_fn = std::string(buff);
+
+    ofs = std::ofstream(final_fn,
+                        std::ofstream::out);
+    printf("Exporting file '%s' : %7.3f%%\r",
+           final_fn.c_str(),
+           0.0);
+    for (i = 0; i < n_test_points; ++i) {
         x = i * frac + ts;
-        for(j = 0; j < n_test_points; ++j){
+        for (j = 0; j < n_test_points; ++j) {
             t = j * frac + ts;
             input = {x, t};
 
-            solution_t->eval_single( input, output_t );
-            solution_xx->eval_single( input, output_xx );
+            solution_t->eval_single(input,
+                                    output_t);
+            solution_xx->eval_single(input,
+                                     output_xx);
 
             ofs << x << " " << t << " " << std::fabs(output_xx[0] - output_t[0]) << std::endl;
-            printf("Exporting file 'data_2d_pde1_first_equation_error.txt' : %7.3f%%\r", (100.0 * (j + i * n_test_points)) / (n_test_points * n_test_points - 1));
+            printf("Exporting file 'data_2d_pde1_first_equation_error.txt' : %7.3f%%\r",
+                   (100.0 * (j + i * n_test_points)) / (n_test_points * n_test_points - 1));
             std::cout.flush();
         }
     }
-    printf("Exporting file '%s' : %7.3f%%\n", final_fn.c_str(), 100.0);
+    printf("Exporting file '%s' : %7.3f%%\n",
+           final_fn.c_str(),
+           100.0);
     std::cout.flush();
     ofs.close();
 
     /* ISOTROPIC TEST SET FOR BOUNDARY CONDITIONS */
     /* first boundary condition & its error */
-    sprintf( buff, "%sdata_1d_pde1_yt.txt", prefix.c_str() );
+    sprintf(buff,
+            "%sdata_1d_pde1_yt.txt",
+            prefix.c_str());
     std::string final_fn_t(buff);
 
-    sprintf( buff, "%sdata_1d_pde1_yx.txt", prefix.c_str() );
+    sprintf(buff,
+            "%sdata_1d_pde1_yx.txt",
+            prefix.c_str());
     std::string final_fn_x(buff);
 
-    ofs = std::ofstream(final_fn_t, std::ofstream::out);
-    std::ofstream ofs2(final_fn_x, std::ofstream::out);
-    printf("Exporting files '%s' and '%s' : %7.3f%%\r", final_fn_t.c_str(), final_fn_x.c_str(), 0.0);
-    for(i = 0; i < n_test_points; ++i){
+    ofs = std::ofstream(final_fn_t,
+                        std::ofstream::out);
+    std::ofstream ofs2(final_fn_x,
+                       std::ofstream::out);
+    printf("Exporting files '%s' and '%s' : %7.3f%%\r",
+           final_fn_t.c_str(),
+           final_fn_x.c_str(),
+           0.0);
+    for (i = 0; i < n_test_points; ++i) {
         x = frac * i + ts;
         t = frac * i + ts;
 
         double yt = std::sin(t);
-        double yx = std::pow(l4n::E, -0.707106781 * x) * std::sin( -0.707106781 * x );
+        double yx = std::pow(l4n::E,
+                             -0.707106781 * x) * std::sin(-0.707106781 * x);
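+        /* reference boundary values from main(): y(0, t) = sin(t), y(x, 0) = exp(-sqrt(0.5)*x) * sin(-sqrt(0.5)*x); 0.707106781 approximates sqrt(0.5) */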
 
         input = {0, t};
-        solution->eval_single(input, output);
+        solution->eval_single(input,
+                              output);
         double evalt = output[0];
 
         input = {x, 0};
-        solution->eval_single(input, output);
+        solution->eval_single(input,
+                              output);
         double evalx = output[0];
 
         ofs << i + 1 << " " << t << " " << yt << " " << evalt << " " << std::fabs(evalt - yt) << std::endl;
         ofs2 << i + 1 << " " << x << " " << yx << " " << evalx << " " << std::fabs(evalx - yx) << std::endl;
 
-        printf("Exporting files '%s' and '%s' : %7.3f%%\r", final_fn_t.c_str(), final_fn_x.c_str(), (100.0 * i) / (n_test_points - 1));
+        printf("Exporting files '%s' and '%s' : %7.3f%%\r",
+               final_fn_t.c_str(),
+               final_fn_x.c_str(),
+               (100.0 * i) / (n_test_points - 1));
         std::cout.flush();
     }
-    printf("Exporting files '%s' and '%s' : %7.3f%%\n", final_fn_t.c_str(), final_fn_x.c_str(), 100.0);
+    printf("Exporting files '%s' and '%s' : %7.3f%%\n",
+           final_fn_t.c_str(),
+           final_fn_x.c_str(),
+           100.0);
     std::cout.flush();
     ofs2.close();
     ofs.close();
 
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
 }
-void test_pde(double accuracy, size_t n_inner_neurons, size_t train_size, double ds, double de, size_t n_test_points, double ts, double te, size_t max_iters, size_t n_particles){
+
+void test_pde(double accuracy,
+              size_t n_inner_neurons,
+              size_t train_size,
+              double ds,
+              double de,
+              size_t n_test_points,
+              double ts,
+              double te,
+              size_t max_iters,
+              size_t n_particles) {
 
     /* do not change below */
     size_t n_inputs = 2;
     size_t n_equations = 3;
-    l4n::DESolver solver_01( n_equations, n_inputs, n_inner_neurons );
+    l4n::DESolver solver_01(n_equations,
+                            n_inputs,
+                            n_inner_neurons);
 
     /* SETUP OF THE EQUATIONS */
-    l4n::MultiIndex alpha_00( n_inputs );
-    l4n::MultiIndex alpha_01( n_inputs );
-    l4n::MultiIndex alpha_20( n_inputs );
+    l4n::MultiIndex alpha_00(n_inputs);
+    l4n::MultiIndex alpha_01(n_inputs);
+    l4n::MultiIndex alpha_20(n_inputs);
 
-    alpha_00.set_partial_derivative(0, 0);
-    alpha_01.set_partial_derivative(1, 1);
-    alpha_20.set_partial_derivative(0, 2);
+    alpha_00.set_partial_derivative(0,
+                                    0);
+    alpha_01.set_partial_derivative(1,
+                                    1);
+    alpha_20.set_partial_derivative(0,
+                                    2);
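+    /* with inputs (x, t): alpha_00 selects y, alpha_01 the first t-derivative y_t, alpha_20 the second x-derivative y_xx */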
 
     /* the governing differential equation */
-    solver_01.add_to_differential_equation( 0, alpha_20,  "1.0" );
-    solver_01.add_to_differential_equation( 0, alpha_01, "-1.0" );
+    solver_01.add_to_differential_equation(0,
+                                           alpha_20,
+                                           "1.0");
+    solver_01.add_to_differential_equation(0,
+                                           alpha_01,
+                                           "-1.0");
 
     /* dirichlet boundary condition */
-    solver_01.add_to_differential_equation( 1, alpha_00, "1.0" );
-    solver_01.add_to_differential_equation( 2, alpha_00, "1.0" );
+    solver_01.add_to_differential_equation(1,
+                                           alpha_00,
+                                           "1.0");
+    solver_01.add_to_differential_equation(2,
+                                           alpha_00,
+                                           "1.0");
 
 
     /* SETUP OF THE TRAINING DATA */
@@ -205,25 +281,31 @@ void test_pde(double accuracy, size_t n_inner_neurons, size_t train_size, double
     std::vector<double> test_bounds_2d = {ds, de, ds, de};
 
     /* GOVERNING EQUATION RHS */
-    auto f1 = [](std::vector<double>&input) -> std::vector<double> {
+    auto f1 = [](std::vector<double>& input) -> std::vector<double> {
         std::vector<double> output(1);
         output[0] = 0.0;
         return output;
     };
-    l4n::DataSet ds_00(test_bounds_2d, train_size, f1, 1);
+    l4n::DataSet ds_00(test_bounds_2d,
+                       train_size,
+                       f1,
+                       1);
 
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_t;
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_x;
     /* ISOTROPIC TRAIN SET */
     frac = (de - ds) / (train_size - 1);
-    for(unsigned int i = 0; i < train_size; ++i){
+    for (unsigned int i = 0; i < train_size; ++i) {
         inp = {0.0, frac * i};
         out = {std::sin(inp[1])};
-        data_vec_t.emplace_back(std::make_pair(inp, out));
+        data_vec_t.emplace_back(std::make_pair(inp,
+                                               out));
 
         inp = {frac * i, 0.0};
-        out = {std::pow(l4n::E, -0.707106781 * inp[0]) * std::sin( -0.707106781 * inp[0] )};
-        data_vec_x.emplace_back(std::make_pair(inp, out));
+        out = {std::pow(l4n::E,
+                        -0.707106781 * inp[0]) * std::sin(-0.707106781 * inp[0])};
+        data_vec_x.emplace_back(std::make_pair(inp,
+                                               out));
 
     }
     l4n::DataSet ds_t(&data_vec_t);
@@ -233,27 +315,63 @@ void test_pde(double accuracy, size_t n_inner_neurons, size_t train_size, double
 
 
     /* Placing the conditions into the solver */
-    solver_01.set_error_function( 0, l4n::ErrorFunctionType::ErrorFuncMSE, &ds_00 );
-    solver_01.set_error_function( 1, l4n::ErrorFunctionType::ErrorFuncMSE, &ds_t );
-    solver_01.set_error_function( 2, l4n::ErrorFunctionType::ErrorFuncMSE, &ds_x );
+    solver_01.set_error_function(0,
+                                 l4n::ErrorFunctionType::ErrorFuncMSE,
+                                 &ds_00);
+    solver_01.set_error_function(1,
+                                 l4n::ErrorFunctionType::ErrorFuncMSE,
+                                 &ds_t);
+    solver_01.set_error_function(2,
+                                 l4n::ErrorFunctionType::ErrorFuncMSE,
+                                 &ds_x);
 
     /* Solving the equation */
-    optimize_via_particle_swarm( solver_01, alpha_00, max_iters, n_particles );
-    export_solution( n_test_points, te, ts, solver_01 , alpha_00, alpha_01, alpha_20, "particle_" );
-
-    optimize_via_gradient_descent( solver_01, accuracy );
-    export_solution( n_test_points, te, ts, solver_01 , alpha_00, alpha_01, alpha_20, "gradient_" );
+    optimize_via_particle_swarm(solver_01,
+                                alpha_00,
+                                max_iters,
+                                n_particles);
+    export_solution(n_test_points,
+                    te,
+                    ts,
+                    solver_01,
+                    alpha_00,
+                    alpha_01,
+                    alpha_20,
+                    "particle_");
+
+    optimize_via_gradient_descent(solver_01,
+                                  accuracy);
+    export_solution(n_test_points,
+                    te,
+                    ts,
+                    solver_01,
+                    alpha_00,
+                    alpha_01,
+                    alpha_20,
+                    "gradient_");
 }
 
 int main() {
     std::cout << "Running lib4neuro Partial Differential Equation example   1" << std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
-    std::cout << "          Governing equation: y_xx - y_t = 0,                                   for (x, t) in [0, 1] x [0, 1]" << std::endl;
-    std::cout << "Dirichlet boundary condition:    y(0, t) = sin(t),                              for t in [0, 1]" << std::endl;
-    std::cout << "Dirichlet boundary condition:    y(x, 0) = exp(-sqrt(0.5)x) * sin(-sqrt(0.5)x), for x in [0, 1]" << std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
-    std::cout << "Expressing solution as y(x, t) = sum over [a_i / (1 + exp(bi - wxi*x - wti*t))], i in [1, n], where n is the number of hidden neurons" <<std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
+    std::cout
+            << "          Governing equation: y_xx - y_t = 0,                                   for (x, t) in [0, 1] x [0, 1]"
+            << std::endl;
+    std::cout << "Dirichlet boundary condition:    y(0, t) = sin(t),                              for t in [0, 1]"
+              << std::endl;
+    std::cout << "Dirichlet boundary condition:    y(x, 0) = exp(-sqrt(0.5)x) * sin(-sqrt(0.5)x), for x in [0, 1]"
+              << std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
+    std::cout
+            << "Expressing solution as y(x, t) = sum over [a_i / (1 + exp(bi - wxi*x - wti*t))], i in [1, n], where n is the number of hidden neurons"
+            << std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
 
     unsigned int n_inner_neurons = 4;
     unsigned int train_size = 50;
@@ -267,7 +385,16 @@ int main() {
 
     size_t particle_swarm_max_iters = 1000;
     size_t n_particles = 50;
-    test_pde(accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te, particle_swarm_max_iters, n_particles);
+    test_pde(accuracy,
+             n_inner_neurons,
+             train_size,
+             ds,
+             de,
+             test_size,
+             ts,
+             te,
+             particle_swarm_max_iters,
+             n_particles);
 
     return 0;
 }
diff --git a/src/examples/network_serialization.cpp b/src/examples/network_serialization.cpp
index 0a733500df4481a9823c82ba9b22559156cbd1e7..c1d2cb51712bb1d6ce582082d45d3110ef1751a8 100644
--- a/src/examples/network_serialization.cpp
+++ b/src/examples/network_serialization.cpp
@@ -11,15 +11,21 @@
 
 int main() {
     std::cout << "Running lib4neuro Serialization example   1" << std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
     std::cout << "First, it finds an approximate solution to the system of equations below:" << std::endl;
     std::cout << "0 * w1 + 1 * w2 = 0.50 + b" << std::endl;
     std::cout << "1 * w1 + 0.5*w2 = 0.75 + b" << std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
-    std::cout << "Then it stores the network with its weights into a file via serialization" <<std::endl;
-    std::cout << "Then it loads the network from a file via serialization" <<std::endl;
-    std::cout << "Finally it tests the loaded network parameters by evaluating the error function" <<std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
+    std::cout << "Then it stores the network with its weights into a file via serialization" << std::endl;
+    std::cout << "Then it loads the network from a file via serialization" << std::endl;
+    std::cout << "Finally it tests the loaded network parameters by evaluating the error function" << std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
 
     /* TRAIN DATA DEFINITION */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
@@ -27,11 +33,13 @@ int main() {
 
     inp = {0, 1};
     out = {0.5};
-    data_vec.emplace_back(std::make_pair(inp, out));
+    data_vec.emplace_back(std::make_pair(inp,
+                                         out));
 
     inp = {1, 0.5};
     out = {0.75};
-    data_vec.emplace_back(std::make_pair(inp, out));
+    data_vec.emplace_back(std::make_pair(inp,
+                                         out));
 
     l4n::DataSet ds(&data_vec);
 
@@ -48,18 +56,25 @@ int main() {
 
 
     /* Adding neurons to the net */
-    size_t idx1 = net.add_neuron(i1, l4n::BIAS_TYPE::NO_BIAS);
-    size_t idx2 = net.add_neuron(i2, l4n::BIAS_TYPE::NO_BIAS);
-    size_t idx3 = net.add_neuron(o1, l4n::BIAS_TYPE::NEXT_BIAS);
+    size_t idx1 = net.add_neuron(i1,
+                                 l4n::BIAS_TYPE::NO_BIAS);
+    size_t idx2 = net.add_neuron(i2,
+                                 l4n::BIAS_TYPE::NO_BIAS);
+    size_t idx3 = net.add_neuron(o1,
+                                 l4n::BIAS_TYPE::NEXT_BIAS);
 
     std::vector<double>* bv = net.get_parameter_ptr_biases();
-    for(size_t i = 0; i < 1; ++i){
+    for (size_t i = 0; i < 1; ++i) {
         bv->at(i) = 1.0;
     }
 
     /* Adding connections */
-    net.add_connection_simple(idx1, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
-    net.add_connection_simple(idx2, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
+    net.add_connection_simple(idx1,
+                              idx3,
+                              l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
+    net.add_connection_simple(idx2,
+                              idx3,
+                              l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
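+    /* both inputs now feed the single biased output neuron, so the net realizes one affine map of (in1, in2) */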
 
     //net.randomize_weights();
 
@@ -75,12 +90,13 @@ int main() {
     net.specify_output_neurons(net_output_neurons_indices);
 
     /* ERROR FUNCTION SPECIFICATION */
-    l4n::MSE mse(&net, &ds);
+    l4n::MSE mse(&net,
+                 &ds);
 
     /* TRAINING METHOD SETUP */
     std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
 
-    for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
+    for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
         domain_bounds[2 * i] = -10;
         domain_bounds[2 * i + 1] = 10;
     }
@@ -111,50 +127,67 @@ int main() {
             n_particles,
             iter_max
     );
-    swarm_01.optimize( mse );
+    swarm_01.optimize(mse);
 
     std::vector<double>* parameters = swarm_01.get_parameters();
     net.copy_parameter_space(swarm_01.get_parameters());
 
-    printf("w1 = %10.7f\n", parameters->at( 0 ));
-    printf("w2 = %10.7f\n", parameters->at( 1 ));
-    printf(" b = %10.7f\n", parameters->at( 2 ));
+    printf("w1 = %10.7f\n",
+           parameters->at(0));
+    printf("w2 = %10.7f\n",
+           parameters->at(1));
+    printf(" b = %10.7f\n",
+           parameters->at(2));
 
 
     /* SAVE NETWORK TO THE FILE */
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
     std::cout << "Network generated by the example" << std::endl;
     net.write_stats();
     net.save_text("saved_network.4nt");
-    std::cout << "--------------------------------------------------------------------------------------------------------------------------------------------" <<std::endl;
+    std::cout
+            << "--------------------------------------------------------------------------------------------------------------------------------------------"
+            << std::endl;
     double error = 0.0;
     inp = {0, 1};
-    net.eval_single( inp, out );
+    net.eval_single(inp,
+                    out);
     error += (0.5 - out[0]) * (0.5 - out[0]);
     std::cout << "x = (0,   1), expected output: 0.50, real output: " << out[0] << std::endl;
 
     inp = {1, 0.5};
-    net.eval_single( inp, out );
+    net.eval_single(inp,
+                    out);
     error += (0.75 - out[0]) * (0.75 - out[0]);
     std::cout << "x = (1, 0.5), expected output: 0.75, real output: " << out[0] << std::endl;
     std::cout << "Error of the network: " << 0.5 * error << std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
 
     std::cout << "Network loaded from a file" << std::endl;
     l4n::NeuralNetwork net2("saved_network.4nt");
     net2.write_stats();
-    std::cout << "--------------------------------------------------------------------------------------------------------------------------------------------" <<std::endl;
+    std::cout
+            << "--------------------------------------------------------------------------------------------------------------------------------------------"
+            << std::endl;
     error = 0.0;
     inp = {0, 1};
-    net2.eval_single( inp, out );
+    net2.eval_single(inp,
+                     out);
     error += (0.5 - out[0]) * (0.5 - out[0]);
     std::cout << "x = (0,   1), expected output: 0.50, real output: " << out[0] << std::endl;
 
     inp = {1, 0.5};
-    net2.eval_single( inp, out );
+    net2.eval_single(inp,
+                     out);
     error += (0.75 - out[0]) * (0.75 - out[0]);
     std::cout << "x = (1, 0.5), expected output: 0.75, real output: " << out[0] << std::endl;
     std::cout << "Error of the network: " << 0.5 * error << std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
     return 0;
 }
\ No newline at end of file
diff --git a/src/examples/seminar.cpp b/src/examples/seminar.cpp
index 3dd4e75a88b73e8c6bcfdcc1234b28a285856f07..f5eddb59a6cdc9d878785233e16e06a5344a1d78 100644
--- a/src/examples/seminar.cpp
+++ b/src/examples/seminar.cpp
@@ -15,31 +15,42 @@
 int main() {
 
     std::cout << std::endl << "Running lib4neuro Moldyn Seminar example" << std::endl;
-    std::cout << "********************************************************************************************************************************************" <<std::endl;
+    std::cout
+            << "********************************************************************************************************************************************"
+            << std::endl;
 
 
     l4n::NeuralNetwork XOR;
     std::shared_ptr<l4n::NeuronLinear> in1 = std::make_shared<l4n::NeuronLinear>();
     std::shared_ptr<l4n::NeuronLinear> in2 = std::make_shared<l4n::NeuronLinear>();
-    size_t i1 = XOR.add_neuron( in1, l4n::BIAS_TYPE::NO_BIAS );
-    size_t i2 = XOR.add_neuron( in2, l4n::BIAS_TYPE::NO_BIAS );
+    size_t i1 = XOR.add_neuron(in1,
+                               l4n::BIAS_TYPE::NO_BIAS);
+    size_t i2 = XOR.add_neuron(in2,
+                               l4n::BIAS_TYPE::NO_BIAS);
 
     std::shared_ptr<l4n::NeuronLogistic> hn1 = std::make_shared<l4n::NeuronLogistic>();
     std::shared_ptr<l4n::NeuronLogistic> hn2 = std::make_shared<l4n::NeuronLogistic>();
-    size_t h1 = XOR.add_neuron( hn1 );
-    size_t h2 = XOR.add_neuron( hn2 );
+    size_t h1 = XOR.add_neuron(hn1);
+    size_t h2 = XOR.add_neuron(hn2);
 
     std::shared_ptr<l4n::NeuronLinear> on1 = std::make_shared<l4n::NeuronLinear>();
-    size_t o1 = XOR.add_neuron( on1, l4n::BIAS_TYPE::NO_BIAS );
+    size_t o1 = XOR.add_neuron(on1,
+                               l4n::BIAS_TYPE::NO_BIAS);
 
-    XOR.add_connection_simple( i1, h1 );
-    XOR.add_connection_simple( i2, h1 );
+    XOR.add_connection_simple(i1,
+                              h1);
+    XOR.add_connection_simple(i2,
+                              h1);
 
-    XOR.add_connection_simple( i1, h2 );
-    XOR.add_connection_simple( i2, h2 );
+    XOR.add_connection_simple(i1,
+                              h2);
+    XOR.add_connection_simple(i2,
+                              h2);
 
-    XOR.add_connection_simple( h1, o1 );
-    XOR.add_connection_simple( h2, o1 );
+    XOR.add_connection_simple(h1,
+                              o1);
+    XOR.add_connection_simple(h2,
+                              o1);
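+    /* topology: 2 linear inputs -> 2 logistic hidden neurons -> 1 linear output, fully connected between layers */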
 
     /* TRAIN DATA DEFINITION */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
@@ -47,19 +58,23 @@ int main() {
 
     inp = {0, 0};
     out = {0};
-    data_vec.emplace_back(std::make_pair(inp, out));
+    data_vec.emplace_back(std::make_pair(inp,
+                                         out));
 
     inp = {0, 1};
     out = {1};
-    data_vec.emplace_back(std::make_pair(inp, out));
+    data_vec.emplace_back(std::make_pair(inp,
+                                         out));
 
     inp = {1, 0};
     out = {1};
-    data_vec.emplace_back(std::make_pair(inp, out));
+    data_vec.emplace_back(std::make_pair(inp,
+                                         out));
 
     inp = {1, 1};
     out = {0};
-    data_vec.emplace_back(std::make_pair(inp, out));
+    data_vec.emplace_back(std::make_pair(inp,
+                                         out));
 
     l4n::DataSet ds(&data_vec);
 
@@ -76,14 +91,15 @@ int main() {
 
 
     /* ERROR FUNCTION SPECIFICATION */
-    l4n::MSE mse(&XOR, &ds);
+    l4n::MSE mse(&XOR,
+                 &ds);
 
 
 
     /* TRAINING METHOD SETUP */
     std::vector<double> domain_bounds(2 * (XOR.get_n_weights() + XOR.get_n_biases()));
 
-    for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
+    for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
         domain_bounds[2 * i] = -10;
         domain_bounds[2 * i + 1] = 10;
     }
@@ -114,33 +130,36 @@ int main() {
             n_particles,
             iter_max
     );
-    swarm_01.optimize( mse );
+    swarm_01.optimize(mse);
 
-    XOR.copy_parameter_space(swarm_01.get_parameters( ));
+    XOR.copy_parameter_space(swarm_01.get_parameters());
 
     /* ERROR CALCULATION */
     double error = 0.0;
     inp = {0, 0};
-    XOR.eval_single( inp, out );
+    XOR.eval_single(inp,
+                    out);
     error += (0 - out[0]) * (0 - out[0]);
     std::cout << "x = (0,   0), expected output: 0, real output: " << out[0] << std::endl;
 
     inp = {0, 1};
-    XOR.eval_single( inp, out );
+    XOR.eval_single(inp,
+                    out);
     error += (1 - out[0]) * (1 - out[0]);
     std::cout << "x = (0,   1), expected output: 1, real output: " << out[0] << std::endl;
 
     inp = {1, 0};
-    XOR.eval_single( inp, out );
+    XOR.eval_single(inp,
+                    out);
     error += (1 - out[0]) * (1 - out[0]);
     std::cout << "x = (1,   0), expected output: 1, real output: " << out[0] << std::endl;
 
     inp = {1, 1};
-    XOR.eval_single( inp, out );
+    XOR.eval_single(inp,
+                    out);
     error += (0 - out[0]) * (0 - out[0]);
     std::cout << "x = (1,   1), expected output: 0, real output: " << out[0] << std::endl;
 
 
-
     return 0;
 }
diff --git a/src/examples/x2_fitting.cpp b/src/examples/x2_fitting.cpp
index 6e00de091e9a5226cb500e0581de58a6ea899df1..7d8d3fb2b2b82dd8dc8164caf3c980c310fec151 100644
--- a/src/examples/x2_fitting.cpp
+++ b/src/examples/x2_fitting.cpp
@@ -5,20 +5,27 @@
 
 int main() {
 
-    l4n::CSVReader reader("x2_data.txt", "\t", true);
+    l4n::CSVReader reader("x2_data.txt",
+                          "\t",
+                          true);
     reader.read();
 
     std::vector<unsigned int> input_ind = {0};
     std::vector<unsigned int> output_ind = {1};
-    std::shared_ptr<l4n::DataSet> ds = reader.get_data_set(&input_ind, &output_ind);
+    std::shared_ptr<l4n::DataSet> ds = reader.get_data_set(&input_ind,
+                                                           &output_ind);
 
     std::vector<unsigned int> neuron_numbers_in_layers = {1, 15, 1};
     std::vector<l4n::NEURON_TYPE> hidden_type_v = {l4n::NEURON_TYPE::LOGISTIC};
-    l4n::FullyConnectedFFN net(&neuron_numbers_in_layers, &hidden_type_v);
+    l4n::FullyConnectedFFN net(&neuron_numbers_in_layers,
+                               &hidden_type_v);
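+    /* fully connected 1-15-1 net; the 15 hidden neurons use the logistic activation */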
 
-    l4n::MSE mse(&net, ds.get());
+    l4n::MSE mse(&net,
+                 ds.get());
 
-    l4n::GradientDescent gs(1e-5, 20, 200000);
+    l4n::GradientDescent gs(1e-5,
+                            20,
+                            200000);
 
     net.randomize_parameters();
     gs.optimize(mse);
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt
index 97f08efe2d1dd6dbafa9227bf53f5df519219b4f..7e5c34f1ee7b97cd2ccb866a6a2af0fbe0b51b58 100644
--- a/src/tests/CMakeLists.txt
+++ b/src/tests/CMakeLists.txt
@@ -61,23 +61,23 @@ target_include_directories(DESolver_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_
 
 
 set_target_properties(
-    linear_neuron_test
-    constant_neuron_test
-    binary_neuron_test
-    logistic_neuron_test
-    connectionFunctionGeneral_test
-    connection_Function_identity_test
-    neural_network_test
-    dataset_test
-    particle_swarm_test
-    particle_test
-    NeuralNetworkSum_test
-    errorfunction_test
-    DESolver_test
-#    GradientDescent_test
-
-
-    PROPERTIES
+        linear_neuron_test
+        constant_neuron_test
+        binary_neuron_test
+        logistic_neuron_test
+        connectionFunctionGeneral_test
+        connection_Function_identity_test
+        neural_network_test
+        dataset_test
+        particle_swarm_test
+        particle_test
+        NeuralNetworkSum_test
+        errorfunction_test
+        DESolver_test
+        #    GradientDescent_test
+
+
+        PROPERTIES
         ARCHIVE_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib"
         LIBRARY_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib"
         RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/unit-tests"
diff --git a/src/tests/ConnectionFunctionGeneral_test.cpp b/src/tests/ConnectionFunctionGeneral_test.cpp
index 7675d51f9af5899011edbd2424f1cd68a7a36e94..5784a8628fe584c4ef32d5322c91328c4ba4560c 100644
--- a/src/tests/ConnectionFunctionGeneral_test.cpp
+++ b/src/tests/ConnectionFunctionGeneral_test.cpp
@@ -22,28 +22,28 @@ BOOST_AUTO_TEST_SUITE(Connection_test)
 /**
  * Test of constructor of Connection
  */
-    BOOST_AUTO_TEST_CASE(Connection_construction__test) {
+BOOST_AUTO_TEST_CASE(Connection_construction__test) {
 
-        BOOST_CHECK_NO_THROW(ConnectionFunctionGeneral *functionGeneral = new ConnectionFunctionGeneral());
+        BOOST_CHECK_NO_THROW(ConnectionFunctionGeneral* functionGeneral = new ConnectionFunctionGeneral());
 
         std::vector<size_t> param_indices;
         param_indices.push_back(0);
         std::string paramToFunction = "this do nothing! Why is it here?";
         BOOST_CHECK_NO_THROW(ConnectionFunctionGeneral *functionGeneral = new ConnectionFunctionGeneral(param_indices,
-                                                                                                        paramToFunction));
-    }
+                                                                                                        paramToFunction));
+}
 
 
-    BOOST_AUTO_TEST_CASE(Connection_eval_test) {
-        ConnectionFunctionGeneral *functionGeneral = new ConnectionFunctionGeneral();
+BOOST_AUTO_TEST_CASE(Connection_eval_test) {
+        ConnectionFunctionGeneral* functionGeneral = new ConnectionFunctionGeneral();
         //TODO implementation is not finished yet
         std::vector<double> parameter_space;
         BOOST_CHECK_EQUAL(0, functionGeneral->eval(parameter_space));
-    }
+}
 
-    BOOST_AUTO_TEST_CASE(Connection_eval_partial_derivative_test) {
+BOOST_AUTO_TEST_CASE(Connection_eval_partial_derivative_test) {
         //TODO function not implemented yet
-    }
+}
 
 
 BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/tests/ConnectionFunctionIdentity_test.cpp b/src/tests/ConnectionFunctionIdentity_test.cpp
index bb859971abbbf08e01edebe72d88694d25ad3b4f..0bbd5ee22a4c1397c21207620b665b8fbb868f71 100644
--- a/src/tests/ConnectionFunctionIdentity_test.cpp
+++ b/src/tests/ConnectionFunctionIdentity_test.cpp
@@ -17,22 +17,22 @@
  */
 BOOST_AUTO_TEST_SUITE(ConnectionWeightIdentity_test)
 
-    /**
-     * Test of correct construction of ConnectionFunctionIdentity
-     */
-    BOOST_AUTO_TEST_CASE(ConnectionWeightIdentity_construction_test) {
-        std::vector<double> weight_array = {1, 2, 3, 4, 5};
+/**
+ * Test of correct construction of ConnectionFunctionIdentity
+ */
+BOOST_AUTO_TEST_CASE(ConnectionWeightIdentity_construction_test) {
+        std::vector<double> weight_array = {1, 2, 3, 4, 5};
         //Test that no exception is thrown when constructing a ConnectionFunctionIdentity
-        BOOST_CHECK_NO_THROW(ConnectionFunctionIdentity *CFI = new ConnectionFunctionIdentity() );
-        BOOST_CHECK_NO_THROW(ConnectionFunctionIdentity *CFI = new ConnectionFunctionIdentity(2) );
+        BOOST_CHECK_NO_THROW(ConnectionFunctionIdentity *CFI = new ConnectionFunctionIdentity());
+        BOOST_CHECK_NO_THROW(ConnectionFunctionIdentity *CFI = new ConnectionFunctionIdentity(2));
 
-    }
+}
 
-    /**
-     * Test of eval method
-     */
-    BOOST_AUTO_TEST_CASE(ConnectionWeightIdentity_eval_test) {
-        ConnectionFunctionIdentity *CFI1 = new ConnectionFunctionIdentity();
+/**
+ * Test of eval method
+ */
+BOOST_AUTO_TEST_CASE(ConnectionWeightIdentity_eval_test) {
+        ConnectionFunctionIdentity* CFI1 = new ConnectionFunctionIdentity();
         ConnectionFunctionIdentity *CFI2 = new ConnectionFunctionIdentity(0);
         ConnectionFunctionIdentity *CFI3 = new ConnectionFunctionIdentity(2);
 
@@ -44,6 +44,6 @@ BOOST_AUTO_TEST_SUITE(ConnectionWeightIdentity_test)
         BOOST_CHECK_EQUAL(5, CFI2->eval(parameter_space));
         BOOST_CHECK_THROW(CFI3->eval(parameter_space), std::out_of_range);
 
-    }
+}
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/tests/DESolver_test.cpp b/src/tests/DESolver_test.cpp
index 92afe68fa2bcf66858e01180151bf81fd479d1cc..34f2df2b7fbac58af5ed2547fde9a4e440459ac2 100644
--- a/src/tests/DESolver_test.cpp
+++ b/src/tests/DESolver_test.cpp
@@ -20,39 +20,39 @@ using namespace lib4neuro;
  */
 BOOST_AUTO_TEST_SUITE(DESolver_test)
 
-    /**
-     * Test of MultiIndex construction test
-     */
-    BOOST_AUTO_TEST_CASE(MultiIndex_construction_test) {
+/**
+ * Test of MultiIndex construction
+ */
+BOOST_AUTO_TEST_CASE(MultiIndex_construction_test) {
         BOOST_CHECK_NO_THROW(MultiIndex multiIndex(2));
-    }
+}
 
-    /**
-     * Test of MultiIndex set_partial_deravitive method
-     */
-    BOOST_AUTO_TEST_CASE(MultiIndex_set_partial_derivative_test) {
+/**
+ * Test of MultiIndex set_partial_derivative method
+ */
+BOOST_AUTO_TEST_CASE(MultiIndex_set_partial_derivative_test) {
         MultiIndex multiIndex(2);
         BOOST_CHECK_NO_THROW(multiIndex.set_partial_derivative(0, 1));
         BOOST_CHECK_NO_THROW(multiIndex.set_partial_derivative(1, 2));
         //BOOST_CHECK_THROW(multiIndex.set_partial_derivative(2, 3), std::out_of_range);
-    }
+}
 
-    /**
-     * Testo of MultiIndex get_partial_derivative_degrees method
-     */
-    BOOST_AUTO_TEST_CASE(MultiIndex_get_partial_derivative_degrees_test) {
+/**
+ * Test of MultiIndex get_partial_derivatives_degrees method
+ */
+BOOST_AUTO_TEST_CASE(MultiIndex_get_partial_derivative_degrees_test) {
         MultiIndex multiIndex(2);
         multiIndex.set_partial_derivative(0, 1);
         multiIndex.set_partial_derivative(1, 2);
 
         BOOST_CHECK_EQUAL(1, multiIndex.get_partial_derivatives_degrees()->at(0));
         BOOST_CHECK_EQUAL(2, multiIndex.get_partial_derivatives_degrees()->at(1));
-    }
+}
 
-    /**
-     * Test of MultiIndex operator< method
-     */
-    BOOST_AUTO_TEST_CASE(MultiIndex_operator_test) {
+/**
+ * Test of MultiIndex operator< method
+ */
+BOOST_AUTO_TEST_CASE(MultiIndex_operator_test) {
         MultiIndex multiIndex1(1);
         multiIndex1.set_partial_derivative(0, 1);
         MultiIndex multiIndex2(2);
@@ -65,20 +65,20 @@ BOOST_AUTO_TEST_SUITE(DESolver_test)
         //BOOST_CHECK_THROW(multiIndex2.operator<(multiIndex1), std::out_of_range);
         BOOST_CHECK(!multiIndex1.operator<(multiIndex1));
         BOOST_CHECK(multiIndex1.operator<((multiIndex3)));
-    }
+}
 
-    /**
-     * Test of MultiIndex toString method
-     */
-    BOOST_AUTO_TEST_CASE(MultiIndex_toString_test) {
+/**
+ * Test of MultiIndex to_string method
+ */
+BOOST_AUTO_TEST_CASE(MultiIndex_toString_test) {
         MultiIndex multiIndex(2);
         BOOST_CHECK_EQUAL("0, 0", multiIndex.to_string());
-    }
+}
 
-    /**
-     * Test of MultiIndex get_degree method
-     */
-    BOOST_AUTO_TEST_CASE(MultiIndex_get_degree_test) {
+/**
+ * Test of MultiIndex get_degree method
+ */
+BOOST_AUTO_TEST_CASE(MultiIndex_get_degree_test) {
         MultiIndex multiIndex(2);
         BOOST_CHECK_EQUAL(0, multiIndex.get_degree());
 
@@ -86,38 +86,41 @@ BOOST_AUTO_TEST_SUITE(DESolver_test)
         multiIndex.set_partial_derivative(1, 3);
 
         BOOST_CHECK_EQUAL(4, multiIndex.get_degree());
-    }
+}
 
-    /**
-     * Test of DESolver construction
-     */
-    BOOST_AUTO_TEST_CASE(DESolver_construction_test) {
-        BOOST_CHECK_THROW(DESolver(0, 1, 1), std::invalid_argument);
+/**
+ * Test of DESolver construction
+ */
+BOOST_AUTO_TEST_CASE(DESolver_construction_test) {
+        BOOST_CHECK_THROW(DESolver(0,
+                                   1,
+                                   1),
+                          std::invalid_argument);
         BOOST_CHECK_THROW(DESolver(1, 0, 1), std::invalid_argument);
         BOOST_CHECK_THROW(DESolver(1, 1, 0), std::invalid_argument);
         BOOST_CHECK_NO_THROW(DESolver deSolver(1, 1, 1));
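+        /* all three constructor arguments (n_equations, n_inputs, n_inner_neurons in the example programs) must be nonzero */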
 
-		//TODO fix it
-		//std::stringstream buffer1;
-		//std::streambuf * old1 = std::cout.rdbuf(buffer1.rdbuf());
-		//DESolver deSolver(1, 1, 1);
-		//std::string text = buffer1.str();
-  //      
-  //     // BOOST_CHECK(text._Equal("Differential Equation Solver with 1 equations\n--------------------------------------------------------------------------\nConstructing NN structure representing the solution [1 input neurons][1 inner neurons][1 output neurons]...\n  adding a connection between input neuron  0 and inner neuron  0, weight index 0\n  adding a connection between inner neuron  0 and output neuron  0, weight index 1\ndone\n\n"));
-		//std::cout.rdbuf(old1);
-    }
-
-    /**
-     * Test of DESolver get_solution method
-     */
-    BOOST_AUTO_TEST_CASE(DESolver_get_solution_test) {
+        //TODO fix it
+        //std::stringstream buffer1;
+        //std::streambuf * old1 = std::cout.rdbuf(buffer1.rdbuf());
+        //DESolver deSolver(1, 1, 1);
+        //std::string text = buffer1.str();
+        //
+        //     // BOOST_CHECK(text._Equal("Differential Equation Solver with 1 equations\n--------------------------------------------------------------------------\nConstructing NN structure representing the solution [1 input neurons][1 inner neurons][1 output neurons]...\n  adding a connection between input neuron  0 and inner neuron  0, weight index 0\n  adding a connection between inner neuron  0 and output neuron  0, weight index 1\ndone\n\n"));
+        //std::cout.rdbuf(old1);
+}
+
+/**
+ * Test of DESolver get_solution method
+ */
+BOOST_AUTO_TEST_CASE(DESolver_get_solution_test) {
         DESolver deSolver(1, 1, 1);
         MultiIndex *alpha = new MultiIndex(1);
         BOOST_CHECK_EQUAL(1, deSolver.get_solution(*alpha)->get_n_inputs());
         BOOST_CHECK_EQUAL(1, deSolver.get_solution(*alpha)->get_n_outputs());
-    }
+}
 
-    BOOST_AUTO_TEST_CASE(DESolver_add_eq_test){
+BOOST_AUTO_TEST_CASE(DESolver_add_eq_test) {
         /*DESolver *deSolver = new DESolver(1,1,1);
         MultiIndex *multiIndex = new MultiIndex(2);
         multiIndex->set_partial_derivative(0,1);
@@ -137,6 +140,6 @@ BOOST_AUTO_TEST_SUITE(DESolver_test)
         std::vector<double> weights;
         weights.push_back(1.0);
         BOOST_CHECK_EQUAL(64,deSolver->eval_total_error(weights));*/
-    }
-	
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/tests/DataSet_test.cpp b/src/tests/DataSet_test.cpp
index b085b99e8eecde5785cab1777444e4ee1c387e27..8d88527239b1efd65f68fdac5ade31ce7c856265 100644
--- a/src/tests/DataSet_test.cpp
+++ b/src/tests/DataSet_test.cpp
@@ -8,19 +8,19 @@
 #define BOOST_TEST_MODULE DataSet_test
 
 #ifdef _WINDOWS
-	#include <boost/test/included/unit_test.hpp>
+#include <boost/test/included/unit_test.hpp>
 #endif
 
-	#ifndef BOOST_TEST_DYN_LINK
-	#define BOOST_TEST_DYN_LINK
-	#endif
+#ifndef BOOST_TEST_DYN_LINK
+#define BOOST_TEST_DYN_LINK
+#endif
 
-	#ifndef BOOST_TEST_NO_MAIN
-	#define BOOST_TEST_NO_MAIN
-	#endif
+#ifndef BOOST_TEST_NO_MAIN
+#define BOOST_TEST_NO_MAIN
+#endif
 
-	#include <boost/test/unit_test.hpp>
-	#include <boost/test/output_test_stream.hpp>
+#include <boost/test/unit_test.hpp>
+#include <boost/test/output_test_stream.hpp>
 
 
 #include "../DataSet/DataSet.h"
@@ -34,22 +34,21 @@
 BOOST_AUTO_TEST_SUITE(DataSet_test)
 
 
-
 /**
  * Test of lib4neuro::DataSet constructor with filepath parameter
  */
-    BOOST_AUTO_TEST_CASE(DataSet_construction_from_file_test) {
+BOOST_AUTO_TEST_CASE(DataSet_construction_from_file_test) {
         //test of exception with non-existing file path
         //TODO resolve exception throw
         //lib4neuro::DataSet DataSet("file/unknown");
 
-		//BOOST_CHECK_THROW(lib4neuro::DataSet DataSet("file unknown"), std::out_of_range);// boost::archive::archive_exception::input_stream_error);
-    }
+        //BOOST_CHECK_THROW(lib4neuro::DataSet DataSet("file unknown"), std::out_of_range);// boost::archive::archive_exception::input_stream_error);
+}
 
 /**
  * Test of DataSet constructor with vector parameter
  */
-    BOOST_AUTO_TEST_CASE(DataSet_construction_from_vector_test) {
+BOOST_AUTO_TEST_CASE(DataSet_construction_from_vector_test) {
         std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
         std::vector<double> inp, out;
 
@@ -64,7 +63,7 @@ BOOST_AUTO_TEST_SUITE(DataSet_test)
 
         //test that no exception is thrown when creating a DataSet object
         BOOST_CHECK_NO_THROW(new lib4neuro::DataSet(&data_vec));
-    }
+}
 
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/tests/ErrorFunctions_test.cpp b/src/tests/ErrorFunctions_test.cpp
index 02d5b4bb18c0ce45f22f5ff021e1c63abd9320dc..2e5fe79f36adc05c4b7fbf30c2da1c7dff87890f 100644
--- a/src/tests/ErrorFunctions_test.cpp
+++ b/src/tests/ErrorFunctions_test.cpp
@@ -12,58 +12,116 @@
 
 using namespace lib4neuro;
 
-MOCK_BASE_CLASS(mock_network, lib4neuro::NeuralNetwork)
+MOCK_BASE_CLASS(mock_network, lib4neuro::NeuralNetwork)
 {
-	MOCK_METHOD(get_subnet, 2)
-	MOCK_METHOD(add_neuron, 3)
-	MOCK_METHOD(add_connection_simple, 4)
-	MOCK_METHOD(add_existing_connection, 4)
-	MOCK_METHOD(copy_parameter_space, 1)
-	MOCK_METHOD(set_parameter_space_pointers, 1)
-	MOCK_METHOD(eval_single, 3)
-	MOCK_METHOD(add_to_gradient_single, 4)
-	MOCK_METHOD(randomize_weights, 0)
-	MOCK_METHOD(randomize_biases, 0)
-	MOCK_METHOD(randomize_parameters, 0)
-	MOCK_METHOD(get_n_inputs, 0)
-	MOCK_METHOD(get_n_outputs, 0)
-	MOCK_METHOD(get_n_weights, 0)
-	MOCK_METHOD(get_n_biases, 0)
-	MOCK_METHOD(get_neuron_bias_index, 1)
-	MOCK_METHOD(get_n_neurons, 0)
-	MOCK_METHOD(specify_input_neurons, 1)
-	MOCK_METHOD(specify_output_neurons, 1)
-	MOCK_METHOD(get_parameter_ptr_biases, 0)
-	MOCK_METHOD(get_parameter_ptr_weights, 0)
-	MOCK_METHOD(save_text, 1)
-	MOCK_METHOD(write_weights, 0, void(), id1)
-	MOCK_METHOD(write_weights, 1, void(std::string), id2)
-	MOCK_METHOD(write_weights, 1, void(std::ofstream*), id3)
-	MOCK_METHOD(write_biases, 0, void(), id4)
-	MOCK_METHOD(write_biases, 1, void(std::string), id5)
-	MOCK_METHOD(write_biases, 1, void(std::ofstream*), id6)
-	MOCK_METHOD(write_stats, 0, void(), id7)
-	MOCK_METHOD(write_stats, 1, void(std::string), id8)
-	MOCK_METHOD(write_stats, 1, void(std::ofstream*), id9)
+    MOCK_METHOD(get_subnet, 2)
+    MOCK_METHOD(add_neuron, 3)
+    MOCK_METHOD(add_connection_simple, 4)
+    MOCK_METHOD(add_existing_connection, 4)
+    MOCK_METHOD(copy_parameter_space, 1)
+    MOCK_METHOD(set_parameter_space_pointers, 1)
+    MOCK_METHOD(eval_single, 3)
+    MOCK_METHOD(add_to_gradient_single, 4)
+    MOCK_METHOD(randomize_weights, 0)
+    MOCK_METHOD(randomize_biases, 0)
+    MOCK_METHOD(randomize_parameters, 0)
+    MOCK_METHOD(get_n_inputs, 0)
+    MOCK_METHOD(get_n_outputs, 0)
+    MOCK_METHOD(get_n_weights, 0)
+    MOCK_METHOD(get_n_biases, 0)
+    MOCK_METHOD(get_neuron_bias_index, 1)
+    MOCK_METHOD(get_n_neurons, 0)
+    MOCK_METHOD(specify_input_neurons, 1)
+    MOCK_METHOD(specify_output_neurons, 1)
+    MOCK_METHOD(get_parameter_ptr_biases, 0)
+    MOCK_METHOD(get_parameter_ptr_weights, 0)
+    MOCK_METHOD(save_text, 1)
+    MOCK_METHOD(write_weights, 0, void(), id1)
+    MOCK_METHOD(write_weights, 1, void(std::string), id2)
+    MOCK_METHOD(write_weights, 1, void(std::ofstream*), id3)
+    MOCK_METHOD(write_biases, 0, void(), id4)
+    MOCK_METHOD(write_biases, 1, void(std::string), id5)
+    MOCK_METHOD(write_biases, 1, void(std::ofstream*), id6)
+    MOCK_METHOD(write_stats, 0, void(), id7)
+    MOCK_METHOD(write_stats, 1, void(std::string), id8)
+    MOCK_METHOD(write_stats, 1, void(std::ofstream*), id9)
 };
 
-MOCK_BASE_CLASS(mock_dataSet, lib4neuro::DataSet)
+MOCK_BASE_CLASS(mock_dataSet, lib4neuro::DataSet)
 {
-	mock_dataSet(std::vector<std::pair<std::vector<double>, std::vector<double>>> *i)
-		: lib4neuro::DataSet(i)
-	{
-
-	}
-		MOCK_METHOD(add_data_pair, 2)
-		MOCK_METHOD(get_n_elements, 0)
-		MOCK_METHOD(get_input_dim, 0)
-		MOCK_METHOD(get_output_dim, 0)
-		MOCK_METHOD(print_data, 0)
-		MOCK_METHOD(store_text, 1)
-		MOCK_METHOD(store_data_text, 1, void(std::string), id1)
-		MOCK_METHOD(store_data_text, 1, void(std::ofstream*), id2)
-};
+    mock_dataSet(std::vector<std::pair<std::vector<double>, std::vector<double>>>* i)
+            : lib4neuro::DataSet(i) {
+    }
+    MOCK_METHOD(add_data_pair, 2)
+    MOCK_METHOD(get_n_elements, 0)
+    MOCK_METHOD(get_input_dim, 0)
+    MOCK_METHOD(get_output_dim, 0)
+    MOCK_METHOD(print_data, 0)
+    MOCK_METHOD(store_text, 1)
+    MOCK_METHOD(store_data_text, 1, void(std::string), id1)
+    MOCK_METHOD(store_data_text, 1, void(std::ofstream*), id2)
+};
 
 
 /**
@@ -72,28 +130,28 @@ MOCK_BASE_CLASS(mock_dataSet, lib4neuro::DataSet)
  */
 BOOST_AUTO_TEST_SUITE(ErrorFunctions_test);
 
-    BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_Construction_Test) {
+BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_Construction_Test) {
         mock_network network;
-		MOCK_EXPECT(network.get_n_biases).returns(1);
-		MOCK_EXPECT(network.get_n_weights).returns(1);
-		std::vector<double> inp, out;
-		std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_dy;
-		inp = { 0.0 };
-		out = { 8.0 };
-		data_vec_dy.emplace_back(std::make_pair(inp, out));
-		//DataSet ds_02(&data_vec_dy);
-		
-		
-		mock_dataSet dataSet(&data_vec_dy);
-
-		BOOST_CHECK_NO_THROW(MSE mse(&network, &dataSet));
-    }
-
-    BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_Eval_Test) {
+        MOCK_EXPECT(network.get_n_biases).returns(1);
+        MOCK_EXPECT(network.get_n_weights).returns(1);
+        std::vector<double> inp, out;
+        std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_dy;
+        inp = { 0.0 };
+        out = { 8.0 };
+        data_vec_dy.emplace_back(std::make_pair(inp, out));
+        //DataSet ds_02(&data_vec_dy);
+
+
+        mock_dataSet dataSet(&data_vec_dy);
+
+        BOOST_CHECK_NO_THROW(MSE mse(&network, &dataSet));
+}
+
+BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_Eval_Test) {
         mock_network network;
-		MOCK_EXPECT(network.get_n_biases).returns(1);
-		MOCK_EXPECT(network.get_n_weights).returns(1);
-		MOCK_EXPECT(network.eval_single);
+        MOCK_EXPECT(network.get_n_biases).returns(1);
+        MOCK_EXPECT(network.get_n_weights).returns(1);
+        MOCK_EXPECT(network.eval_single);
         std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
         std::vector<double> inp, out;
         for (int i = 0; i < 1; i++) {
@@ -102,7 +160,7 @@ BOOST_AUTO_TEST_SUITE(ErrorFunctions_test);
         }
         data_vec.emplace_back(std::make_pair(inp, out));
 
-		mock_dataSet dataSet(&data_vec);
+        mock_dataSet dataSet(&data_vec);
 
         std::vector<double> weights;
         weights.push_back(1);
@@ -110,12 +168,12 @@ BOOST_AUTO_TEST_SUITE(ErrorFunctions_test);
         MSE mse(&network, &dataSet);
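+        // (assumption) the mocked eval_single leaves the network output at 0,
+        // so the expected MSE for the single pair above is (4 - 0)^2 = 16.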
 
         BOOST_CHECK_EQUAL(16, mse.eval(&weights));
-    }
-	
-    BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_Get_dimension_test) {
-		mock_network network;
-		MOCK_EXPECT(network.get_n_biases).returns(1);
-		MOCK_EXPECT(network.get_n_weights).returns(1);
+}
+
+BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_Get_dimension_test) {
+        mock_network network;
+        MOCK_EXPECT(network.get_n_biases).returns(1);
+        MOCK_EXPECT(network.get_n_weights).returns(1);
         std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
         std::vector<double> inp, out;
         for (int i = 0; i < 1; i++) {
@@ -123,56 +181,56 @@ BOOST_AUTO_TEST_SUITE(ErrorFunctions_test);
             out.push_back(i + 4);
         }
         data_vec.emplace_back(std::make_pair(inp, out));
-       
-		mock_dataSet dataSet(&data_vec);
+
+        mock_dataSet dataSet(&data_vec);
 
 
         MSE mse(&network, &dataSet);
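+        // get_dimension() counts the optimized parameters:
+        // n_weights + n_biases = 1 + 1 = 2 with the mocks above.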
 
         BOOST_CHECK_EQUAL(2, mse.get_dimension());
-    }
+}
 
-    BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_SUM_Construction_Test) {
+BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_SUM_Construction_Test) {
         BOOST_CHECK_NO_THROW(ErrorSum mse_sum);
-    }
-	
-    BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_SUM_Add_Error_Function_Test) {
-        
-		mock_ErrorFunction f;
-		MOCK_EXPECT(f.get_dimension).returns(1);
+}
+
+BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_SUM_Add_Error_Function_Test) {
+
+        mock_ErrorFunction f;
+        MOCK_EXPECT(f.get_dimension).returns(1);
+        ErrorSum mse_sum;
+        BOOST_CHECK_NO_THROW(mse_sum.add_error_function(&f, 1));
+}
+
+BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_SUM_Eval_Test) {
+        mock_ErrorFunction f;
+        MOCK_EXPECT(f.get_dimension).returns(1);
+        MOCK_EXPECT(f.eval).returns(1.75);
         ErrorSum mse_sum;
-		BOOST_CHECK_NO_THROW(mse_sum.add_error_function(&f, 1));
-    }
-	
-    BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_SUM_Eval_Test) {
-		mock_ErrorFunction f;
-		MOCK_EXPECT(f.get_dimension).returns(1);
-		MOCK_EXPECT(f.eval).returns(1.75);
-		ErrorSum mse_sum;
 
         std::vector<double> weights;
         weights.push_back(1);
 
-		mse_sum.add_error_function(&f);
+        mse_sum.add_error_function(&f);
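+        // With a single component returning 1.75 (assumption: the default
+        // coefficient is 1), the whole sum evaluates to 1.75.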
         BOOST_CHECK_EQUAL(1.75, mse_sum.eval(&weights));
-    }
-	
-    BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_SUM_Get_Dimension_test) {
+}
+
+BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_SUM_Get_Dimension_test) {
         ErrorSum mse_sum;
         BOOST_CHECK_EQUAL(0, mse_sum.get_dimension());
-		mock_ErrorFunction f;
-		MOCK_EXPECT(f.get_dimension).returns(2);
-		MOCK_EXPECT(f.eval).returns(1.75);
-		
+        mock_ErrorFunction f;
+        MOCK_EXPECT(f.get_dimension).returns(2);
+        MOCK_EXPECT(f.eval).returns(1.75);
+
 
-		std::vector<double> weights;
-		weights.push_back(1);
+        std::vector<double> weights;
+        weights.push_back(1);
 
-		mse_sum.add_error_function(&f);
+        mse_sum.add_error_function(&f);
 
         BOOST_CHECK_EQUAL(2, mse_sum.get_dimension());
 
-    }
+}
 
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/tests/NeuralNetworkSum_test.cpp b/src/tests/NeuralNetworkSum_test.cpp
index c200f96dcdf991461ddf0da9cc62ee8d6f8e344d..ead5d98a1e557c1a49a63df7ff229d225eefb0fb 100644
--- a/src/tests/NeuralNetworkSum_test.cpp
+++ b/src/tests/NeuralNetworkSum_test.cpp
@@ -12,67 +12,112 @@
 
 #include "../Network/NeuralNetworkSum.h"
 #include <turtle/mock.hpp>
+
 using namespace lib4neuro;
 
-MOCK_BASE_CLASS(mock_network, lib4neuro::NeuralNetwork)
+MOCK_BASE_CLASS(mock_network, lib4neuro::NeuralNetwork)
 {
-	MOCK_METHOD(get_subnet, 2)
-    MOCK_METHOD(add_neuron, 3)
-    MOCK_METHOD(add_connection_simple, 4)
-    MOCK_METHOD(add_existing_connection, 4)
-    MOCK_METHOD(copy_parameter_space, 1)
-    MOCK_METHOD(set_parameter_space_pointers, 1)
-    MOCK_METHOD(eval_single, 3)
-    MOCK_METHOD(add_to_gradient_single, 4)
-    MOCK_METHOD(randomize_weights, 0)
-    MOCK_METHOD(randomize_biases, 0)
-    MOCK_METHOD(randomize_parameters, 0)
-    MOCK_METHOD(get_n_inputs, 0)
-    MOCK_METHOD(get_n_outputs, 0)
-    MOCK_METHOD(get_n_weights, 0)
-    MOCK_METHOD(get_n_biases, 0)
-    MOCK_METHOD(get_neuron_bias_index, 1)
-    MOCK_METHOD(get_n_neurons, 0)
-    MOCK_METHOD(specify_input_neurons, 1)
-    MOCK_METHOD(specify_output_neurons, 1)
-    MOCK_METHOD(get_parameter_ptr_biases, 0)
-    MOCK_METHOD(get_parameter_ptr_weights, 0)
-    MOCK_METHOD(save_text, 1)
-    MOCK_METHOD(write_weights, 0, void(), id1)
-    MOCK_METHOD(write_weights, 1, void(std::string), id2)
-    MOCK_METHOD(write_weights, 1, void(std::ofstream*), id3)
-    MOCK_METHOD(write_biases, 0, void(), id4)
-    MOCK_METHOD(write_biases, 1, void(std::string), id5)
-    MOCK_METHOD(write_biases, 1, void(std::ofstream*), id6)
-    MOCK_METHOD(write_stats, 0, void(), id7)
-    MOCK_METHOD(write_stats, 1, void(std::string), id8)
-    MOCK_METHOD(write_stats, 1, void(std::ofstream*), id9)
+    MOCK_METHOD(get_subnet, 2)
+    MOCK_METHOD(add_neuron, 3)
+    MOCK_METHOD(add_connection_simple, 4)
+    MOCK_METHOD(add_existing_connection, 4)
+    MOCK_METHOD(copy_parameter_space, 1)
+    MOCK_METHOD(set_parameter_space_pointers, 1)
+    MOCK_METHOD(eval_single, 3)
+    MOCK_METHOD(add_to_gradient_single, 4)
+    MOCK_METHOD(randomize_weights, 0)
+    MOCK_METHOD(randomize_biases, 0)
+    MOCK_METHOD(randomize_parameters, 0)
+    MOCK_METHOD(get_n_inputs, 0)
+    MOCK_METHOD(get_n_outputs, 0)
+    MOCK_METHOD(get_n_weights, 0)
+    MOCK_METHOD(get_n_biases, 0)
+    MOCK_METHOD(get_neuron_bias_index, 1)
+    MOCK_METHOD(get_n_neurons, 0)
+    MOCK_METHOD(specify_input_neurons, 1)
+    MOCK_METHOD(specify_output_neurons, 1)
+    MOCK_METHOD(get_parameter_ptr_biases, 0)
+    MOCK_METHOD(get_parameter_ptr_weights, 0)
+    MOCK_METHOD(save_text, 1)
+    MOCK_METHOD(write_weights, 0, void(), id1)
+    MOCK_METHOD(write_weights, 1, void(std::string), id2)
+    MOCK_METHOD(write_weights, 1, void(std::ofstream*), id3)
+    MOCK_METHOD(write_biases, 0, void(), id4)
+    MOCK_METHOD(write_biases, 1, void(std::string), id5)
+    MOCK_METHOD(write_biases, 1, void(std::ofstream*), id6)
+    MOCK_METHOD(write_stats, 0, void(), id7)
+    MOCK_METHOD(write_stats, 1, void(std::string), id8)
+    MOCK_METHOD(write_stats, 1, void(std::ofstream*), id9)
 };
 
 /**
  * Boost testing suite for testing NeuralNetworkSum.h
  */
 BOOST_AUTO_TEST_SUITE(NeuralNetworkSum_test)
-    /**
-     * Test of creating new instance of NeuralNetworkSum
-     */
-    BOOST_AUTO_TEST_CASE(NeuralNetworkSum_constuction_test) {
+/**
+ * Test of creating new instance of NeuralNetworkSum
+ */
+BOOST_AUTO_TEST_CASE(NeuralNetworkSum_constuction_test) {
         //Test that no exception is raised when creating a new instance of NeuralNetworkSum
         BOOST_CHECK_NO_THROW(NeuralNetworkSum networkSum);
-    }
+}
 
-    BOOST_AUTO_TEST_CASE(NeuralNetworkSum_add_network_test) {
+BOOST_AUTO_TEST_CASE(NeuralNetworkSum_add_network_test) {
         mock_network network;
-		//NeuralNetwork network;
+        //NeuralNetwork network;
         NeuralNetworkSum networkSum;
-		std::string po = "f(x,y,z,t) =x+y+z+t";
+        std::string po = "f(x,y,z,t) =x+y+z+t";
         BOOST_CHECK_NO_THROW(networkSum.add_network(&network, po));
-    }
+}
 
-   BOOST_AUTO_TEST_CASE(NeuralNetworkSum_eval_single_weights_test) {
-       
-	   mock_network network;
-	   MOCK_EXPECT(network.eval_single);
+BOOST_AUTO_TEST_CASE(NeuralNetworkSum_eval_single_weights_test) {
+
+        mock_network network;
+        MOCK_EXPECT(network.eval_single);
 
         std::vector<double> input;
         input.push_back(1);
@@ -85,18 +130,18 @@ BOOST_AUTO_TEST_SUITE(NeuralNetworkSum_test)
 
         networkSum.eval_single(input, output);
         BOOST_CHECK_EQUAL(0, output.at(0));
-    }
+}
 
-    BOOST_AUTO_TEST_CASE(NeuralNetworkSum_get_weights_test) {
+BOOST_AUTO_TEST_CASE(NeuralNetworkSum_get_weights_test) {
         NeuralNetworkSum networkSum;
         BOOST_CHECK_EQUAL(0, networkSum.get_n_weights());
 
-		mock_network network;
-		MOCK_EXPECT(network.get_n_weights).returns(1);
+        mock_network network;
+        MOCK_EXPECT(network.get_n_weights).returns(1);
         networkSum.add_network(&network, "f(x) =x");
 
         BOOST_CHECK_EQUAL(1, networkSum.get_n_weights());
-    }
+}
 
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/tests/NeuralNetwork_test.cpp b/src/tests/NeuralNetwork_test.cpp
index 7fefcf72394aeed0131bb6400c20ac2b6c5c4a60..0260476ec41d6163b123073062c221f4260068e2 100644
--- a/src/tests/NeuralNetwork_test.cpp
+++ b/src/tests/NeuralNetwork_test.cpp
@@ -14,12 +14,17 @@
 using namespace lib4neuro;
 
 
-MOCK_BASE_CLASS(mock_NeuronLinear, lib4neuro::NeuronLinear)
+MOCK_BASE_CLASS(mock_NeuronLinear, lib4neuro::NeuronLinear)
 {
-	MOCK_METHOD(activate, 2)
-		MOCK_METHOD(activation_function_eval_derivative_bias, 2)
-		MOCK_METHOD(activation_function_eval_derivative, 2)
-		MOCK_METHOD(get_derivative, 0)
+    MOCK_METHOD(activate, 2)
+    MOCK_METHOD(activation_function_eval_derivative_bias, 2)
+    MOCK_METHOD(activation_function_eval_derivative, 2)
+    MOCK_METHOD(get_derivative, 0)
 };
 
 /**
@@ -27,23 +32,23 @@ MOCK_BASE_CLASS(mock_NeuronLinear, lib4neuro::NeuronLinear)
  */
 BOOST_AUTO_TEST_SUITE(NeuralNetwork_test)
 
-    /**
-     * Test of creating new instance of NeuralNetwork
-     */
-    BOOST_AUTO_TEST_CASE(NeuralNetwork_constuction_test) {
+/**
+ * Test of creating new instance of NeuralNetwork
+ */
+BOOST_AUTO_TEST_CASE(NeuralNetwork_constuction_test) {
         //Test that no exception is raised when creating a new instance of NeuralNetwork
         BOOST_CHECK_NO_THROW(NeuralNetwork network);
-    }
-
-    /**
-     * Test of add_neuron method
-     * Existing bias out of range cancelation
-     */
-    BOOST_AUTO_TEST_CASE(NeuralNetwork_add_neuron_test) {
-		mock_NeuronLinear *n1 = new mock_NeuronLinear;
-		mock_NeuronLinear *n2 = new mock_NeuronLinear;
-		mock_NeuronLinear *n3 = new mock_NeuronLinear;
-		mock_NeuronLinear *n4 = new mock_NeuronLinear;
+}
+
+/**
+ * Test of add_neuron method
+ * Existing bias out of range cancelation
+ */
+BOOST_AUTO_TEST_CASE(NeuralNetwork_add_neuron_test) {
+        mock_NeuronLinear *n1 = new mock_NeuronLinear;
+        mock_NeuronLinear *n2 = new mock_NeuronLinear;
+        mock_NeuronLinear *n3 = new mock_NeuronLinear;
+        mock_NeuronLinear *n4 = new mock_NeuronLinear;
 
         NeuralNetwork network;
 
@@ -58,15 +63,15 @@ BOOST_AUTO_TEST_SUITE(NeuralNetwork_test)
 
         BOOST_CHECK_EQUAL(2, network.get_n_biases());
 
-  }
+}
 
-    /**
-     * Test of add_connection_simple method
-     */
-    BOOST_AUTO_TEST_CASE(NeuralNetwork_add_connection_simple_test) {
-		mock_NeuronLinear *n1 = new mock_NeuronLinear;
-		mock_NeuronLinear *n2 = new mock_NeuronLinear;
-		NeuralNetwork network;
+/**
+ * Test of add_connection_simple method
+ */
+BOOST_AUTO_TEST_CASE(NeuralNetwork_add_connection_simple_test) {
+        mock_NeuronLinear *n1 = new mock_NeuronLinear;
+        mock_NeuronLinear *n2 = new mock_NeuronLinear;
+        NeuralNetwork network;
         network.add_neuron(n1, BIAS_TYPE::NO_BIAS);
         network.add_neuron(n2, BIAS_TYPE::NO_BIAS);
 
@@ -77,17 +82,17 @@ BOOST_AUTO_TEST_SUITE(NeuralNetwork_test)
         BOOST_CHECK_EQUAL(4, network.add_connection_simple(0, 1, SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT, 1));
 
         BOOST_CHECK_EQUAL(2, network.get_n_weights());
-    }
+}
 
-    /**
-     * Test of add_connection_general method
-     */
-    BOOST_AUTO_TEST_CASE(NeuralNetwork_specify_inputs_neurons_test) {
+/**
+ * Test of add_connection_general method
+ */
+BOOST_AUTO_TEST_CASE(NeuralNetwork_specify_inputs_neurons_test) {
         NeuralNetwork network;
-		mock_NeuronLinear po;
-		mock_NeuronLinear *n1 = new mock_NeuronLinear();
+        mock_NeuronLinear po;
+        mock_NeuronLinear *n1 = new mock_NeuronLinear();
 
-		network.add_neuron(n1, BIAS_TYPE::NO_BIAS);
+        network.add_neuron(n1, BIAS_TYPE::NO_BIAS);
 
         std::vector<size_t> inputs;
         inputs.push_back(0);
@@ -95,12 +100,12 @@ BOOST_AUTO_TEST_SUITE(NeuralNetwork_test)
         BOOST_CHECK_EQUAL(0, network.get_n_inputs());
         network.specify_input_neurons(inputs);
         BOOST_CHECK_EQUAL(1, network.get_n_inputs());
-    }
+}
 
-    BOOST_AUTO_TEST_CASE(NeuralNetwork_specify_outputs_neurons_test) {
+BOOST_AUTO_TEST_CASE(NeuralNetwork_specify_outputs_neurons_test) {
         NeuralNetwork network;
-		mock_NeuronLinear *n1 = new mock_NeuronLinear;
-		network.add_neuron(n1, BIAS_TYPE::NO_BIAS);
+        mock_NeuronLinear *n1 = new mock_NeuronLinear;
+        network.add_neuron(n1, BIAS_TYPE::NO_BIAS);
 
         std::vector<size_t> outputs;
         outputs.push_back(0);
@@ -108,16 +113,16 @@ BOOST_AUTO_TEST_SUITE(NeuralNetwork_test)
         BOOST_CHECK_EQUAL(0, network.get_n_outputs());
         network.specify_output_neurons(outputs);
         BOOST_CHECK_EQUAL(1, network.get_n_outputs());
-    }
+}
 
-    BOOST_AUTO_TEST_CASE(NeuralNetwork_eval_single_test) {
-		mock_NeuronLinear *n1 = new mock_NeuronLinear();
-		mock_NeuronLinear *n2 = new mock_NeuronLinear();
+BOOST_AUTO_TEST_CASE(NeuralNetwork_eval_single_test) {
+        mock_NeuronLinear *n1 = new mock_NeuronLinear();
+        mock_NeuronLinear *n2 = new mock_NeuronLinear();
 
-		mock_NeuronLinear n3 = *n1;
-		mock_NeuronLinear n4 = *n2;
-		MOCK_EXPECT(n3.activate).returns(5);
-		MOCK_EXPECT(n4.activate).returns(5);
+        mock_NeuronLinear n3 = *n1;
+        mock_NeuronLinear n4 = *n2;
+        MOCK_EXPECT(n3.activate).returns(5);
+        MOCK_EXPECT(n4.activate).returns(5);
 
 
         NeuralNetwork network;
@@ -141,18 +146,20 @@ BOOST_AUTO_TEST_SUITE(NeuralNetwork_test)
 
         network.eval_single(input, output);
         BOOST_CHECK_EQUAL(5, output.at(0));
-    }
+}
+
+BOOST_AUTO_TEST_CASE(NeuralNetwork_randomize_weights_test) {
+        mock_NeuronLinear *n1 = new mock_NeuronLinear();
+        mock_NeuronLinear *n2 = new mock_NeuronLinear();
 
-    BOOST_AUTO_TEST_CASE(NeuralNetwork_randomize_weights_test) {
-		mock_NeuronLinear *n1 = new mock_NeuronLinear();
-		mock_NeuronLinear *n2 = new mock_NeuronLinear();
-		
-		NeuralNetwork network;
+        NeuralNetwork network;
         network.add_neuron(n1, BIAS_TYPE::NO_BIAS);
         network.add_neuron(n2, BIAS_TYPE::NO_BIAS);
 
         for (int i = 0; i < 100; i++) {
-            network.add_connection_simple(0, 1, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
+            network.add_connection_simple(0,
+                                          1,
+                                          SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
         }
         network.randomize_weights();
         std::vector<double> *weights = network.get_parameter_ptr_weights();
@@ -164,6 +171,6 @@ BOOST_AUTO_TEST_SUITE(NeuralNetwork_test)
         }
         sum=sum/100;
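+        // Statistical sanity check: randomized weights are expected to be
+        // centered on 0, so the mean of 100 samples should stay near 0.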
         BOOST_CHECK(sum<0.15 && sum>-0.15);
-    }
+}
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/tests/NeuronBinary_test.cpp b/src/tests/NeuronBinary_test.cpp
index 39f75dec48f293f5cf92d13be4252f46e68b0a77..aa7b8d6d32a9d1af02ca03a98fa7c55136d97a07 100644
--- a/src/tests/NeuronBinary_test.cpp
+++ b/src/tests/NeuronBinary_test.cpp
@@ -19,19 +19,19 @@ using namespace lib4neuro;
  */
 BOOST_AUTO_TEST_SUITE(neuronBinary_test)
 
-    /**
-     * Test of creating new instance of NeuronBinary
-     */
-    BOOST_AUTO_TEST_CASE(neuronBinary_construction_test) {
+/**
+ * Test of creating new instance of NeuronBinary
+ */
+BOOST_AUTO_TEST_CASE(neuronBinary_construction_test) {
 
-        BOOST_CHECK_NO_THROW(NeuronBinary *neuron = new NeuronBinary());
-    }
+        BOOST_CHECK_NO_THROW(NeuronBinary *neuron = new NeuronBinary());
+}
 
-    /**
-     * Test of activate method
-     */
-    BOOST_AUTO_TEST_CASE(neuronBinary_activate_test) {
-        NeuronBinary *neuron = new NeuronBinary();
+/**
+ * Test of activate method
+ */
+BOOST_AUTO_TEST_CASE(neuronBinary_activate_test) {
+        NeuronBinary *neuron = new NeuronBinary();
 
         //Test of correct neuron output below and above the bias
         BOOST_CHECK_EQUAL(0.0, neuron->activate(2.0, 3.0));
@@ -39,6 +39,6 @@ BOOST_AUTO_TEST_SUITE(neuronBinary_test)
         BOOST_CHECK_EQUAL(1.0, neuron->activate(3.0, 2.0));
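+        // (assumption) the binary neuron thresholds its input against the
+        // bias: 2.0 < 3.0 yields 0, 3.0 > 2.0 yields 1.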
 
 
-    }
+}
 
 BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/tests/NeuronConstant_test.cpp b/src/tests/NeuronConstant_test.cpp
index 685e9b5160c3d88cd3aab9080731be694033fa40..10c18026ef0a46bcfe7a4fa7e300452867410abf 100644
--- a/src/tests/NeuronConstant_test.cpp
+++ b/src/tests/NeuronConstant_test.cpp
@@ -19,37 +19,37 @@ using namespace lib4neuro;
  */
 BOOST_AUTO_TEST_SUITE(neuronConstant_test)
 
-    /**
-     * Test of creating new instance of NeuronConstant
-     */
-    BOOST_AUTO_TEST_CASE(neuronConstant_construction_test) {
-        BOOST_CHECK_NO_THROW(NeuronConstant *neuron = new NeuronConstant(2.0));
+/**
+ * Test of creating new instance of NeuronConstant
+ */
+BOOST_AUTO_TEST_CASE(neuronConstant_construction_test) {
+        BOOST_CHECK_NO_THROW(NeuronConstant *neuron = new NeuronConstant(2.0));
         BOOST_CHECK_NO_THROW(NeuronConstant *neuron = new NeuronConstant());
 
-    }
+}
 
-    /**
-     * Test of activate method
-     */
-    BOOST_AUTO_TEST_CASE(neuronConstant_activate__test) {
-        NeuronConstant *neuron = new NeuronConstant(2.0);
+/**
+ * Test of activate method
+ */
+BOOST_AUTO_TEST_CASE(neuronConstant_activate__test) {
+        NeuronConstant *neuron = new NeuronConstant(2.0);
         //Test of correct state after activate neuron
         BOOST_CHECK_EQUAL(2.0, neuron->activate(8.0, 7.0));
-        
+
         NeuronConstant *neuron2 = new NeuronConstant();
         //Test of correct state after activate neuron
         BOOST_CHECK_EQUAL(0.0, neuron2->activate(8.0, 7.0));
-    }
+}
 
-    /**
-     * Test of derivative methods
-     */
-    BOOST_AUTO_TEST_CASE(neuronConstant_derivative_test) {
-        NeuronConstant *neuron = new NeuronConstant(2.0);
+/**
+ * Test of derivative methods
+ */
+BOOST_AUTO_TEST_CASE(neuronConstant_derivative_test) {
+        NeuronConstant *neuron = new NeuronConstant(2.0);
 
         //Test of correct output of activation_function_get_derivative method
         BOOST_CHECK_EQUAL(0.0, neuron->activation_function_eval_derivative(3.0, 2.0));
         BOOST_CHECK_EQUAL(0.0, neuron->activation_function_eval_derivative_bias(3.0, 2.0));
-    }
+}
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/tests/NeuronLinear_test.cpp b/src/tests/NeuronLinear_test.cpp
index 0edb14f67321718363cfd4ccd52921b27f5cc45d..0d975c69f3db8bf1327983d13fb22743990842ed 100644
--- a/src/tests/NeuronLinear_test.cpp
+++ b/src/tests/NeuronLinear_test.cpp
@@ -19,31 +19,31 @@ using namespace lib4neuro;
  */
 BOOST_AUTO_TEST_SUITE(neuronLinear_test)
 
-    /**
-     * Test of creating new instance of NeuronLinear
-     */
-    BOOST_AUTO_TEST_CASE(neuronLinear_construction_test) {
-        BOOST_CHECK_NO_THROW(NeuronLinear *neuron = new NeuronLinear());
-    }
-
-    /**
-     * Test of activate method
-     */
-    BOOST_AUTO_TEST_CASE(neuronLinear_activate_test) {
-        NeuronLinear *neuron = new NeuronLinear();
+/**
+ * Test of creating new instance of NeuronLinear
+ */
+BOOST_AUTO_TEST_CASE(neuronLinear_construction_test) {
+        BOOST_CHECK_NO_THROW(NeuronLinear *neuron = new NeuronLinear());
+}
+
+/**
+ * Test of activate method
+ */
+BOOST_AUTO_TEST_CASE(neuronLinear_activate_test) {
+        NeuronLinear *neuron = new NeuronLinear();
         //Test of correct state after activate neuron
         BOOST_CHECK_EQUAL(5.0, neuron->activate(3.0, 2.0));
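+        // activate(x, b) evaluates to x + b here: 3.0 + 2.0 == 5.0.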
-    }
+}
 
-    /**
-     * Test of derivative methods
-     */
-    BOOST_AUTO_TEST_CASE(neuronLinear_derivative_test) {
-        NeuronLinear *neuron = new NeuronLinear();
+/**
+ * Test of derivative methods
+ */
+BOOST_AUTO_TEST_CASE(neuronLinear_derivative_test) {
+        NeuronLinear *neuron = new NeuronLinear();
 
         //Test of correct output of activation_function_get_derivative method
         BOOST_CHECK_EQUAL(1.0, neuron->activation_function_eval_derivative(3.0, 2.0));
         BOOST_CHECK_EQUAL(1.0, neuron->activation_function_eval_derivative_bias(3.0, 2.0));
-    }
+}
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/tests/NeuronLogistic_test.cpp b/src/tests/NeuronLogistic_test.cpp
index 4afb47f5704f4b6d5c62e965948cfcfb3e7aa6ca..04fb24f5f57315bce7b470ee5bf5d6549265c1d1 100644
--- a/src/tests/NeuronLogistic_test.cpp
+++ b/src/tests/NeuronLogistic_test.cpp
@@ -19,84 +19,84 @@ using namespace lib4neuro;
  */
 BOOST_AUTO_TEST_SUITE(neuronLogistic_test)
 
-    /**
-     * Test of creating new instance of NeuronLogistic
-     */
-    BOOST_AUTO_TEST_CASE(neuronLogistic_construction__test) {
-        BOOST_CHECK_NO_THROW(NeuronLogistic *neuron = new NeuronLogistic());
+/**
+ * Test of creating new instance of NeuronLogistic
+ */
+BOOST_AUTO_TEST_CASE(neuronLogistic_construction__test) {
+        BOOST_CHECK_NO_THROW(NeuronLogistic *neuron = new NeuronLogistic());
 
-    }
+}
 
-    /**
-     * Test of activate method
-     */
-    BOOST_AUTO_TEST_CASE(neuronLogistic_activate__test) {
-        NeuronLogistic *neuron = new NeuronLogistic();
+/**
+ * Test of activate method
+ */
+BOOST_AUTO_TEST_CASE(neuronLogistic_activate__test) {
+        NeuronLogistic *neuron = new NeuronLogistic();
 
         //Test of correct state after activate neuron
-        BOOST_CHECK_CLOSE(0.73105857863, neuron->activate(3.0,2.0), 0.00001);
-    }
-
-    /**
-     * Test of derivative methods
-     */
-    BOOST_AUTO_TEST_CASE(neuronLogistic_derivative_test) {
-        NeuronLogistic *neuron = new NeuronLogistic();
+        BOOST_CHECK_CLOSE(0.73105857863, neuron->activate(3.0, 2.0), 0.00001);
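+        // 0.73105857863 is sigma(1) = 1 / (1 + e^-1), i.e. the logistic
+        // function evaluated at x - b = 3.0 - 2.0.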
+}
+
+/**
+ * Test of derivative methods
+ */
+BOOST_AUTO_TEST_CASE(neuronLogistic_derivative_test) {
+        NeuronLogistic *neuron = new NeuronLogistic();
         //3.0 2.0
         //Test of correct output of activation_function_get_derivative method
-        BOOST_CHECK_CLOSE(0.196611933241, neuron->activation_function_eval_derivative(3.0,2.0), 0.00001);
-        BOOST_CHECK_CLOSE(-0.196611933241, neuron->activation_function_eval_derivative_bias(3.0,2.0), 0.00001);
+        BOOST_CHECK_CLOSE(0.196611933241, neuron->activation_function_eval_derivative(3.0, 2.0), 0.00001);
+        BOOST_CHECK_CLOSE(-0.196611933241, neuron->activation_function_eval_derivative_bias(3.0, 2.0), 0.00001);
 
-    }
+}
 
-    BOOST_AUTO_TEST_CASE(neuronLogistic_d1_construction__test) {
-        BOOST_CHECK_NO_THROW(NeuronLogistic_d1 *neuron = new NeuronLogistic_d1());
-    }
+BOOST_AUTO_TEST_CASE(neuronLogistic_d1_construction__test) {
+        BOOST_CHECK_NO_THROW(NeuronLogistic_d1 *neuron = new NeuronLogistic_d1());
+}
 
-    /**
-     * Test of activate method
-     */
-    BOOST_AUTO_TEST_CASE(neuronLogistic_d1_activate__test) {
-        NeuronLogistic_d1 *neuron = new NeuronLogistic_d1();
+/**
+ * Test of activate method
+ */
+BOOST_AUTO_TEST_CASE(neuronLogistic_d1_activate__test) {
+        NeuronLogistic_d1 *neuron = new NeuronLogistic_d1();
 
         //Test of correct state after activate neuron
-        BOOST_CHECK_CLOSE(0.196611933241, neuron->activate(3.0,2.0), 0.00001);
-    }
-
-    /**
-     * Test of derivative methods
-     */
-    BOOST_AUTO_TEST_CASE(neuronLogistic_d1_derivative_test) {
-        NeuronLogistic_d1 *neuron = new NeuronLogistic_d1();
+        BOOST_CHECK_CLOSE(0.196611933241, neuron->activate(3.0, 2.0), 0.00001);
+}
+
+/**
+ * Test of derivative methods
+ */
+BOOST_AUTO_TEST_CASE(neuronLogistic_d1_derivative_test) {
+        NeuronLogistic_d1 *neuron = new NeuronLogistic_d1();
         //3.0 2.0
         //Test of correct output of activation_function_get_derivative method
-        BOOST_CHECK_CLOSE(-0.0908577476729, neuron->activation_function_eval_derivative(3.0,2.0), 0.00001);
-        BOOST_CHECK_CLOSE(0.0908577476729, neuron->activation_function_eval_derivative_bias(3.0,2.0), 0.00001);
-    }
+        BOOST_CHECK_CLOSE(-0.0908577476729, neuron->activation_function_eval_derivative(3.0, 2.0), 0.00001);
+        BOOST_CHECK_CLOSE(0.0908577476729, neuron->activation_function_eval_derivative_bias(3.0, 2.0), 0.00001);
+}
 
-    BOOST_AUTO_TEST_CASE(neuronLogistic_d2_construction__test) {
-        BOOST_CHECK_NO_THROW(NeuronLogistic_d2 *neuron = new NeuronLogistic_d2());
-    }
+BOOST_AUTO_TEST_CASE(neuronLogistic_d2_construction__test) {
+        BOOST_CHECK_NO_THROW(NeuronLogistic_d2 *neuron = new NeuronLogistic_d2());
+}
 
-    /**
-     * Test of activate method
-     */
-    BOOST_AUTO_TEST_CASE(neuronLogistic_d2_activate__test) {
-        NeuronLogistic_d2 *neuron = new NeuronLogistic_d2();
+/**
+ * Test of activate method
+ */
+BOOST_AUTO_TEST_CASE(neuronLogistic_d2_activate__test) {
+        NeuronLogistic_d2 *neuron = new NeuronLogistic_d2();
 
         //Test of correct state after activate neuron
-        BOOST_CHECK_CLOSE(-0.0908577476729, neuron->activate(3.0,2.0), 0.00001);
-    }
-
-    /**
-     * Test of derivative methods
-     */
-    BOOST_AUTO_TEST_CASE(neuronLogistic_d2_derivative_test) {
-        NeuronLogistic_d2 *neuron = new NeuronLogistic_d2();
+        BOOST_CHECK_CLOSE(-0.0908577476729, neuron->activate(3.0, 2.0), 0.00001);
+}
+
+/**
+ * Test of derivative methods
+ */
+BOOST_AUTO_TEST_CASE(neuronLogistic_d2_derivative_test) {
+        NeuronLogistic_d2 *neuron = new NeuronLogistic_d2();
         //3.0 2.0
         //Test of correct output of activation_function_get_derivative method
-        BOOST_CHECK_CLOSE(-0.03532558051623, neuron->activation_function_eval_derivative(3.0,2.0), 0.00001);
-        BOOST_CHECK_CLOSE(0.03532558051623, neuron->activation_function_eval_derivative_bias(3.0,2.0), 0.00001);
-    }
+        BOOST_CHECK_CLOSE(-0.03532558051623, neuron->activation_function_eval_derivative(3.0, 2.0), 0.00001);
+        BOOST_CHECK_CLOSE(0.03532558051623, neuron->activation_function_eval_derivative_bias(3.0, 2.0), 0.00001);
+}
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/tests/ParticleSwarm_test.cpp b/src/tests/ParticleSwarm_test.cpp
index 8d0aaf1bbae1a280f1794ec42ee3583c35c89582..afcc4a268298302552c5bedc8aa18946c547fbb6 100644
--- a/src/tests/ParticleSwarm_test.cpp
+++ b/src/tests/ParticleSwarm_test.cpp
@@ -17,49 +17,49 @@ using namespace lib4neuro;
  * Boost testing suite for testing ParticleSwarm.h
  */
 
-double test_particle_swarm_neural_net_error_function(double *weights){
-	return 0;
+double test_particle_swarm_neural_net_error_function(double* weights) {
+    return 0;
 }
 
 BOOST_AUTO_TEST_SUITE(ParticleSwarm_test)
 
 
-    BOOST_AUTO_TEST_CASE(ParticleSwarm_construction_test){
+BOOST_AUTO_TEST_CASE(ParticleSwarm_construction_test) {
         std::vector<double> domain_bound;
         domain_bound.push_back(5);
 
         BOOST_CHECK_NO_THROW(ParticleSwarm swarm(&domain_bound, 0, 1, 1, 0.5, 0.05, 0.5, 0, 20));
-    }
+}
 
-    BOOST_AUTO_TEST_CASE(ParticleSwarm_optimalize_and_get_parameters_test){
+BOOST_AUTO_TEST_CASE(ParticleSwarm_optimalize_and_get_parameters_test) {
         std::vector<double> domain_bound;
         domain_bound.push_back(-5);
-		domain_bound.push_back(5);
-		domain_bound.push_back(-5);
-		domain_bound.push_back(5);
-		domain_bound.push_back(-5);
-		domain_bound.push_back(5);
-		domain_bound.push_back(-5);
-		domain_bound.push_back(5);
-		domain_bound.push_back(-5);
-		domain_bound.push_back(5);
+        domain_bound.push_back(5);
+        domain_bound.push_back(-5);
+        domain_bound.push_back(5);
+        domain_bound.push_back(-5);
+        domain_bound.push_back(5);
+        domain_bound.push_back(-5);
+        domain_bound.push_back(5);
+        domain_bound.push_back(-5);
+        domain_bound.push_back(5);
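+        // (assumption) domain_bound holds a (lower, upper) pair per dimension,
+        // so 10 values cover the 5 dimensions reported by the mock below.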
 
 
-		mock_ErrorFunction error;
+        mock_ErrorFunction error;
 
-		MOCK_EXPECT(error.get_dimension).returns(5);
-		MOCK_EXPECT(error.eval).returns(0.8);
+        MOCK_EXPECT(error.get_dimension).returns(5);
+        MOCK_EXPECT(error.eval).returns(0.8);
 
-		ParticleSwarm swarm(&domain_bound, 0, 1, 1, 1, 1, 1, 5, 20);
+        ParticleSwarm swarm(&domain_bound, 0, 1, 1, 1, 1, 1, 5, 20);
 
-		BOOST_CHECK_NO_THROW(swarm.optimize(error));
+        BOOST_CHECK_NO_THROW(swarm.optimize(error));
 
 
 
-		for (int i = 0; i < swarm.get_parameters()->size(); i++) {
-			BOOST_CHECK_NO_THROW(swarm.get_parameters()->at(i));
-		}
+        for (int i = 0; i < swarm.get_parameters()->size(); i++) {
+            BOOST_CHECK_NO_THROW(swarm.get_parameters()->at(i));
+        }
 
-    }
+}
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/tests/Particle_test.cpp b/src/tests/Particle_test.cpp
index 211397c5d937d457e097173ba9bcc618fe970209..156dd8f158f2ef39438d12ce3736ad065ffd5930 100644
--- a/src/tests/Particle_test.cpp
+++ b/src/tests/Particle_test.cpp
@@ -20,36 +20,36 @@ using namespace lib4neuro;
 BOOST_AUTO_TEST_SUITE(Particle_test)
 
 BOOST_AUTO_TEST_CASE(Particle_construction_test) {
-	std::vector<double> domain_bound{ 1, 2, 3, 4, 5 };
-	mock_ErrorFunction error;
-	MOCK_EXPECT(error.get_dimension).once().returns(5);
-	MOCK_EXPECT(error.eval).once().returns(0.8);
-	BOOST_CHECK_NO_THROW(Particle(&error, &domain_bound));
+        std::vector<double> domain_bound{ 1, 2, 3, 4, 5 };
+        mock_ErrorFunction error;
+        MOCK_EXPECT(error.get_dimension).once().returns(5);
+        MOCK_EXPECT(error.eval).once().returns(0.8);
+        BOOST_CHECK_NO_THROW(Particle(&error, &domain_bound));
 }
 
 BOOST_AUTO_TEST_CASE(Particle_get_coordinate_test) {
-	std::vector<double> domain_bound{ 1, 2, 3, 4, 5 };
-	mock_ErrorFunction error;
+        std::vector<double> domain_bound{ 1, 2, 3, 4, 5 };
+        mock_ErrorFunction error;
 
-	MOCK_EXPECT(error.get_dimension).returns(5);
-	MOCK_EXPECT(error.eval).returns(0.8);
+        MOCK_EXPECT(error.get_dimension).returns(5);
+        MOCK_EXPECT(error.eval).returns(0.8);
 
-	Particle particle1(&error, &domain_bound);
-	Particle particle2(&error, &domain_bound);
+        Particle particle1(&error, &domain_bound);
+        Particle particle2(&error, &domain_bound);
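+        // Particle coordinates are randomized at construction, so two
+        // independently created particles should differ.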
 
-	BOOST_CHECK(*particle1.get_coordinate() != *particle2.get_coordinate());
+        BOOST_CHECK(*particle1.get_coordinate() != *particle2.get_coordinate());
 }
 
 BOOST_AUTO_TEST_CASE(Particle_get_optimal_value_test) {
-	std::vector<double> domain_bound{ 1, 2, 3, 4, 5 };
-	mock_ErrorFunction error;
+        std::vector<double> domain_bound{ 1, 2, 3, 4, 5 };
+        mock_ErrorFunction error;
 
-	MOCK_EXPECT(error.get_dimension).returns(5);
-	MOCK_EXPECT(error.eval).returns(0.8);
+        MOCK_EXPECT(error.get_dimension).returns(5);
+        MOCK_EXPECT(error.eval).returns(0.8);
 
 
-	Particle particle1(&error, &domain_bound);
-	BOOST_CHECK_EQUAL(0.8, particle1.get_optimal_value());
+        Particle particle1(&error, &domain_bound);
+        BOOST_CHECK_EQUAL(0.8, particle1.get_optimal_value());
 }