From 3b275aaeff11cf31e3e079b262a88542bd44c954 Mon Sep 17 00:00:00 2001
From: Martin Beseda <martin.beseda@vsb.cz>
Date: Mon, 1 Apr 2019 04:21:39 +0200
Subject: [PATCH] [CODE] Reformatted source code.
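
Whitespace-only pass over the sources listed below: consecutive
assignments and declarations are aligned on '=' and on the declared
name, function-call arguments are split one per line, 'catch' clauses
are moved onto their own line after the closing brace, and '&'/'*'
qualifiers are attached to the type rather than the name. No functional
change is intended.

A representative before/after, taken from DataSet::DataSet():

    // before
    this->n_elements = 0;
    this->input_dim = 0;
    this->output_dim = 0;

    // after
    this->n_elements             = 0;
    this->input_dim              = 0;
    this->output_dim             = 0;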

---
 src/CSVReader/CSVReader.cpp                   |  12 +-
 src/DataSet/DataSet.cpp                       |  51 +++---
 src/ErrorFunction/ErrorFunctions.cpp          | 154 +++++++++---------
 src/ErrorFunction/ErrorFunctions.h            |  28 ++--
 src/General/ExprtkWrapperSerialization.h      |   4 +-
 src/LearningMethods/GradientDescent.cpp       |  56 +++----
 src/LearningMethods/GradientDescent.h         |  12 +-
 src/LearningMethods/GradientDescentBB.cpp     |  40 ++---
 .../GradientDescentSingleItem.cpp             |  18 +-
 src/LearningMethods/LearningSequence.cpp      |   6 +-
 src/LearningMethods/LevenbergMarquardt.cpp    |  71 ++++----
 src/LearningMethods/ParticleSwarm.cpp         |  84 +++++-----
 src/LearningMethods/ParticleSwarm.h           |  36 ++--
 .../ConnectionFunctionIdentity.cpp            |   2 +-
 src/Network/NeuralNetwork.cpp                 |  93 +++++------
 src/Network/NeuralNetwork.h                   |   2 +-
 src/Neuron/NeuronLogistic.cpp                 |  42 ++---
 .../NormalizationStrategy.h                   |   2 +-
 src/constants.h                               |   2 +-
 src/examples/net_test_1.cpp                   |  52 +++---
 src/examples/net_test_2.cpp                   |  52 +++---
 src/examples/net_test_3.cpp                   |  20 +--
 src/examples/net_test_harmonic_oscilator.cpp  |  70 ++++----
 src/examples/net_test_ode_1.cpp               |  80 ++++-----
 src/examples/net_test_pde_1.cpp               |  90 +++++-----
 src/examples/network_serialization.cpp        |  66 ++++----
 src/examples/seminar.cpp                      |  54 +++---
 src/examples/x2_fitting.cpp                   |  18 +-
 src/tests/CMakeLists.txt                      | 150 ++++++++---------
 src/tests/ConnectionFunctionGeneral_test.cpp  |   2 +-
 src/tests/DESolver_test.cpp                   |   2 +-
 31 files changed, 694 insertions(+), 677 deletions(-)

diff --git a/src/CSVReader/CSVReader.cpp b/src/CSVReader/CSVReader.cpp
index c7c727c1..7eb47b2e 100644
--- a/src/CSVReader/CSVReader.cpp
+++ b/src/CSVReader/CSVReader.cpp
@@ -91,7 +91,7 @@ namespace lib4neuro {
         for (auto line : this->data) {
             //TODO check empty values in data
             std::vector<double> input;
-            for (auto ind : *input_col_indices) {
+            for (auto           ind : *input_col_indices) {
                 std::string s;
 
                 try {
@@ -113,17 +113,19 @@ namespace lib4neuro {
                     /* Add loaded number to the vector of inputs */
                     input.push_back(tmp);
 
-                } catch (const std::out_of_range& e) {
+                }
+                catch (const std::out_of_range& e) {
                     THROW_OUT_OF_RANGE_ERROR("Non-existing index specified (" + std::to_string(ind) + ")!");
 
-                } catch (const boost::bad_lexical_cast& e) {
+                }
+                catch (const boost::bad_lexical_cast& e) {
                     THROW_RUNTIME_ERROR(
-                            "Value \"" + s + "\" is not numerical and so it cannot be used in Data Set!");
+                        "Value \"" + s + "\" is not numerical and so it cannot be used in Data Set!");
                 }
             }
 
             std::vector<double> output;
-            for (auto ind : *output_col_indices) {
+            for (auto           ind : *output_col_indices) {
                 output.emplace_back(std::stod(line.at(ind)));
             }
 
diff --git a/src/DataSet/DataSet.cpp b/src/DataSet/DataSet.cpp
index 2457abbf..e28ff804 100644
--- a/src/DataSet/DataSet.cpp
+++ b/src/DataSet/DataSet.cpp
@@ -10,9 +10,9 @@ BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::DataSet);
 namespace lib4neuro {
 
     DataSet::DataSet() {
-        this->n_elements = 0;
-        this->input_dim = 0;
-        this->output_dim = 0;
+        this->n_elements             = 0;
+        this->input_dim              = 0;
+        this->output_dim             = 0;
         this->normalization_strategy = std::make_shared<DoubleUnitStrategy>(DoubleUnitStrategy());
     }
 
@@ -22,10 +22,11 @@ namespace lib4neuro {
             try {
                 boost::archive::text_iarchive ia(ifs);
                 ia >> *this;
-            } catch (boost::archive::archive_exception& e) {
+            }
+            catch (boost::archive::archive_exception& e) {
                 THROW_RUNTIME_ERROR(
-                        "Serialized archive error: '" + e.what() + "'! Please, check if your file is really "
-                                                                   "the serialized DataSet.");
+                    "Serialized archive error: '" + e.what() + "'! Please, check if your file is really "
+                                                               "the serialized DataSet.");
             }
             ifs.close();
         } else {
@@ -40,8 +41,8 @@ namespace lib4neuro {
                      NormalizationStrategy* ns) {
         this->data.clear();
         this->n_elements = data_ptr->size();
-        this->data = *data_ptr;
-        this->input_dim = this->data[0].first.size();
+        this->data       = *data_ptr;
+        this->input_dim  = this->data[0].first.size();
         this->output_dim = this->data[0].second.size();
 
         if (ns) {
@@ -59,9 +60,9 @@ namespace lib4neuro {
                      double output,
                      NormalizationStrategy* ns) {
         std::vector<std::pair<std::vector<double>, std::vector<double>>> new_data_vec;
-        this->data = new_data_vec;
+        this->data       = new_data_vec;
         this->n_elements = 0;
-        this->input_dim = 1;
+        this->input_dim  = 1;
         this->output_dim = 1;
 
         if (ns) {
@@ -81,8 +82,8 @@ namespace lib4neuro {
                      unsigned int output_dim,
                      NormalizationStrategy* ns) {
         std::vector<std::pair<std::vector<double>, std::vector<double>>> new_data_vec;
-        this->data = new_data_vec;
-        this->input_dim = bounds.size() / 2;
+        this->data       = new_data_vec;
+        this->input_dim  = bounds.size() / 2;
         this->output_dim = output_dim;
         this->n_elements = 0;
 
@@ -97,14 +98,14 @@ namespace lib4neuro {
                                  output_func);
     }
 
-    DataSet::~DataSet(){
+    DataSet::~DataSet() {
 
     }
 
     void DataSet::add_data_pair(std::vector<double>& inputs,
                                 std::vector<double>& outputs) {
         if (this->n_elements == 0 && this->input_dim == 0 && this->output_dim == 0) {
-            this->input_dim = inputs.size();
+            this->input_dim  = inputs.size();
             this->output_dim = outputs.size();
         }
 
@@ -157,8 +158,8 @@ namespace lib4neuro {
         // TODO add check of dataset dimensions
 
         std::vector<std::vector<double>> grid;
-        std::vector<double> tmp;
-        double frac;
+        std::vector<double>              tmp;
+        double                           frac;
         if (no_elems_in_one_dim < 1) {
             THROW_INVALID_ARGUMENT_ERROR("Number of elements in one dimension has to be >=1 !");
         }
@@ -257,7 +258,7 @@ namespace lib4neuro {
         if (!ofs.is_open()) {
             THROW_RUNTIME_ERROR("File " + file_path + " couldn't be open!");
         } else {
-            this->store_data_text( &ofs );
+            this->store_data_text(&ofs);
             ofs.close();
         }
     }
@@ -265,7 +266,7 @@ namespace lib4neuro {
     template<class T>
     std::vector<std::vector<T>> DataSet::cartesian_product(const std::vector<std::vector<T>>* v) {
         std::vector<std::vector<double>> v_combined_old, v_combined, v_tmp;
-        std::vector<double> tmp;
+        std::vector<double>              tmp;
 
         for (const auto& e : v->at(0)) {
             tmp = {e};
@@ -307,12 +308,12 @@ namespace lib4neuro {
             this->max_min_inp_val.emplace_back(this->data.at(0).first.at(0));
         }
 
-        double tmp, tmp2;
+        double    tmp, tmp2;
         for (auto pair : this->data) {
             /* Finding maximum */
             //TODO make more efficiently
-            tmp = *std::max_element(pair.first.begin(),
-                                    pair.first.end());
+            tmp  = *std::max_element(pair.first.begin(),
+                                     pair.first.end());
             tmp2 = *std::max_element(pair.second.begin(),
                                      pair.second.end());
 
@@ -325,8 +326,8 @@ namespace lib4neuro {
             }
 
             /* Finding minimum */
-            tmp = *std::min_element(pair.first.begin(),
-                                    pair.first.end());
+            tmp  = *std::min_element(pair.first.begin(),
+                                     pair.first.end());
             tmp2 = *std::min_element(pair.second.begin(),
                                      pair.second.end());
 
@@ -371,7 +372,7 @@ namespace lib4neuro {
         if (!this->normalized || !this->normalization_strategy) {
             return val;
         }
-        return this->normalization_strategy->de_normalize( val );
+        return this->normalization_strategy->de_normalize(val);
     }
 
     void DataSet::get_input(std::vector<double>& d,
@@ -456,7 +457,7 @@ namespace lib4neuro {
                                                 this->data.size()) + 1;
             n_chosen = max;
             std::vector<size_t> chosens;
-            size_t chosen;
+            size_t              chosen;
 
             for (size_t i = 0; i < n_chosen; i++) {
                 chosen = rand() % this->data.size();
diff --git a/src/ErrorFunction/ErrorFunctions.cpp b/src/ErrorFunction/ErrorFunctions.cpp
index 24302be3..3a6dd334 100644
--- a/src/ErrorFunction/ErrorFunctions.cpp
+++ b/src/ErrorFunction/ErrorFunctions.cpp
@@ -22,7 +22,7 @@ namespace lib4neuro {
         this->ds_full = this->ds;
 
         /* Choose random subset of the DataSet for training and the remaining part for validation */
-        boost::random::mt19937 gen;
+        boost::random::mt19937                    gen;
         boost::random::uniform_int_distribution<> dist(0,
                                                        ds_size - 1);
 
@@ -65,21 +65,24 @@ namespace lib4neuro {
         }
     }
 
-    void MSE::get_jacobian_and_rhs(std::vector<std::vector<double>> &jacobian, std::vector<double> &rhs) {
+    void MSE::get_jacobian_and_rhs(std::vector<std::vector<double>>& jacobian,
+                                   std::vector<double>& rhs) {
 //        size_t row_idx = 0;
         std::vector<double> partial_error(this->get_n_outputs());
-        rhs.resize( this->get_dimension());
-        std::fill(rhs.begin(), rhs.end(), 0.0);
+        rhs.resize(this->get_dimension());
+        std::fill(rhs.begin(),
+                  rhs.end(),
+                  0.0);
 
         std::vector<std::vector<double>> jac_loc;
-        for (auto item: *this->ds->get_data()) {
+        for (auto                        item: *this->ds->get_data()) {
 
             this->nets[0]->get_jacobian(jac_loc,
-                           item,
-                           partial_error);
+                                        item,
+                                        partial_error);
 
             for (size_t ri = 0; ri < jac_loc.size(); ++ri) {
-                jacobian.push_back( jac_loc[ri] );
+                jacobian.push_back(jac_loc[ri]);
 
                 for (size_t ci = 0; ci < this->get_dimension(); ++ci) {
 //                    J.at(row_idx,
@@ -94,7 +97,7 @@ namespace lib4neuro {
     MSE::MSE(NeuralNetwork* net,
              DataSet* ds) {
         this->nets.push_back(net);
-        this->ds = ds;
+        this->ds        = ds;
         this->dimension = net->get_n_weights() + net->get_n_biases();
     }
 
@@ -103,8 +106,8 @@ namespace lib4neuro {
                                      std::vector<double>* weights) {
         std::vector<double> predicted_output(this->nets[0]->get_n_outputs());
         this->nets[0]->eval_single(*input,
-                               predicted_output,
-                               weights);
+                                   predicted_output,
+                                   weights);
         double result = 0;
         double val;
 
@@ -120,17 +123,17 @@ namespace lib4neuro {
                                  std::ofstream* results_file_path,
                                  std::vector<double>* weights,
                                  bool verbose
-                                 ) {
-        size_t dim_in = data_set->get_input_dim();
+    ) {
+        size_t dim_in  = data_set->get_input_dim();
         size_t dim_out = data_set->get_output_dim();
-        double error = 0.0, val, output_norm = 0;
+        double error   = 0.0, val, output_norm = 0;
 
         std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = data_set->get_data();
         size_t n_elements = data->size();
 
         //TODO instead use something smarter
         std::vector<std::vector<double>> outputs(data->size());
-        std::vector<double> output(dim_out);
+        std::vector<double>              output(dim_out);
 
         if (verbose) {
             COUT_DEBUG("Evaluation of the error function MSE on the given data-set" << std::endl);
@@ -156,8 +159,8 @@ namespace lib4neuro {
         for (size_t i = 0; i < data->size(); i++) {  // Iterate through every element in the test set
             /* Compute the net output and store it into 'output' variable */
             this->nets[0]->eval_single(data->at(i).first,
-                                   output,
-                                   weights);
+                                       output,
+                                       weights);
 
             outputs.at(i) = output;
         }
@@ -167,12 +170,12 @@ namespace lib4neuro {
         double denormalized_real_output;
 
         std::string separator = "";
-        for (size_t i = 0; i < data->size(); i++) {
+        for (size_t i         = 0; i < data->size(); i++) {
 
             /* Compute difference for every element of the output vector */
 #ifdef L4N_DEBUG
             std::stringstream ss_input;
-            for (size_t j = 0; j < dim_in; j++) {
+            for (size_t       j = 0; j < dim_in; j++) {
                 denormalized_real_input = data_set->get_denormalized_value(data->at(i).first.at(j));
                 ss_input << separator << denormalized_real_input;
                 separator = ",";
@@ -184,10 +187,10 @@ namespace lib4neuro {
 
             double loc_error = 0;
             output_norm = 0;
-            separator = "";
+            separator   = "";
             for (size_t j = 0; j < dim_out; ++j) {
                 denormalized_real_output = data_set->get_denormalized_value(data->at(i).second.at(j));
-                denormalized_output = data_set->get_denormalized_value(outputs.at(i).at(j));
+                denormalized_output      = data_set->get_denormalized_value(outputs.at(i).at(j));
 
 #ifdef L4N_DEBUG
                 ss_real_output << separator << denormalized_real_output;
@@ -313,7 +316,7 @@ namespace lib4neuro {
                                   double alpha,
                                   size_t batch) {
 
-        size_t dim_out = this->ds->get_output_dim();
+        size_t dim_out    = this->ds->get_output_dim();
         size_t n_elements = this->ds->get_n_elements();
         std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = this->ds->get_data();
 
@@ -326,17 +329,17 @@ namespace lib4neuro {
         for (auto el: *data) {  // Iterate through every element in the test set
 
             this->nets[0]->eval_single(el.first,
-                                   error_derivative,
-                                   &params);  // Compute the net output and store it into 'output' variable
+                                       error_derivative,
+                                       &params);  // Compute the net output and store it into 'output' variable
 
             for (size_t j = 0; j < dim_out; ++j) {
                 error_derivative.at(j) = 2.0 * (error_derivative.at(j) - el.second.at(j)); //real - expected result
             }
 
             this->nets[0]->add_to_gradient_single(el.first,
-                                              error_derivative,
-                                              alpha / n_elements,
-                                              grad);
+                                                  error_derivative,
+                                                  alpha / n_elements,
+                                                  grad);
         }
     }
 
@@ -359,8 +362,8 @@ namespace lib4neuro {
 
         //TODO check input vector sizes - they HAVE TO be allocated before calling this function
 
-        size_t n_parameters = this->get_dimension();
-        std::vector<double> parameters = this->get_parameters();
+        size_t              n_parameters = this->get_dimension();
+        std::vector<double> parameters   = this->get_parameters();
 
         double delta;  // Complete step size
         double former_parameter_value;
@@ -368,7 +371,7 @@ namespace lib4neuro {
         double f_val2;  // f(x - delta)
 
         for (size_t i = 0; i < n_parameters; i++) {
-            delta = h * (1 + std::abs(parameters.at(i)));
+            delta                  = h * (1 + std::abs(parameters.at(i)));
             former_parameter_value = parameters.at(i);
 
             if (delta != 0) {
@@ -399,9 +402,9 @@ namespace lib4neuro {
                   0);
         std::vector<double> dummy_input;
         this->nets[0]->add_to_gradient_single(dummy_input,
-                                          error_vector,
-                                          1.0,
-                                          gradient_vector);
+                                              error_vector,
+                                              1.0,
+                                              gradient_vector);
     }
 
     void
@@ -410,7 +413,7 @@ namespace lib4neuro {
                                 double alpha,
                                 size_t batch) {
 
-        size_t dim_out = this->ds->get_output_dim();
+        size_t dim_out    = this->ds->get_output_dim();
         size_t n_elements = this->ds->get_n_elements();
         std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = this->ds->get_data();
 
@@ -429,8 +432,8 @@ namespace lib4neuro {
         for (auto el: *data) {  // Iterate through every element in the test set
 
             this->nets[0]->eval_single_debug(el.first,
-                                         error_derivative,
-                                         &params);  // Compute the net output and store it into 'output' variable
+                                             error_derivative,
+                                             &params);  // Compute the net output and store it into 'output' variable
             std::cout << "Input[";
             for (auto v: el.first) {
                 std::cout << v << ", ";
@@ -462,9 +465,9 @@ namespace lib4neuro {
                       grad.end(),
                       0.0);
             this->nets[0]->add_to_gradient_single_debug(el.first,
-                                                    error_derivative,
-                                                    1.0,
-                                                    grad);
+                                                        error_derivative,
+                                                        1.0,
+                                                        grad);
             for (size_t i = 0; i < grad.size(); ++i) {
                 grad_sum.at(i) += grad.at(i);
             }
@@ -490,8 +493,8 @@ namespace lib4neuro {
         double output = 0, val;
 
         this->nets[0]->eval_single(this->ds->get_data()->at(i).first,
-                               error_vector,
-                               parameter_vector);
+                                   error_vector,
+                                   parameter_vector);
 
         for (size_t j = 0; j < error_vector.size(); ++j) {  // Compute difference for every element of the output vector
             val = error_vector.at(j) - this->ds->get_data()->at(i).second.at(j);
@@ -500,7 +503,7 @@ namespace lib4neuro {
 
         for (size_t j = 0; j < error_vector.size(); ++j) {
             error_vector.at(j) =
-                    2.0 * (error_vector.at(j) - this->ds->get_data()->at(i).second.at(j)); //real - expected result
+                2.0 * (error_vector.at(j) - this->ds->get_data()->at(i).second.at(j)); //real - expected result
         }
 
         return sqrt(output);
@@ -509,17 +512,17 @@ namespace lib4neuro {
 
     std::vector<double> MSE::get_parameters() {
         std::vector<double> output(this->get_dimension());
-        for(size_t i = 0; i < this->nets[0]->get_n_weights(); ++i){
+        for (size_t         i = 0; i < this->nets[0]->get_n_weights(); ++i) {
             output[i] = this->nets[0]->get_parameter_ptr_weights()->at(i);
         }
-        for(size_t i = 0; i < this->nets[0]->get_n_biases(); ++i){
+        for (size_t         i = 0; i < this->nets[0]->get_n_biases(); ++i) {
             output[i + this->nets[0]->get_n_weights()] = this->nets[0]->get_parameter_ptr_biases()->at(i);
         }
         return output;
     }
 
-    void MSE::set_parameters(std::vector<double> &params) {
-        this->nets[0]->copy_parameter_space( &params );
+    void MSE::set_parameters(std::vector<double>& params) {
+        this->nets[0]->copy_parameter_space(&params);
     }
 
     size_t MSE::get_n_data_set() {
@@ -540,15 +543,15 @@ namespace lib4neuro {
     }
 
     ErrorSum::ErrorSum() {
-        this->summand = nullptr;
+        this->summand   = nullptr;
         this->dimension = 0;
     }
 
     ErrorSum::~ErrorSum() {
         if (this->summand) {
 
-            for( auto el: *this->summand){
-                if(el){
+            for (auto el: *this->summand) {
+                if (el) {
                     delete el;
                 }
             }
@@ -636,13 +639,13 @@ namespace lib4neuro {
                                              std::vector<double>* parameter_vector,
                                              std::vector<double>& error_vector) {
         double output = 0.0;
-        ErrorFunction* ef = nullptr;
+        ErrorFunction* ef     = nullptr;
         std::fill(error_vector.begin(),
                   error_vector.end(),
                   0);
 
         std::vector<double> error_vector_mem(error_vector.size());
-        for (size_t j = 0; j < this->summand->size(); ++j) {
+        for (size_t         j = 0; j < this->summand->size(); ++j) {
             ef = this->summand->at(i);
 
             if (ef) {
@@ -724,10 +727,10 @@ namespace lib4neuro {
         return this->summand->at(0)->get_parameters();
     }
 
-    void ErrorSum::set_parameters(std::vector<double> &params) {
+    void ErrorSum::set_parameters(std::vector<double>& params) {
         //TODO may cause problems for general error sum...
-        for(auto n: *this->summand){
-            n->set_parameters( params );
+        for (auto n: *this->summand) {
+            n->set_parameters(params);
         }
     }
 
@@ -747,12 +750,15 @@ namespace lib4neuro {
         return 0;
     }
 
-    double ErrorSum::eval_on_single_input(std::vector<double> *input, std::vector<double> *output,
-                                          std::vector<double> *weights) {
+    double ErrorSum::eval_on_single_input(std::vector<double>* input,
+                                          std::vector<double>* output,
+                                          std::vector<double>* weights) {
         double o = 0.0;
 
-        for(size_t i = 0; i < this->summand->size(); ++i){
-            o += this->summand->at( i )->eval_on_single_input( input, output, weights ) * this->summand_coefficient.at( i );
+        for (size_t i = 0; i < this->summand->size(); ++i) {
+            o += this->summand->at(i)->eval_on_single_input(input,
+                                                            output,
+                                                            weights) * this->summand_coefficient.at(i);
         }
 
         return o;
@@ -761,8 +767,8 @@ namespace lib4neuro {
     size_t ErrorSum::get_n_data_set() {
         size_t o = 0;
 
-        for(size_t i = 0; i < this->summand->size(); ++i){
-            o += this->summand->at( i )->get_n_data_set();
+        for (size_t i = 0; i < this->summand->size(); ++i) {
+            o += this->summand->at(i)->get_n_data_set();
         }
 
         return o;
@@ -771,8 +777,8 @@ namespace lib4neuro {
     size_t ErrorSum::get_n_test_data_set() {
         size_t o = 0;
 
-        for(size_t i = 0; i < this->summand->size(); ++i){
-            o += this->summand->at( i )->get_n_test_data_set();
+        for (size_t i = 0; i < this->summand->size(); ++i) {
+            o += this->summand->at(i)->get_n_test_data_set();
         }
 
         return o;
@@ -781,41 +787,43 @@ namespace lib4neuro {
     size_t ErrorSum::get_n_outputs() {
         size_t o = 0;
 
-        for(size_t i = 0; i < this->summand->size(); ++i){
-            o += this->summand->at( i )->get_n_outputs();
+        for (size_t i = 0; i < this->summand->size(); ++i) {
+            o += this->summand->at(i)->get_n_outputs();
         }
 
         return o;
     }
 
     void ErrorSum::divide_data_train_test(double percent) {
-        for(auto n: *this->summand){
-            n->divide_data_train_test( percent );
+        for (auto n: *this->summand) {
+            n->divide_data_train_test(percent);
         }
     }
 
     void ErrorSum::return_full_data_set_for_training() {
-        for(auto n: *this->summand){
+        for (auto n: *this->summand) {
             n->return_full_data_set_for_training();
         }
     }
 
-    void ErrorSum::get_jacobian_and_rhs(std::vector<std::vector<double>> &jacobian, std::vector<double> &rhs) {
-        for(auto n: *this->summand){
+    void ErrorSum::get_jacobian_and_rhs(std::vector<std::vector<double>>& jacobian,
+                                        std::vector<double>& rhs) {
+        for (auto n: *this->summand) {
             std::vector<double> rhs_loc;
-            n->get_jacobian_and_rhs( jacobian, rhs_loc );
+            n->get_jacobian_and_rhs(jacobian,
+                                    rhs_loc);
 
             size_t curr_size = rhs.size();
             rhs.resize(curr_size + rhs_loc.size());
-            for(size_t i = 0; i < rhs_loc.size(); ++i){
+            for (size_t i = 0; i < rhs_loc.size(); ++i) {
                 rhs.at(i + curr_size) = rhs_loc.at(i);
             }
         }
     }
 
     void ErrorSum::randomize_parameters(double scaling) {
-        for(auto n: *this->summand){
-            n->randomize_parameters( scaling );
+        for (auto n: *this->summand) {
+            n->randomize_parameters(scaling);
         }
     }
 
diff --git a/src/ErrorFunction/ErrorFunctions.h b/src/ErrorFunction/ErrorFunctions.h
index f0727b49..f15e10d2 100644
--- a/src/ErrorFunction/ErrorFunctions.h
+++ b/src/ErrorFunction/ErrorFunctions.h
@@ -36,8 +36,8 @@ namespace lib4neuro {
          * @return
          */
         virtual double eval_on_single_input(std::vector<double>* input,
-                                    std::vector<double>* output,
-                                    std::vector<double>* weights = nullptr) = 0;
+                                            std::vector<double>* output,
+                                            std::vector<double>* weights = nullptr) = 0;
 
         /**
          *
@@ -99,7 +99,7 @@ namespace lib4neuro {
          *
          * @param params
          */
-        virtual void set_parameters(std::vector<double> &params) = 0;
+        virtual void set_parameters(std::vector<double>& params) = 0;
 
         /**
          *
@@ -118,7 +118,8 @@ namespace lib4neuro {
          * @param jacobian
          * @param rhs
          */
-        virtual void get_jacobian_and_rhs(std::vector<std::vector<double>> &jacobian, std::vector<double> &rhs) = 0;
+        virtual void get_jacobian_and_rhs(std::vector<std::vector<double>>& jacobian,
+                                          std::vector<double>& rhs) = 0;
 
         /**
          *
@@ -288,7 +289,8 @@ namespace lib4neuro {
          * @param jacobian
          * @param rhs
          */
-        LIB4NEURO_API virtual void get_jacobian_and_rhs(std::vector<std::vector<double>> &jacobian, std::vector<double> &rhs) override ;
+        LIB4NEURO_API virtual void get_jacobian_and_rhs(std::vector<std::vector<double>>& jacobian,
+                                                        std::vector<double>& rhs) override;
         /**
          *
          * @param weights
@@ -450,19 +452,19 @@ namespace lib4neuro {
          *
          * @param params
          */
-        LIB4NEURO_API virtual void set_parameters(std::vector<double> &params) override;
+        LIB4NEURO_API virtual void set_parameters(std::vector<double>& params) override;
 
         /**
          *
          * @return
          */
-        LIB4NEURO_API virtual size_t get_n_data_set() override ;
+        LIB4NEURO_API virtual size_t get_n_data_set() override;
 
         /**
          *
          * @return
          */
-        LIB4NEURO_API virtual size_t get_n_test_data_set() override ;
+        LIB4NEURO_API virtual size_t get_n_test_data_set() override;
 
         /**
          *
@@ -643,7 +645,7 @@ namespace lib4neuro {
          *
          * @param params
          */
-        LIB4NEURO_API virtual void set_parameters(std::vector<double> &params) override;
+        LIB4NEURO_API virtual void set_parameters(std::vector<double>& params) override;
 
         /**
          *
@@ -655,7 +657,7 @@ namespace lib4neuro {
          *
          * @return
          */
-        LIB4NEURO_API virtual size_t get_n_test_data_set() override ;
+        LIB4NEURO_API virtual size_t get_n_test_data_set() override;
 
         /**
          *
@@ -680,8 +682,8 @@ namespace lib4neuro {
          * @param rhs
          */
         LIB4NEURO_API virtual void get_jacobian_and_rhs(
-                std::vector<std::vector<double>> &jacobian,
-                std::vector<double> &rhs) override;
+            std::vector<std::vector<double>>& jacobian,
+            std::vector<double>& rhs) override;
 
         /**
          *
@@ -691,7 +693,7 @@ namespace lib4neuro {
 
     protected:
         std::vector<ErrorFunction*>* summand;
-        std::vector<double> summand_coefficient;
+        std::vector<double>        summand_coefficient;
     };
 }
 
diff --git a/src/General/ExprtkWrapperSerialization.h b/src/General/ExprtkWrapperSerialization.h
index a57a8823..15304812 100644
--- a/src/General/ExprtkWrapperSerialization.h
+++ b/src/General/ExprtkWrapperSerialization.h
@@ -13,8 +13,8 @@
 BOOST_CLASS_EXPORT_KEY(ExprtkWrapper);
 
 typedef exprtk::symbol_table<double> symbol_table_t;
-typedef exprtk::expression<double> expression_t;
-typedef exprtk::parser<double> parser_t;
+typedef exprtk::expression<double>   expression_t;
+typedef exprtk::parser<double>       parser_t;
 
 /**
  * Class implementing the private properties
diff --git a/src/LearningMethods/GradientDescent.cpp b/src/LearningMethods/GradientDescent.cpp
index 3d6d71e4..d9a2c55b 100644
--- a/src/LearningMethods/GradientDescent.cpp
+++ b/src/LearningMethods/GradientDescent.cpp
@@ -14,10 +14,10 @@ namespace lib4neuro {
                                      size_t n_to_restart,
                                      int max_iters,
                                      size_t batch) {
-        this->tolerance = epsilon;
+        this->tolerance         = epsilon;
         this->restart_frequency = n_to_restart;
-        this->maximum_niters = max_iters;
-        this->batch = batch;
+        this->maximum_niters    = max_iters;
+        this->batch             = batch;
     }
 
     GradientDescent::~GradientDescent() {
@@ -44,25 +44,25 @@ namespace lib4neuro {
     }
 
     bool GradientDescent::perform_feasible_1D_step(
-            lib4neuro::ErrorFunction& ef,
-            double error_previous,
-            double step_coefficient,
-            std::shared_ptr<std::vector<double>> direction,
-            std::shared_ptr<std::vector<double>> parameters_before,
-            std::shared_ptr<std::vector<double>> parameters_after
+        lib4neuro::ErrorFunction& ef,
+        double error_previous,
+        double step_coefficient,
+        std::shared_ptr<std::vector<double>> direction,
+        std::shared_ptr<std::vector<double>> parameters_before,
+        std::shared_ptr<std::vector<double>> parameters_after
     ) {
 
         size_t i;
 
-        boost::random::mt19937 gen(std::time(0));
+        boost::random::mt19937                    gen(std::time(0));
         boost::random::uniform_int_distribution<> dis(0,
                                                       direction->size());
-        size_t max_dir_idx = dis(gen);
+        size_t                                    max_dir_idx = dis(gen);
 
         double error_current = error_previous + 1.0;
         while (error_current >= error_previous) {
             (*parameters_after)[max_dir_idx] =
-                    (*parameters_before)[max_dir_idx] - step_coefficient * (*direction)[max_dir_idx];
+                (*parameters_before)[max_dir_idx] - step_coefficient * (*direction)[max_dir_idx];
 
             error_current = ef.eval(parameters_after.get());
             if (step_coefficient < 1e-32) {
@@ -92,16 +92,16 @@ namespace lib4neuro {
             *ofs << "Initial error: " << ef.eval() << std::endl;
         }
 
-        double grad_norm = this->tolerance * 10.0, gamma, sx, beta;
-        double grad_norm_prev;
-        size_t i;
-        long long int iter_idx = this->maximum_niters;
-        size_t iter_counter = 0;
+        double        grad_norm    = this->tolerance * 10.0, gamma, sx, beta;
+        double        grad_norm_prev;
+        size_t        i;
+        long long int iter_idx     = this->maximum_niters;
+        size_t        iter_counter = 0;
 
-        gamma = 1.0;
+        gamma                = 1.0;
         double prev_val, val = 0.0, c = 1.25;
 
-        size_t n_parameters = ef.get_dimension();
+        size_t n_parameters                 = ef.get_dimension();
 
 
         std::vector<double>* gradient_current(new std::vector<double>(n_parameters));
@@ -120,11 +120,11 @@ namespace lib4neuro {
 
         val = ef.eval(params_current);
         size_t counter_good_guesses = 0, counter_bad_guesses = 0, counter_simplified_direction_good = 0, counter_simplified_direction_bad = 0;
-        double cooling = 1.0;
+        double cooling              = 1.0;
         while (grad_norm > this->tolerance && (iter_idx != 0)) {
             iter_idx--;
             iter_counter++;
-            prev_val = val;
+            prev_val       = val;
             grad_norm_prev = grad_norm;
 
             /* reset of the current gradient */
@@ -147,11 +147,11 @@ namespace lib4neuro {
             /* step length calculation */
             if (iter_counter < 10 || iter_counter % this->restart_frequency == 0) {
                 /* fixed step length */
-                gamma = 0.1 * this->tolerance;
+                gamma   = 0.1 * this->tolerance;
                 cooling = 1.0;
             } else {
                 /* angle between two consecutive gradients */
-                sx = 0.0;
+                sx     = 0.0;
                 for (i = 0; i < gradient_current->size(); ++i) {
                     sx += (gradient_current->at(i) * gradient_prev->at(i));
                 }
@@ -161,7 +161,7 @@ namespace lib4neuro {
                 } else if (sx > 1.0 - 5e-12) {
                     sx = 1 - 5e-12;
                 }
-                beta = std::sqrt(std::acos(sx) / lib4neuro::PI);
+                beta   = std::sqrt(std::acos(sx) / lib4neuro::PI);
 
                 eval_step_size_mk(gamma,
                                   beta,
@@ -181,12 +181,12 @@ namespace lib4neuro {
 
 
             /* switcheroo */
-            ptr_mem = gradient_prev;
-            gradient_prev = gradient_current;
+            ptr_mem          = gradient_prev;
+            gradient_prev    = gradient_current;
             gradient_current = ptr_mem;
 
-            ptr_mem = params_prev;
-            params_prev = params_current;
+            ptr_mem        = params_prev;
+            params_prev    = params_current;
             params_current = ptr_mem;
 
 
diff --git a/src/LearningMethods/GradientDescent.h b/src/LearningMethods/GradientDescent.h
index 7d76d1b1..52ff7580 100644
--- a/src/LearningMethods/GradientDescent.h
+++ b/src/LearningMethods/GradientDescent.h
@@ -78,12 +78,12 @@ namespace lib4neuro {
          * @param parameters_after[out] suggested state of the parameters after the analysis completes
          */
         virtual bool perform_feasible_1D_step(
-                lib4neuro::ErrorFunction& ef,
-                double error_previous,
-                double step_coefficient,
-                std::shared_ptr<std::vector<double>> direction,
-                std::shared_ptr<std::vector<double>> parameters_before,
-                std::shared_ptr<std::vector<double>> parameters_after
+            lib4neuro::ErrorFunction& ef,
+            double error_previous,
+            double step_coefficient,
+            std::shared_ptr<std::vector<double>> direction,
+            std::shared_ptr<std::vector<double>> parameters_before,
+            std::shared_ptr<std::vector<double>> parameters_after
         );
 
     public:
diff --git a/src/LearningMethods/GradientDescentBB.cpp b/src/LearningMethods/GradientDescentBB.cpp
index 7af37f31..971d15ef 100644
--- a/src/LearningMethods/GradientDescentBB.cpp
+++ b/src/LearningMethods/GradientDescentBB.cpp
@@ -13,10 +13,10 @@ namespace lib4neuro {
                                          size_t n_to_restart,
                                          int max_iters,
                                          size_t batch) {
-        this->tolerance = epsilon;
+        this->tolerance         = epsilon;
         this->restart_frequency = n_to_restart;
-        this->maximum_niters = max_iters;
-        this->batch = batch;
+        this->maximum_niters    = max_iters;
+        this->batch             = batch;
     }
 
     GradientDescentBB::~GradientDescentBB() {
@@ -35,16 +35,16 @@ namespace lib4neuro {
             *ofs << "Initial error: " << ef.eval() << std::endl;
         }
 
-        double grad_norm = this->tolerance * 10.0, gamma, sx, beta;
-        double grad_norm_prev;
-        size_t i;
-        long long int iter_idx = this->maximum_niters;
-        size_t iter_counter = 0;
+        double        grad_norm    = this->tolerance * 10.0, gamma, sx, beta;
+        double        grad_norm_prev;
+        size_t        i;
+        long long int iter_idx     = this->maximum_niters;
+        size_t        iter_counter = 0;
 
-        gamma = 1.0;
+        gamma                = 1.0;
         double prev_val, val = 0.0, c = 1.25, val_best;
 
-        size_t n_parameters = ef.get_dimension();
+        size_t n_parameters                 = ef.get_dimension();
 
 
         std::vector<double>* gradient_current(new std::vector<double>(n_parameters));
@@ -55,9 +55,9 @@ namespace lib4neuro {
 
         std::vector<double>* ptr_mem;
 
-        double alpha = -1.0, cc, gg;
+        double              alpha = -1.0, cc, gg;
         std::vector<double> dot__(3);
-        double d1 = 0.0, d2 = 0.0, d3 = 0.0;
+        double              d1    = 0.0, d2 = 0.0, d3 = 0.0;
 
 
         std::fill(gradient_current->begin(),
@@ -66,14 +66,14 @@ namespace lib4neuro {
         std::fill(gradient_prev->begin(),
                   gradient_prev->end(),
                   0.0);
-        val = ef.eval(params_current);
+        val      = ef.eval(params_current);
         val_best = val;
 
         double cooling_factor = 1.0;
         while (grad_norm > this->tolerance && (iter_idx != 0)) {
             iter_idx--;
             iter_counter++;
-            prev_val = val;
+            prev_val       = val;
             grad_norm_prev = grad_norm;
 
             /* reset of the current gradient */
@@ -97,7 +97,7 @@ namespace lib4neuro {
             /* step length calculation */
             if (iter_counter < 10 || iter_counter % this->restart_frequency < 10) {
                 /* fixed step length */
-                gamma = 0.1 * this->tolerance;
+                gamma          = 0.1 * this->tolerance;
                 cooling_factor = 1.0;
             } else {
 
@@ -131,12 +131,12 @@ namespace lib4neuro {
 
 
             /* switcheroo */
-            ptr_mem = gradient_prev;
-            gradient_prev = gradient_current;
+            ptr_mem          = gradient_prev;
+            gradient_prev    = gradient_current;
             gradient_current = ptr_mem;
 
-            ptr_mem = params_prev;
-            params_prev = params_current;
+            ptr_mem        = params_prev;
+            params_prev    = params_current;
             params_current = ptr_mem;
 
             val = ef.eval(params_current);
@@ -206,7 +206,7 @@ namespace lib4neuro {
 
         delete gradient_current;
         delete gradient_prev;
-        delete params_current ;
+        delete params_current;
         delete params_prev;
         delete params_best;
 
diff --git a/src/LearningMethods/GradientDescentSingleItem.cpp b/src/LearningMethods/GradientDescentSingleItem.cpp
index 784f43c9..1f9cdc30 100644
--- a/src/LearningMethods/GradientDescentSingleItem.cpp
+++ b/src/LearningMethods/GradientDescentSingleItem.cpp
@@ -15,10 +15,10 @@ namespace lib4neuro {
                                                          size_t n_to_restart,
                                                          int max_iters,
                                                          size_t batch) {
-        this->tolerance = epsilon;
+        this->tolerance         = epsilon;
         this->restart_frequency = n_to_restart;
-        this->maximum_niters = max_iters;
-        this->batch = batch;
+        this->maximum_niters    = max_iters;
+        this->batch             = batch;
     }
 
     GradientDescentSingleItem::~GradientDescentSingleItem() {
@@ -36,7 +36,7 @@ namespace lib4neuro {
 
         double alpha = 10.0 / n_elems;
         alpha = 1.0;
-        double value = f.eval();
+        double value         = f.eval();
         double value_shifted = value + 1.0;
 
 
@@ -58,13 +58,13 @@ namespace lib4neuro {
                                              std::ofstream* ofs) {
 
         COUT_INFO("Finding a solution via a Gradient Descent [Single Item] method with adaptive step-length..."
-                          << std::endl);
+                      << std::endl);
         COUT_INFO("Initial error: " << ef.eval() << std::endl);
 
         size_t total_elements = ef.get_n_data_set(), updated_elements = 0, iter = 0;
-        double max_error = 1.0, error, gamma;
-        size_t iter_idx = this->maximum_niters;
-        size_t dim = ef.get_dimension();
+        double max_error      = 1.0, error, gamma;
+        size_t iter_idx       = this->maximum_niters;
+        size_t dim            = ef.get_dimension();
 
         std::vector<double> parameter_vector = ef.get_parameters();
         std::vector<double> gradient_vector(dim);
@@ -74,7 +74,7 @@ namespace lib4neuro {
             iter_idx--;
             iter++;
 
-            max_error = 0.0;
+            max_error        = 0.0;
             updated_elements = 0;
             std::fill(search_direction.begin(),
                       search_direction.end(),
diff --git a/src/LearningMethods/LearningSequence.cpp b/src/LearningMethods/LearningSequence.cpp
index 8fe76613..01bc7f42 100644
--- a/src/LearningMethods/LearningSequence.cpp
+++ b/src/LearningMethods/LearningSequence.cpp
@@ -12,7 +12,7 @@ namespace lib4neuro {
 
     LearningSequence::LearningSequence(double tolerance,
                                        int max_n_cycles) {
-        this->tol = tolerance;
+        this->tol                  = tolerance;
         this->max_number_of_cycles = max_n_cycles;
     }
 
@@ -27,7 +27,7 @@ namespace lib4neuro {
         double error = ef.eval();
         this->optimal_parameters = ef.get_parameters();
         double the_best_error = error;
-        int mcycles = this->max_number_of_cycles, cycle_idx = 0;
+        int    mcycles        = this->max_number_of_cycles, cycle_idx = 0;
 
         std::vector<double> params;
         while (error > this->tol && mcycles != 0) {
@@ -40,7 +40,7 @@ namespace lib4neuro {
 
                 //TODO do NOT copy vectors if not needed
                 params = *m->get_parameters();
-                error = ef.eval(&params);
+                error  = ef.eval(&params);
 
                 ef.set_parameters(params);
 
diff --git a/src/LearningMethods/LevenbergMarquardt.cpp b/src/LearningMethods/LevenbergMarquardt.cpp
index 928426b6..c3fe9898 100644
--- a/src/LearningMethods/LevenbergMarquardt.cpp
+++ b/src/LearningMethods/LevenbergMarquardt.cpp
@@ -43,39 +43,42 @@ struct lib4neuro::LevenbergMarquardt::LevenbergMarquardtImpl {
 };
 
 void lib4neuro::LevenbergMarquardt::LevenbergMarquardtImpl::get_jacobian_and_rhs(
-        lib4neuro::ErrorFunction& ef,
-        arma::Mat<double>& J,
-        arma::Col<double>& rhs,
-        size_t data_subset_size) {
+    lib4neuro::ErrorFunction& ef,
+    arma::Mat<double>& J,
+    arma::Col<double>& rhs,
+    size_t data_subset_size) {
 
 
     std::vector<std::vector<double>> jacobian;
-    std::vector<double> rhs_vec;
+    std::vector<double>              rhs_vec;
 
-    if(data_subset_size <= 0){
-        data_subset_size =  ef.get_n_data_set();
+    if (data_subset_size <= 0) {
+        data_subset_size = ef.get_n_data_set();
     }
 
-    if(data_subset_size < ef.get_n_data_set()){
-        ef.divide_data_train_test((double)data_subset_size / (double)ef.get_n_data_set());
+    if (data_subset_size < ef.get_n_data_set()) {
+        ef.divide_data_train_test((double) data_subset_size / (double) ef.get_n_data_set());
     }
-    ef.get_jacobian_and_rhs(jacobian, rhs_vec);
+    ef.get_jacobian_and_rhs(jacobian,
+                            rhs_vec);
 
-    if(data_subset_size < ef.get_n_data_set()){
+    if (data_subset_size < ef.get_n_data_set()) {
         ef.return_full_data_set_for_training();
     }
 
-    size_t dim_out = jacobian.size();
+    size_t dim_out      = jacobian.size();
     size_t n_parameters = rhs_vec.size();
 
-    J.reshape(dim_out, n_parameters);
+    J.reshape(dim_out,
+              n_parameters);
     rhs.resize(n_parameters);
     J.fill(0.0);
     rhs.fill(0.0);
 
     for (size_t ri = 0; ri < jacobian.size(); ++ri) {
         for (size_t ci = 0; ci < n_parameters; ++ci) {
-            J.at(ri, ci) = jacobian[ri][ci];
+            J.at(ri,
+                 ci) = jacobian[ri][ci];
         }
     }
     for (size_t ci = 0; ci < n_parameters; ++ci) {
@@ -94,15 +97,15 @@ namespace lib4neuro {
                                            double lambda_increase,
                                            double lambda_decrease) : p_impl(new LevenbergMarquardtImpl()) {
 
-        this->p_impl->batch_size = bs;
-        this->p_impl->tolerance = tolerance;
-        this->p_impl->tolerance_gradient = tolerance_gradient;
-        this->p_impl->tolerance_parameters = tolerance_parameters;
+        this->p_impl->batch_size                   = bs;
+        this->p_impl->tolerance                    = tolerance;
+        this->p_impl->tolerance_gradient           = tolerance_gradient;
+        this->p_impl->tolerance_parameters         = tolerance_parameters;
         this->p_impl->LM_step_acceptance_threshold = LM_step_acceptance_threshold;
-        this->p_impl->lambda_initial = lambda_initial;
-        this->p_impl->lambda_increase = lambda_increase;
-        this->p_impl->lambda_decrease = lambda_decrease;
-        this->p_impl->maximum_niters = max_iters;
+        this->p_impl->lambda_initial               = lambda_initial;
+        this->p_impl->lambda_increase              = lambda_increase;
+        this->p_impl->lambda_decrease              = lambda_decrease;
+        this->p_impl->maximum_niters               = max_iters;
     }
 
     void LevenbergMarquardt::optimize(lib4neuro::ErrorFunction& ef,
@@ -118,13 +121,13 @@ namespace lib4neuro {
         double current_err = ef.eval();
 
         COUT_INFO(
-                "Finding a solution via a Levenberg-Marquardt method... Starting error: " << current_err << std::endl);
+            "Finding a solution via a Levenberg-Marquardt method... Starting error: " << current_err << std::endl);
         if (ofs && ofs->is_open()) {
             *ofs << "Finding a solution via a Levenberg-Marquardt method... Starting error: " << current_err
                  << std::endl;
         }
 
-        size_t n_parameters = ef.get_dimension();
+        size_t n_parameters  = ef.get_dimension();
         size_t n_data_points = ef.get_n_data_set();
         if (this->p_impl->batch_size > 0) {
             n_data_points = this->p_impl->batch_size;
@@ -140,21 +143,21 @@ namespace lib4neuro {
         arma::Mat<double> H_new(n_data_points,
                                 n_parameters);
 
-        double lambda = this->p_impl->lambda_initial;  // Dumping parameter
+        double lambda   = this->p_impl->lambda_initial;  // Damping parameter
         double prev_err = 0, update_norm = 0, gradient_norm = 0, mem_double = 0, jacobian_norm = 1;
 
 
-        bool update_J = true;
-        arma::Col<double> update;
-        arma::Col<double> rhs;
+        bool                update_J = true;
+        arma::Col<double>   update;
+        arma::Col<double>   rhs;
         std::vector<double> d_prep(n_data_points);
-        arma::Col<double> d;
+        arma::Col<double>   d;
 
         double slowdown_coeff = 0.25;
         //-------------------//
         // Solver iterations //
         //-------------------//
-        size_t iter_counter = 0;
+        size_t iter_counter   = 0;
         do {
 
             if (update_J) {
@@ -172,7 +175,7 @@ namespace lib4neuro {
                     mem_double *= mem_double;
                     gradient_norm += mem_double;
                 }
-                gradient_norm = std::sqrt(gradient_norm) / J.n_rows;
+                gradient_norm  = std::sqrt(gradient_norm) / J.n_rows;
 
                 /* Get approximation of Hessian (H ~ J'*J) */
                 H = J.t() * J;
@@ -185,7 +188,7 @@ namespace lib4neuro {
                                                          ci);
                     }
                 }
-                jacobian_norm = std::sqrt(jacobian_norm);
+                jacobian_norm  = std::sqrt(jacobian_norm);
 
                 /* Evaluate the error before updating parameters */
                 prev_err = ef.eval();
@@ -206,8 +209,8 @@ namespace lib4neuro {
                 params_tmp->at(i) = params_current->at(i) + update.at(i);
                 update_norm += update.at(i) * update.at(i);
             }
-            update_norm = std::sqrt(update_norm);
-            current_err = ef.eval(params_tmp.get());
+            update_norm   = std::sqrt(update_norm);
+            current_err   = ef.eval(params_tmp.get());
 
             /* Check, if the parameter update improved the function */
             if (current_err < prev_err) {
diff --git a/src/LearningMethods/ParticleSwarm.cpp b/src/LearningMethods/ParticleSwarm.cpp
index 123f0f42..8d376841 100644
--- a/src/LearningMethods/ParticleSwarm.cpp
+++ b/src/LearningMethods/ParticleSwarm.cpp
@@ -35,8 +35,8 @@
 void Particle::randomize_coordinates() {
 
     std::random_device seeder;
-    std::mt19937 gen(seeder());
-    for (unsigned int i = 0; i < this->coordinate_dim; ++i) {
+    std::mt19937       gen(seeder());
+    for (unsigned int  i = 0; i < this->coordinate_dim; ++i) {
         std::uniform_real_distribution<double> dist_coord(this->domain_bounds->at(2 * i),
                                                           this->domain_bounds->at(2 * i + 1));
         (*this->coordinate)[i] = dist_coord(gen);
@@ -45,8 +45,8 @@ void Particle::randomize_coordinates() {
 
 void Particle::randomize_parameters() {
 
-    std::random_device seeder;
-    std::mt19937 gen(seeder());
+    std::random_device                     seeder;
+    std::mt19937                           gen(seeder());
     std::uniform_real_distribution<double> dist_vel(0.5,
                                                     1.0);
     this->r1 = dist_vel(gen);
@@ -55,11 +55,11 @@ void Particle::randomize_parameters() {
 }
 
 void Particle::randomize_velocity() {
-    std::random_device seeder;
-    std::mt19937 gen(seeder());
+    std::random_device                     seeder;
+    std::mt19937                           gen(seeder());
     std::uniform_real_distribution<double> dist_vel(0.5,
                                                     1.0);
-    for (unsigned int i = 0; i < this->coordinate_dim; ++i) {
+    for (unsigned int                      i = 0; i < this->coordinate_dim; ++i) {
         (*this->velocity)[i] = dist_vel(gen);
     }
 }
@@ -67,13 +67,13 @@ void Particle::randomize_velocity() {
 Particle::Particle(lib4neuro::ErrorFunction* ef,
                    std::vector<double>* domain_bounds) {
 
-    this->ef = ef;
-    this->domain_bounds = new std::vector<double>(*domain_bounds);
+    this->ef             = ef;
+    this->domain_bounds  = new std::vector<double>(*domain_bounds);
     this->coordinate_dim = ef->get_dimension();
-    this->ef = ef;
+    this->ef             = ef;
 
-    this->coordinate = new std::vector<double>(this->coordinate_dim);
-    this->velocity = new std::vector<double>(this->coordinate_dim);
+    this->coordinate         = new std::vector<double>(this->coordinate_dim);
+    this->velocity           = new std::vector<double>(this->coordinate_dim);
     this->optimal_coordinate = new std::vector<double>(this->coordinate_dim);
 
 
@@ -103,15 +103,15 @@ Particle::Particle(lib4neuro::ErrorFunction* ef,
 
 
     for (size_t i = 0; i < central_system->size(); ++i) {
-        this->domain_bounds->at(2 * i) = central_system->at(i) - dispersion_coeff;
+        this->domain_bounds->at(2 * i)     = central_system->at(i) - dispersion_coeff;
         this->domain_bounds->at(2 * i + 1) = central_system->at(i) + dispersion_coeff;
     }
 
     this->coordinate_dim = ef->get_dimension();
-    this->ef = ef;
+    this->ef             = ef;
 
-    this->coordinate = new std::vector<double>(this->coordinate_dim);
-    this->velocity = new std::vector<double>(this->coordinate_dim);
+    this->coordinate         = new std::vector<double>(this->coordinate_dim);
+    this->velocity           = new std::vector<double>(this->coordinate_dim);
     this->optimal_coordinate = new std::vector<double>(this->coordinate_dim);
 
 
@@ -185,8 +185,8 @@ double Particle::change_coordinate(double w,
 
     /* Choose random global minima */
     std::vector<double>* random_global_best;
-    std::random_device rand_dev;
-    std::mt19937 engine{rand_dev()};
+    std::random_device                    rand_dev;
+    std::mt19937                          engine{rand_dev()};
     std::uniform_int_distribution<size_t> dist(0,
                                                global_min_vec.size() - 1);
     random_global_best = &global_min_vec[dist(engine)];
@@ -260,13 +260,13 @@ namespace lib4neuro {
 
         if (epsilon < 0 || gamma < 0 || delta < 0) {
             THROW_INVALID_ARGUMENT_ERROR(
-                    "Parameters 'gamma', 'epsilon' and 'delta' must be greater than or equal to zero!");
+                "Parameters 'gamma', 'epsilon' and 'delta' must be greater than or equal to zero!");
         }
 
-        this->gamma = gamma;
+        this->gamma   = gamma;
         this->epsilon = epsilon;
-        this->delta = delta;
-        this->pst = PARTICLE_SWARM_TYPE::GENERAL;
+        this->delta   = delta;
+        this->pst     = PARTICLE_SWARM_TYPE::GENERAL;
 
         this->init_constructor(domain_bounds,
                                c1,
@@ -298,15 +298,15 @@ namespace lib4neuro {
 
         if (this->epsilon < 0 || this->gamma < 0 || this->delta < 0) {
             THROW_INVALID_ARGUMENT_ERROR(
-                    "Parameters 'gamma', 'epsilon' and 'delta' must be greater than or equal to zero!");
+                "Parameters 'gamma', 'epsilon' and 'delta' must be greater than or equal to zero!");
         }
 
-        this->func_dim = ef.get_dimension();
+        this->func_dim         = ef.get_dimension();
 
 
         /* initialize the particles */
         std::vector<double> centroids(ef.get_parameters());
-        for (size_t pi = 0; pi < this->particle_swarm.size(); ++pi) {
+        for (size_t         pi = 0; pi < this->particle_swarm.size(); ++pi) {
             if (this->particle_swarm.at(pi)) {
                 delete this->particle_swarm.at(pi);
             }
@@ -318,29 +318,29 @@ namespace lib4neuro {
 
         this->optimal_parameters.resize(this->func_dim);
 
-        size_t outer_it = 0;
+        size_t outer_it                                = 0;
         Particle* particle;
 
         std::vector<std::vector<double>> global_best_vec;
-        double optimal_value = 0.0;
+        double                           optimal_value = 0.0;
 
         std::set<Particle*> cluster; //!< Particles in a cluster
        std::vector<double>* centroid = new std::vector<double>(this->func_dim); //!< Centroid coordinates
 
         double tmp_velocity;
-        double prev_max_velocity = 0;
+        double prev_max_velocity      = 0;
         double max_velocity;
-        double max_vel_step = 0;
+        double max_vel_step           = 0;
         double prev_max_vel_step;
         double euclidean_dist;
-        double current_err = -1;
+        double current_err            = -1;
 
         this->determine_optimal_coordinate_and_value(this->optimal_parameters,
                                                      optimal_value);
         COUT_INFO("Initial best value: " << optimal_value << std::endl);
 
         while (outer_it < this->iter_max) {
-            max_velocity = 0;
+            max_velocity   = 0;
             euclidean_dist = 0;
 
             //////////////////////////////////////////////////
@@ -377,7 +377,7 @@ namespace lib4neuro {
             }
 
             for (size_t pi = 0; pi < this->n_particles; pi++) {
-                particle = this->particle_swarm.at(pi);
+                particle     = this->particle_swarm.at(pi);
                 tmp_velocity = particle->change_coordinate(this->w,
                                                            this->c1,
                                                            this->c2,
@@ -386,7 +386,7 @@ namespace lib4neuro {
 
                 if (tmp_velocity > max_velocity) {
                     prev_max_velocity = max_velocity;
-                    max_velocity = tmp_velocity;
+                    max_velocity      = tmp_velocity;
                 }
 
                 /* Looking for nearby particles */
@@ -405,7 +405,7 @@ namespace lib4neuro {
             //}
 
             prev_max_vel_step = max_vel_step;
-            max_vel_step = max_velocity - prev_max_velocity;
+            max_vel_step      = max_velocity - prev_max_velocity;
 
             //TODO only in verbose mode
             euclidean_dist /= this->n_particles;
@@ -480,7 +480,7 @@ namespace lib4neuro {
         }
 
         this->err_thresh = err_thresh;
-        this->pst = pst;
+        this->pst        = pst;
 
         this->init_constructor(domain_bounds,
                                c1,
@@ -534,8 +534,8 @@ namespace lib4neuro {
 
     double ParticleSwarm::get_euclidean_distance(std::vector<double>* a,
                                                  std::vector<double>* b) {
-        double dist = 0, m;
-        for (size_t i = 0; i < a->size(); i++) {
+        double      dist = 0, m;
+        for (size_t i    = 0; i < a->size(); i++) {
             m = (*a)[i] - (*b)[i];
             m *= m;
             dist += m;
@@ -549,12 +549,12 @@ namespace lib4neuro {
                                          double w,
                                          size_t n_particles,
                                          size_t iter_max) {
-        this->c1 = c1;
-        this->c2 = c2;
-        this->c3 = (c1 + c2) / 2.0;
-        this->w = w;
+        this->c1          = c1;
+        this->c2          = c2;
+        this->c3          = (c1 + c2) / 2.0;
+        this->w           = w;
         this->n_particles = n_particles;
-        this->iter_max = iter_max;
+        this->iter_max    = iter_max;
         this->particle_swarm.resize(this->n_particles);
         std::fill(this->particle_swarm.begin(),
                   this->particle_swarm.end(),
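
For context on the coefficients realigned in the hunks above: Particle::change_coordinate() performs a particle-swarm step, and init_constructor() derives c3 = (c1 + c2) / 2, which plausibly weights the pull toward the randomly chosen global best drawn in the random_global_best hunk. A sketch of the velocity update in standard PSO notation, with r1, r2, r3 ~ U(0, 1) and the c3 term as lib4neuro's extension of the classical two-term rule (not a verbatim transcription of change_coordinate):

    v_i \leftarrow w\,v_i + c_1 r_1 (p_i - x_i) + c_2 r_2 (g - x_i) + c_3 r_3 (\hat{g} - x_i),
    \qquad x_i \leftarrow x_i + v_i

where p_i is the particle's own best coordinate, g the swarm's best, and \hat{g} the randomly drawn entry of global_min_vec.
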
diff --git a/src/LearningMethods/ParticleSwarm.h b/src/LearningMethods/ParticleSwarm.h
index 57ce75c8..642b925c 100644
--- a/src/LearningMethods/ParticleSwarm.h
+++ b/src/LearningMethods/ParticleSwarm.h
@@ -21,7 +21,7 @@ private:
 
     size_t coordinate_dim;
     std::vector<double>* coordinate = nullptr;
-    std::vector<double>* velocity = nullptr;
+    std::vector<double>* velocity   = nullptr;
 
     std::vector<double>* optimal_coordinate = nullptr;
     double optimal_value;
@@ -256,15 +256,15 @@ namespace lib4neuro {
          * @param iter_max Maximal number of iterations - optimization will stop after that, even if not converged
          */
         LIB4NEURO_API explicit ParticleSwarm(
-                std::vector<double>* domain_bounds,
-                double c1 = 1.711897,
-                double c2 = 1.711897,
-                double w = 0.711897,
-                double gamma = 0.5,
-                double epsilon = 0.02,
-                double delta = 0.7,
-                size_t n_particles = 50,
-                size_t iter_max = 1000
+            std::vector<double>* domain_bounds,
+            double c1 = 1.711897,
+            double c2 = 1.711897,
+            double w = 0.711897,
+            double gamma = 0.5,
+            double epsilon = 0.02,
+            double delta = 0.7,
+            size_t n_particles = 50,
+            size_t iter_max = 1000
         );
 
         /**
@@ -286,14 +286,14 @@ namespace lib4neuro {
          *                   ErrorFunction
          */
         LIB4NEURO_API explicit ParticleSwarm(
-                std::vector<double>* domain_bounds,
-                double err_thresh,
-                PARTICLE_SWARM_TYPE,
-                double c1 = 1.711897,
-                double c2 = 1.711897,
-                double w = 0.711897,
-                size_t n_particles = 50,
-                size_t iter_max = 1000
+            std::vector<double>* domain_bounds,
+            double err_thresh,
+            PARTICLE_SWARM_TYPE,
+            double c1 = 1.711897,
+            double c2 = 1.711897,
+            double w = 0.711897,
+            size_t n_particles = 50,
+            size_t iter_max = 1000
         );
 
         /**
diff --git a/src/NetConnection/ConnectionFunctionIdentity.cpp b/src/NetConnection/ConnectionFunctionIdentity.cpp
index d12ca4dd..1f41d76f 100644
--- a/src/NetConnection/ConnectionFunctionIdentity.cpp
+++ b/src/NetConnection/ConnectionFunctionIdentity.cpp
@@ -18,7 +18,7 @@ ConnectionFunctionIdentity::ConnectionFunctionIdentity() {
 }
 
 ConnectionFunctionIdentity::ConnectionFunctionIdentity(size_t pidx) {
-    this->param_idx = pidx;
+    this->param_idx  = pidx;
     this->is_unitary = false;
 }
 
diff --git a/src/Network/NeuralNetwork.cpp b/src/Network/NeuralNetwork.cpp
index 5ba0aa99..fe8bc379 100644
--- a/src/Network/NeuralNetwork.cpp
+++ b/src/Network/NeuralNetwork.cpp
@@ -19,8 +19,8 @@ namespace lib4neuro {
     NeuralNetwork::NeuralNetwork() {
 
 
-        this->delete_weights = true;
-        this->delete_biases = true;
+        this->delete_weights  = true;
+        this->delete_biases   = true;
         this->layers_analyzed = false;
     }
 
@@ -30,10 +30,11 @@ namespace lib4neuro {
             try {
                 boost::archive::text_iarchive ia(ifs);
                 ia >> *this;
-            } catch (boost::archive::archive_exception& e) {
+            }
+            catch (boost::archive::archive_exception& e) {
                 THROW_RUNTIME_ERROR(
-                        "Serialized archive error: '" + e.what() + "'! Please, check if your file is really "
-                                                                   "the serialized DataSet.");
+                    "Serialized archive error: '" + e.what() + "'! Please check if your file really is "
+                                                               "a serialized NeuralNetwork.");
             }
             ifs.close();
         } else {
@@ -95,7 +96,7 @@ namespace lib4neuro {
         }
 
         double potential, bias;
-        int bias_idx;
+        int    bias_idx;
 
         this->copy_parameter_space(custom_weights_and_biases);
 
@@ -123,8 +124,8 @@ namespace lib4neuro {
             /* we iterate through all neurons in this layer and propagate the signal to the neighboring neurons */
 
             for (auto si: *layer) {
-                bias = 0.0;
-                bias_idx = this->neuron_bias_indices.at(si);
+                bias      = 0.0;
+                bias_idx  = this->neuron_bias_indices.at(si);
                 if (bias_idx >= 0) {
                     bias = this->neuron_biases.at(bias_idx);
                 }
@@ -138,7 +139,7 @@ namespace lib4neuro {
                     size_t ci = c.second;
 
                     this->neuron_potentials.at(ti) +=
-                            this->connection_list.at(ci)->eval(this->connection_weights) * potential;
+                        this->connection_list.at(ci)->eval(this->connection_weights) * potential;
 
                     std::cout << "  adding input to neuron " << ti << " += "
                               << this->connection_list.at(ci)->eval(this->connection_weights) << "*" << potential
@@ -148,8 +149,8 @@ namespace lib4neuro {
         }
 
         unsigned int i = 0;
-        for (auto oi: this->output_neuron_indices) {
-            bias = 0.0;
+        for (auto    oi: this->output_neuron_indices) {
+            bias     = 0.0;
             bias_idx = this->neuron_bias_indices.at(oi);
             if (bias_idx >= 0) {
                 bias = this->neuron_biases.at(bias_idx);
@@ -256,9 +257,9 @@ namespace lib4neuro {
         this->neuron_biases.clear();
 
         this->connection_weights = parent_network.connection_weights;
-        this->neuron_biases = parent_network.neuron_biases;
+        this->neuron_biases      = parent_network.neuron_biases;
 
-        this->delete_biases = false;
+        this->delete_biases  = false;
         this->delete_weights = false;
     }
 
@@ -279,7 +280,7 @@ namespace lib4neuro {
         }
 
         double potential, bias;
-        int bias_idx;
+        int    bias_idx;
 
         this->copy_parameter_space(custom_weights_and_biases);
 
@@ -303,8 +304,8 @@ namespace lib4neuro {
             /* we iterate through all neurons in this layer and propagate the signal to the neighboring neurons */
 
             for (auto si: *layer) {
-                bias = 0.0;
-                bias_idx = this->neuron_bias_indices.at(si);
+                bias      = 0.0;
+                bias_idx  = this->neuron_bias_indices.at(si);
                 if (bias_idx >= 0) {
                     bias = this->neuron_biases.at(bias_idx);
                 }
@@ -316,14 +317,14 @@ namespace lib4neuro {
                     size_t ci = c.second;
 
                     this->neuron_potentials.at(ti) +=
-                            this->connection_list.at(ci)->eval(this->connection_weights) * potential;
+                        this->connection_list.at(ci)->eval(this->connection_weights) * potential;
                 }
             }
         }
 
         unsigned int i = 0;
-        for (auto oi: this->output_neuron_indices) {
-            bias = 0.0;
+        for (auto    oi: this->output_neuron_indices) {
+            bias     = 0.0;
             bias_idx = this->neuron_bias_indices.at(oi);
             if (bias_idx >= 0) {
                 bias = this->neuron_biases.at(bias_idx);
@@ -346,16 +347,16 @@ namespace lib4neuro {
 
         size_t bias_shift = this->get_n_weights();
         size_t neuron_idx;
-        int bias_idx;
+        int    bias_idx;
         double neuron_potential, neuron_potential_t, neuron_bias, connection_weight;
 
         NeuronDifferentiable* active_neuron;
 
         /* initial error propagation */
         std::shared_ptr<::std::vector<size_t>> current_layer = this->neuron_layers_feedforward.at(
-                this->neuron_layers_feedforward.size() - 1);
+            this->neuron_layers_feedforward.size() - 1);
         //TODO might not work in the future as the output neurons could be permuted
-        for (size_t i = 0; i < current_layer->size(); ++i) {
+        for (size_t                            i             = 0; i < current_layer->size(); ++i) {
             neuron_idx = current_layer->at(i);
             scaling_backprog[neuron_idx] = error_derivative[i] * error_scaling;
         }
@@ -367,22 +368,22 @@ namespace lib4neuro {
 
             for (size_t i = 0; i < current_layer->size(); ++i) {
 
-                neuron_idx = current_layer->at(i);
+                neuron_idx    = current_layer->at(i);
                 active_neuron = dynamic_cast<NeuronDifferentiable*> (this->neurons.at(neuron_idx).get());
 
                 if (active_neuron) {
-                    bias_idx = this->neuron_bias_indices.at(neuron_idx);
+                    bias_idx         = this->neuron_bias_indices.at(neuron_idx);
                     neuron_potential = this->neuron_potentials.at(neuron_idx);
 
                     if (bias_idx >= 0) {
                         neuron_bias = this->neuron_biases.at(bias_idx);
                         gradient[bias_shift + bias_idx] += scaling_backprog[neuron_idx] *
                                                            active_neuron->activation_function_eval_derivative_bias(
-                                                                   neuron_potential,
-                                                                   neuron_bias);
+                                                               neuron_potential,
+                                                               neuron_bias);
                         scaling_backprog[neuron_idx] *= active_neuron->activation_function_eval_derivative(
-                                neuron_potential,
-                                neuron_bias);
+                            neuron_potential,
+                            neuron_bias);
                     }
 
                     /* connections to lower level neurons */
@@ -391,7 +392,7 @@ namespace lib4neuro {
                         size_t ci = c.second;
 
                         neuron_potential_t = this->neurons.at(ti)->get_last_activation_value();
-                        connection_weight = this->connection_list.at(ci)->eval(this->connection_weights);
+                        connection_weight  = this->connection_list.at(ci)->eval(this->connection_weights);
 
                         this->connection_list.at(ci)->eval_partial_derivative(*this->get_parameter_ptr_weights(),
                                                                               gradient,
@@ -402,7 +403,7 @@ namespace lib4neuro {
                     }
                 } else {
                     THROW_INVALID_ARGUMENT_ERROR(
-                            "Neuron used in backpropagation does not contain differentiable activation function!\n");
+                        "Neuron used in backpropagation does not contain differentiable activation function!\n");
                 }
             }
         }
@@ -420,14 +421,14 @@ namespace lib4neuro {
 
         size_t bias_shift = this->get_n_weights();
         size_t neuron_idx;
-        int bias_idx;
+        int    bias_idx;
         double neuron_potential, neuron_activation_t, neuron_bias, connection_weight;
 
         NeuronDifferentiable* active_neuron;
 
         /* initial error propagation */
         std::shared_ptr<::std::vector<size_t>> current_layer = this->neuron_layers_feedforward.at(
-                this->neuron_layers_feedforward.size() - 1);
+            this->neuron_layers_feedforward.size() - 1);
         //TODO might not work in the future as the output neurons could be permuted
         std::cout << "Error scaling on the output layer: ";
         for (size_t i = 0; i < current_layer->size(); ++i) {
@@ -445,24 +446,24 @@ namespace lib4neuro {
 
             for (size_t i = 0; i < current_layer->size(); ++i) {
 
-                neuron_idx = current_layer->at(i);
+                neuron_idx    = current_layer->at(i);
                 active_neuron = dynamic_cast<NeuronDifferentiable*> (this->neurons.at(neuron_idx).get());
 
                 if (active_neuron) {
                     std::cout << "  [backpropagation] active neuron: " << neuron_idx << std::endl;
 
-                    bias_idx = this->neuron_bias_indices.at(neuron_idx);
+                    bias_idx         = this->neuron_bias_indices.at(neuron_idx);
                     neuron_potential = this->neuron_potentials.at(neuron_idx);
 
                     if (bias_idx >= 0) {
                         neuron_bias = this->neuron_biases.at(bias_idx);
                         gradient[bias_shift + bias_idx] += scaling_backprog[neuron_idx] *
                                                            active_neuron->activation_function_eval_derivative_bias(
-                                                                   neuron_potential,
-                                                                   neuron_bias);
+                                                               neuron_potential,
+                                                               neuron_bias);
                         scaling_backprog[neuron_idx] *= active_neuron->activation_function_eval_derivative(
-                                neuron_potential,
-                                neuron_bias);
+                            neuron_potential,
+                            neuron_bias);
                     }
 
                     std::cout << "      [backpropagation] scaling coefficient: " << scaling_backprog[neuron_idx]
@@ -474,7 +475,7 @@ namespace lib4neuro {
                         size_t ci = c.second;
 
                         neuron_activation_t = this->neurons.at(ti)->get_last_activation_value();
-                        connection_weight = this->connection_list.at(ci)->eval(this->connection_weights);
+                        connection_weight   = this->connection_list.at(ci)->eval(this->connection_weights);
 
                         std::cout << "      [backpropagation] value (" << ti << "): " << neuron_activation_t
                                   << ", scaling: " << scaling_backprog[neuron_idx] << std::endl;
@@ -488,7 +489,7 @@ namespace lib4neuro {
                     }
                 } else {
                     THROW_INVALID_ARGUMENT_ERROR(
-                            "Neuron used in backpropagation does not contain differentiable activation function!\n");
+                        "Neuron used in backpropagation does not contain differentiable activation function!\n");
                 }
             }
         }
@@ -517,7 +518,7 @@ namespace lib4neuro {
         // Init weight guess ("optimal" for logistic activation functions)
         boost::random::uniform_real_distribution<> dist(-1,
                                                         1);
-        for (size_t i = 0; i < this->neuron_biases.size(); i++) {
+        for (size_t                                i = 0; i < this->neuron_biases.size(); i++) {
             this->neuron_biases.at(i) = dist(gen);
         }
     }
@@ -780,7 +781,7 @@ namespace lib4neuro {
 
 
         ::std::vector<size_t> active_eval_set(2 * n);
-        size_t active_set_size[2];
+        size_t                active_set_size[2];
 
         /* feedforward analysis */
         active_set_size[0] = 0;
@@ -881,8 +882,8 @@ namespace lib4neuro {
         }
 
 
-        this->delete_weights = true;
-        this->delete_biases = true;
+        this->delete_weights  = true;
+        this->delete_biases   = true;
         this->layers_analyzed = false;
 
         unsigned int inp_dim = neuron_numbers->at(0);  //!< Network input dimension
@@ -1004,7 +1005,7 @@ namespace lib4neuro {
 
         /* Init variables containing indices of INPUT and OUTPUT neurons */
 
-        this->input_neuron_indices = input_layer_neuron_indices;
+        this->input_neuron_indices  = input_layer_neuron_indices;
         this->output_neuron_indices = current_layer_neuron_indices;
 
         this->analyze_layer_structure();
@@ -1039,7 +1040,7 @@ namespace lib4neuro {
                                          error_partial,
                                          1.0,
                                          jacobian[i]);
-            error[i] = data.second[i] - fv[i];
+            error[i]         = data.second[i] - fv[i];
             error_partial[i] = 0;
         }
     }
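
The two backpropagation hunks above realign one and the same recurrence: each neuron's scaling_backprog entry first contributes the bias gradient, is then multiplied by the activation derivative, and is finally pushed through the incoming connections to scale the weight gradients. A minimal self-contained C++ sketch of that recurrence for a two-neuron logistic chain; sigma, w01, g_b0 and friends are illustrative names, not lib4neuro API:

    #include <cmath>
    #include <cstdio>

    // Logistic activation and its partial derivatives, mirroring NeuronLogistic.
    static double sigma(double x, double b)    { return 1.0 / (1.0 + std::exp(b - x)); }
    static double sigma_dx(double x, double b) { double e = std::exp(b - x); return e / ((1 + e) * (1 + e)); }
    static double sigma_db(double x, double b) { return -sigma_dx(x, b); }

    int main() {
        double b0 = 0.1, b1 = -0.2;    // neuron biases
        double w01 = 0.5;              // weight of the connection 0 -> 1
        double x = 0.7, target = 1.0;  // network input and desired output

        /* forward pass: keep the potentials, as eval_single() does */
        double p0 = x,        a0 = sigma(p0, b0);
        double p1 = w01 * a0, a1 = sigma(p1, b1);

        /* backward pass: 'scaling' plays the role of scaling_backprog[neuron_idx] */
        double scaling = a1 - target;                // d(error)/d(output) for 0.5 * (a1 - target)^2
        double g_b1    = scaling * sigma_db(p1, b1); // bias gradient of neuron 1
        scaling *= sigma_dx(p1, b1);                 // through the activation...
        double g_w01   = scaling * a0;               // ...gives the weight gradient
        scaling *= w01;                              // ...and propagates to neuron 0
        double g_b0    = scaling * sigma_db(p0, b0);

        std::printf("dE/db1=%g dE/dw01=%g dE/db0=%g\n", g_b1, g_w01, g_b0);
        return 0;
    }
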
diff --git a/src/Network/NeuralNetwork.h b/src/Network/NeuralNetwork.h
index 7a749ac0..c498374f 100644
--- a/src/Network/NeuralNetwork.h
+++ b/src/Network/NeuralNetwork.h
@@ -463,7 +463,7 @@ namespace lib4neuro {
          *
          * @return
          */
-         //TODO WHY IS THIS HERE?
+        //TODO WHY IS THIS HERE?
         LIB4NEURO_API NormalizationStrategy* get_normalization_strategy_instance();
 
         /**
diff --git a/src/Neuron/NeuronLogistic.cpp b/src/Neuron/NeuronLogistic.cpp
index 9b11913b..ccac42af 100644
--- a/src/Neuron/NeuronLogistic.cpp
+++ b/src/Neuron/NeuronLogistic.cpp
@@ -17,10 +17,10 @@ namespace lib4neuro {
                                        double b) {
         //(e^(b + x) (e^b - e^x))/(e^b + e^x)^3
 
-        double ex = std::pow(lib4neuro::E,
-                             x);
-        double eb = std::pow(E,
-                             b);
+        double ex    = std::pow(lib4neuro::E,
+                                x);
+        double eb    = std::pow(E,
+                                b);
         double denom = (eb + ex);
 
         this->activation_val = (eb * ex * (eb - ex)) / (denom * denom * denom);
@@ -31,11 +31,11 @@ namespace lib4neuro {
                                                                        double b) {
         //-(e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
 
-        double eb = std::pow(E,
-                             b);
-        double ex = std::pow(E,
-                             x);
-        double ebex = eb * ex;
+        double eb    = std::pow(E,
+                                b);
+        double ex    = std::pow(E,
+                                x);
+        double ebex  = eb * ex;
         double denom = (eb + ex);
 
         return -(ebex * (-4 * ebex + eb * eb + ex * ex)) / (denom * denom * denom * denom);
@@ -60,11 +60,11 @@ namespace lib4neuro {
                                        double b) {
         //e^(b - x)/(e^(b - x) + 1)^2
 
-        double ex = std::pow(E,
-                             x);
-        double eb = std::pow(E,
-                             b);
-        double d = (eb / ex);
+        double ex    = std::pow(E,
+                                x);
+        double eb    = std::pow(E,
+                                b);
+        double d     = (eb / ex);
         double denom = (d + 1);
 
         this->activation_val = d / (denom * denom);
@@ -75,10 +75,10 @@ namespace lib4neuro {
                                                                        double b) {
         //(e^(b + x) (e^x - e^b))/(e^b + e^x)^3
 
-        double ex = std::pow(E,
-                             x);
-        double eb = std::pow(E,
-                             b);
+        double ex    = std::pow(E,
+                                x);
+        double eb    = std::pow(E,
+                                b);
         double denom = (eb + ex);
 
         return (eb * ex * (ex - eb)) / (denom * denom * denom);
@@ -115,10 +115,10 @@ namespace lib4neuro {
 
     double NeuronLogistic::activation_function_eval_derivative_bias(double x,
                                                                     double b) {
-        double ex = std::pow(E,
-                             b - x);
+        double ex    = std::pow(E,
+                                b - x);
         double denom = (ex + 1);
-        double res = -ex / (denom * denom);
+        double res   = -ex / (denom * denom);
 
         return res;
     }
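
The comment formulas realigned above all follow from one chain of derivatives. Writing the logistic activation as \sigma(x, b) = 1 / (1 + e^{b - x}), a worked consistency check in LaTeX:

    \sigma_x = \frac{e^{b-x}}{(1 + e^{b-x})^2} = \frac{e^{b+x}}{(e^b + e^x)^2}, \qquad
    \sigma_b = -\sigma_x, \qquad
    \sigma_{xx} = \frac{e^{b+x}\,(e^b - e^x)}{(e^b + e^x)^3}

which match, respectively, the e^(b - x)/(e^(b - x) + 1)^2 activation in the third hunk, the -ex / (denom * denom) result of activation_function_eval_derivative_bias, and the (e^(b + x) (e^b - e^x))/(e^b + e^x)^3 comment in the first hunk; the sign of \sigma_b follows because x and b enter only through b - x, with opposite signs.
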
diff --git a/src/NormalizationStrategy/NormalizationStrategy.h b/src/NormalizationStrategy/NormalizationStrategy.h
index a015d236..ae961f6e 100644
--- a/src/NormalizationStrategy/NormalizationStrategy.h
+++ b/src/NormalizationStrategy/NormalizationStrategy.h
@@ -23,7 +23,7 @@ public:
      */
     struct access;
 
-    virtual ~NormalizationStrategy () = default;
+    virtual ~NormalizationStrategy() = default;
 
     /**
      *
diff --git a/src/constants.h b/src/constants.h
index b2e7e81e..0976096b 100644
--- a/src/constants.h
+++ b/src/constants.h
@@ -2,7 +2,7 @@
 #ifndef INC_4NEURO_CONSTANTS_H
 #define INC_4NEURO_CONSTANTS_H
 
-namespace lib4neuro {
+namespace lib4neuro {
     const double E  = 2.7182818284590;
     const double PI = 3.14159265358979323846;
 }
diff --git a/src/examples/net_test_1.cpp b/src/examples/net_test_1.cpp
index d106b92c..0816d644 100644
--- a/src/examples/net_test_1.cpp
+++ b/src/examples/net_test_1.cpp
@@ -15,15 +15,15 @@ void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
     std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
 
     for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
-        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i]     = -10;
         domain_bounds[2 * i + 1] = 10;
     }
 
-    double c1 = 1.7;
-    double c2 = 1.7;
-    double w = 0.7;
+    double c1          = 1.7;
+    double c2          = 1.7;
+    double w           = 0.7;
     size_t n_particles = 50;
-    size_t iter_max = 10;
+    size_t iter_max    = 10;
 
     /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
      * terminating criterion is met */
@@ -32,18 +32,18 @@ void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
     /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
      * terminating criterion is met ('n' is the total number of particles) */
     double epsilon = 0.02;
-    double delta = 0.7;
+    double delta   = 0.7;
 
     l4n::ParticleSwarm swarm_01(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            iter_max
+        &domain_bounds,
+        c1,
+        c2,
+        w,
+        gamma,
+        epsilon,
+        delta,
+        n_particles,
+        iter_max
     );
     swarm_01.optimize(ef);
 
@@ -52,16 +52,16 @@ void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
     /* ERROR CALCULATION */
     std::cout << "Run finished! Error of the network[Particle swarm]: " << ef.eval(nullptr) << std::endl;
     std::cout
-            << "***********************************************************************************************************************"
-            << std::endl;
+        << "***********************************************************************************************************************"
+        << std::endl;
 }
 
 void optimize_via_gradient_descent(l4n::NeuralNetwork& net,
                                    l4n::ErrorFunction& ef) {
 
     std::cout
-            << "***********************************************************************************************************************"
-            << std::endl;
+        << "***********************************************************************************************************************"
+        << std::endl;
     l4n::GradientDescentBB gd(1e-6,
                               1000);
 
@@ -76,21 +76,21 @@ void optimize_via_gradient_descent(l4n::NeuralNetwork& net,
 int main() {
 
     std::cout
-            << "Running lib4neuro example   1: Basic use of the particle swarm or gradient method to train a simple network with few linear neurons"
-            << std::endl;
+        << "Running lib4neuro example   1: Basic use of the particle swarm or gradient method to train a simple network with few linear neurons"
+        << std::endl;
     std::cout
-            << "***********************************************************************************************************************"
-            << std::endl;
+        << "***********************************************************************************************************************"
+        << std::endl;
     std::cout << "The code attempts to find an approximate solution to the system of equations below:" << std::endl;
     std::cout << "0 * w1 + 1 * w2 = 0.50" << std::endl;
     std::cout << "1 * w1 + 0.5*w2 = 0.75" << std::endl;
     std::cout
-            << "***********************************************************************************************************************"
-            << std::endl;
+        << "***********************************************************************************************************************"
+        << std::endl;
 
     /* TRAIN DATA DEFINITION */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-    std::vector<double> inp, out;
+    std::vector<double>                                              inp, out;
 
     inp = {0, 1};
     out = {0.5};
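
For reference, the system printed by this example has an exact solution to check the optimizers against: the first equation gives w2 = 0.50 directly, and substituting into the second gives w1 = 0.75 - 0.5 * 0.50 = 0.50, so the error function has a zero at (w1, w2) = (0.5, 0.5).
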
diff --git a/src/examples/net_test_2.cpp b/src/examples/net_test_2.cpp
index b3ef28ec..345bbb88 100644
--- a/src/examples/net_test_2.cpp
+++ b/src/examples/net_test_2.cpp
@@ -14,15 +14,15 @@ void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
     std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
 
     for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
-        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i]     = -10;
         domain_bounds[2 * i + 1] = 10;
     }
 
-    double c1 = 1.7;
-    double c2 = 1.7;
-    double w = 0.7;
+    double c1          = 1.7;
+    double c2          = 1.7;
+    double w           = 0.7;
     size_t n_particles = 50;
-    size_t iter_max = 10;
+    size_t iter_max    = 10;
 
     /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
      * terminating criterion is met */
@@ -31,18 +31,18 @@ void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
     /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
      * terminating criterion is met ('n' is the total number of particles) */
     double epsilon = 0.02;
-    double delta = 0.7;
+    double delta   = 0.7;
 
     l4n::ParticleSwarm swarm_01(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            iter_max
+        &domain_bounds,
+        c1,
+        c2,
+        w,
+        gamma,
+        epsilon,
+        delta,
+        n_particles,
+        iter_max
     );
     swarm_01.optimize(ef);
 
@@ -50,8 +50,8 @@ void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
 
     std::cout << "Run finished! Error of the network[Particle swarm]: " << ef.eval(nullptr) << std::endl;
     std::cout
-            << "***********************************************************************************************************************"
-            << std::endl;
+        << "***********************************************************************************************************************"
+        << std::endl;
 }
 
 void optimize_via_gradient_descent(l4n::NeuralNetwork& net,
@@ -67,28 +67,28 @@ void optimize_via_gradient_descent(l4n::NeuralNetwork& net,
     /* ERROR CALCULATION */
     std::cout << "Run finished! Error of the network[Gradient descent]: " << ef.eval(nullptr) << std::endl;
     std::cout
-            << "***********************************************************************************************************************"
-            << std::endl;
+        << "***********************************************************************************************************************"
+        << std::endl;
 }
 
 int main() {
     std::cout
-            << "Running lib4neuro example   2: Basic use of the particle swarm method to train a network with five linear neurons and repeating edge weights"
-            << std::endl;
+        << "Running lib4neuro example   2: Basic use of the particle swarm method to train a network with five linear neurons and repeating edge weights"
+        << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout << "The code attempts to find an approximate solution to the system of equations below:" << std::endl;
     std::cout << " 0 * w1 + 1 * w2 = 0.50 + b1" << std::endl;
     std::cout << " 1 * w1 + 0.5*w2 = 0.75 + b1" << std::endl;
     std::cout << "(1.25 + b2) * w2 = 0.63 + b3" << std::endl;
     std::cout
-            << "***********************************************************************************************************************"
-            << std::endl;
+        << "***********************************************************************************************************************"
+        << std::endl;
 
     /* TRAIN DATA DEFINITION */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-    std::vector<double> inp, out;
+    std::vector<double>                                              inp, out;
 
     inp = {0, 1, 0};
     out = {0.5, 0};
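
For reference, the system printed by this example has three equations in five unknowns (w1, w2, b1, b2, b3), so it is underdetermined and the optimizer may land on any of infinitely many exact solutions. One worked instance, fixing b1 = b2 = 0: w2 = 0.50, w1 = 0.75 - 0.25 = 0.50, and (1.25)(0.50) = 0.63 + b3 gives b3 = -0.005.
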
diff --git a/src/examples/net_test_3.cpp b/src/examples/net_test_3.cpp
index c4532937..d9bc6a7f 100644
--- a/src/examples/net_test_3.cpp
+++ b/src/examples/net_test_3.cpp
@@ -41,8 +41,8 @@ void calculate_gradient_analytical(std::vector<double>& input,
                                    size_t n_hidden_neurons,
                                    std::vector<double>& gradient_analytical) {
 
-    double a, b, y, x = input[0];
-    for (size_t i = 0; i < n_hidden_neurons; ++i) {
+    double      a, b, y, x = input[0];
+    for (size_t i          = 0; i < n_hidden_neurons; ++i) {
         a = parameter_weights[i];
         b = parameter_biases[i];
         y = parameter_weights[n_hidden_neurons + i];
@@ -50,7 +50,7 @@ void calculate_gradient_analytical(std::vector<double>& input,
         gradient_analytical[i] += y * x * std::exp(b - a * x) / ((1 + std::exp(b - a * x)) * (1 + std::exp(b - a * x)));
         gradient_analytical[n_hidden_neurons + i] += 1.0 / ((1 + std::exp(b - a * x)));
         gradient_analytical[2 * n_hidden_neurons + i] -=
-                y * std::exp(b - a * x) / ((1 + std::exp(b - a * x)) * (1 + std::exp(b - a * x)));
+            y * std::exp(b - a * x) / ((1 + std::exp(b - a * x)) * (1 + std::exp(b - a * x)));
     }
 
 }
@@ -58,7 +58,7 @@ void calculate_gradient_analytical(std::vector<double>& input,
 int main(int argc,
          char** argv) {
 
-    int n_tests = 2;
+    int n_tests          = 2;
     int n_hidden_neurons = 2;
     try {
         /* Numbers of neurons in layers (including input and output layers) */
@@ -71,18 +71,18 @@ int main(int argc,
         std::vector<l4n::NEURON_TYPE> hidden_type_v = {l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC,
                                                        l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC,
                                                        l4n::NEURON_TYPE::LOGISTIC}; // hidden_type_v = {l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LINEAR}
-        l4n::FullyConnectedFFN nn1(&neuron_numbers_in_layers,
-                                   &hidden_type_v);
+        l4n::FullyConnectedFFN        nn1(&neuron_numbers_in_layers,
+                                          &hidden_type_v);
         nn1.randomize_parameters();
 
-        boost::random::mt19937 gen(std::time(0));
+        boost::random::mt19937                     gen(std::time(0));
         boost::random::uniform_real_distribution<> dist(-1,
                                                         1);
 
-        size_t n_parameters = nn1.get_n_weights() + nn1.get_n_biases();
+        size_t              n_parameters = nn1.get_n_weights() + nn1.get_n_biases();
         std::vector<double> gradient_backprogation(n_parameters);
         std::vector<double> gradient_analytical(n_parameters);
-        std::vector<double>* parameter_biases = nn1.get_parameter_ptr_biases();
+        std::vector<double>* parameter_biases  = nn1.get_parameter_ptr_biases();
         std::vector<double>* parameter_weights = nn1.get_parameter_ptr_weights();
         std::vector<double> error_derivative = {1};
 
@@ -93,7 +93,7 @@ int main(int argc,
             std::vector<double> input(1);
             std::vector<double> output(1);
 
-            input[0] = dist(gen);
+            input[0]  = dist(gen);
             output[0] = 0;
 
 
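Reading the calculate_gradient_analytical loop above back into formulas: with a_i = parameter_weights[i], y_i = parameter_weights[n + i], b_i = parameter_biases[i] and n hidden neurons, the fitted model and the accumulated gradient components are

    f(x) = \sum_{i=1}^{n} \frac{y_i}{1 + e^{b_i - a_i x}}, \qquad
    \frac{\partial f}{\partial a_i} = \frac{y_i\,x\,e^{b_i - a_i x}}{(1 + e^{b_i - a_i x})^2}, \qquad
    \frac{\partial f}{\partial y_i} = \frac{1}{1 + e^{b_i - a_i x}}, \qquad
    \frac{\partial f}{\partial b_i} = -\frac{y_i\,e^{b_i - a_i x}}{(1 + e^{b_i - a_i x})^2}

stored at gradient_analytical[i], [n + i] and [2 * n + i] respectively, which is what the backpropagated gradient is compared against.
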
diff --git a/src/examples/net_test_harmonic_oscilator.cpp b/src/examples/net_test_harmonic_oscilator.cpp
index 98f5c5ad..e216200c 100644
--- a/src/examples/net_test_harmonic_oscilator.cpp
+++ b/src/examples/net_test_harmonic_oscilator.cpp
@@ -63,16 +63,16 @@ void optimize_via_particle_swarm(l4n::DESolver& solver,
 
     printf("Solution via the particle swarm optimization!\n");
     std::vector<double> domain_bounds(
-            2 * (solver.get_solution(alpha)->get_n_biases() + solver.get_solution(alpha)->get_n_weights()));
+        2 * (solver.get_solution(alpha)->get_n_biases() + solver.get_solution(alpha)->get_n_weights()));
 
     for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
-        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i]     = -10;
         domain_bounds[2 * i + 1] = 10;
     }
 
     double c1 = 1.7;
     double c2 = 1.7;
-    double w = 0.700;
+    double w  = 0.700;
 
     /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
      * terminating criterion is met */
@@ -81,18 +81,18 @@ void optimize_via_particle_swarm(l4n::DESolver& solver,
     /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
      * terminating criterion is met ('n' is the total number of particles) */
     double epsilon = 0.02;
-    double delta = 0.7;
+    double delta   = 0.7;
 
     l4n::ParticleSwarm swarm(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            max_iters
+        &domain_bounds,
+        c1,
+        c2,
+        w,
+        gamma,
+        epsilon,
+        delta,
+        n_particles,
+        max_iters
     );
 
     solver.solve(swarm);
@@ -122,12 +122,12 @@ void test_harmonic_oscilator_fixed_E(double EE,
                                      size_t n_particles) {
     std::cout << "Finding a solution via the Particle Swarm Optimization" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
     /* SOLVER SETUP */
-    size_t n_inputs = 1;
-    size_t n_equations = 1;
+    size_t        n_inputs    = 1;
+    size_t        n_equations = 1;
     l4n::DESolver solver(n_equations,
                          n_inputs,
                          n_inner_neurons);
@@ -171,8 +171,8 @@ void test_harmonic_oscilator_fixed_E(double EE,
         data_vec_g.emplace_back(std::make_pair(inp,
                                                out));
     }
-    inp = {0.0};
-    out = {1.0};
+    inp                 = {0.0};
+    out                 = {1.0};
     data_vec_g.emplace_back(std::make_pair(inp,
                                            out));
 
@@ -199,32 +199,32 @@ void test_harmonic_oscilator_fixed_E(double EE,
 int main() {
     std::cout << "Running lib4neuro harmonic Oscilator example   1" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout << "          Governing equation: -y''(x) + x^2 * y(x) = E * y(x)" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout
-            << "Expressing solution as y(x) = sum over [a_i / (1 + exp(bi - wxi*x ))], i in [1, n], where n is the number of hidden neurons"
-            << std::endl;
+        << "Expressing solution as y(x) = sum over [a_i / (1 + exp(bi - wxi*x ))], i in [1, n], where n is the number of hidden neurons"
+        << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
-    double EE = -1.0;
+    double       EE              = -1.0;
     unsigned int n_inner_neurons = 2;
-    unsigned int train_size = 10;
-    double accuracy = 1e-3;
-    double ds = -5.0;
-    double de = 5.0;
+    unsigned int train_size      = 10;
+    double       accuracy        = 1e-3;
+    double       ds              = -5.0;
+    double       de              = 5.0;
 
     unsigned int test_size = 300;
-    double ts = -6.0;
-    double te = 6.0;
+    double       ts        = -6.0;
+    double       te        = 6.0;
 
     size_t particle_swarm_max_iters = 1000;
-    size_t n_particles = 100;
+    size_t n_particles              = 100;
     test_harmonic_oscilator_fixed_E(EE,
                                     accuracy,
                                     n_inner_neurons,
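
For reference on the governing equation printed in main(): the eigenproblem -y''(x) + x^2 y(x) = E y(x) admits square-integrable solutions only for E_n = 2n + 1, n = 0, 1, 2, ..., with (unnormalized) ground state y_0(x) = e^{-x^2 / 2}; a quick check is y_0'' = (x^2 - 1) y_0, hence -y_0'' + x^2 y_0 = y_0, i.e. E = 1. The driver above fixes EE = -1.0, which is not one of these eigenvalues.
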
diff --git a/src/examples/net_test_ode_1.cpp b/src/examples/net_test_ode_1.cpp
index 7cc1c579..28c48ec4 100644
--- a/src/examples/net_test_ode_1.cpp
+++ b/src/examples/net_test_ode_1.cpp
@@ -28,16 +28,16 @@ void optimize_via_particle_swarm(l4n::DESolver& solver,
 
     printf("Solution via the particle swarm optimization!\n");
     std::vector<double> domain_bounds(
-            2 * (solver.get_solution(alpha)->get_n_biases() + solver.get_solution(alpha)->get_n_weights()));
+        2 * (solver.get_solution(alpha)->get_n_biases() + solver.get_solution(alpha)->get_n_weights()));
 
     for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
-        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i]     = -10;
         domain_bounds[2 * i + 1] = 10;
     }
 
     double c1 = 1.7;
     double c2 = 1.7;
-    double w = 0.700;
+    double w  = 0.700;
 
     /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
      * terminating criterion is met */
@@ -46,18 +46,18 @@ void optimize_via_particle_swarm(l4n::DESolver& solver,
     /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
      * terminating criterion is met ('n' is the total number of particles) */
     double epsilon = 0.02;
-    double delta = 0.7;
+    double delta   = 0.7;
 
     l4n::ParticleSwarm swarm(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            max_iters
+        &domain_bounds,
+        c1,
+        c2,
+        w,
+        gamma,
+        epsilon,
+        delta,
+        n_particles,
+        max_iters
     );
 
     solver.solve(swarm);
@@ -88,8 +88,8 @@ void export_solution(size_t n_test_points,
                      l4n::MultiIndex& alpha_1,
                      l4n::MultiIndex& alpha_2,
                      const std::string prefix) {
-    l4n::NeuralNetwork* solution = solver.get_solution(alpha_0);
-    l4n::NeuralNetwork* solution_d = solver.get_solution(alpha_1);
+    l4n::NeuralNetwork* solution    = solver.get_solution(alpha_0);
+    l4n::NeuralNetwork* solution_d  = solver.get_solution(alpha_1);
     l4n::NeuralNetwork* solution_dd = solver.get_solution(alpha_2);
 
     /* ISOTROPIC TEST SET FOR BOUNDARY CONDITIONS */
@@ -145,8 +145,8 @@ void export_solution(size_t n_test_points,
     ofs.close();
 
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
 }
 
@@ -163,12 +163,12 @@ void test_ode(double accuracy,
 
     std::cout << "Finding a solution via the Particle Swarm Optimization and Gradient descent method!" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
     /* SOLVER SETUP */
-    size_t n_inputs = 1;
-    size_t n_equations = 3;
+    size_t        n_inputs    = 1;
+    size_t        n_equations = 3;
     l4n::DESolver solver_01(n_equations,
                             n_inputs,
                             n_inner_neurons);
@@ -210,7 +210,7 @@ void test_ode(double accuracy,
 
     /* TRAIN DATA FOR THE GOVERNING DE */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_g;
-    std::vector<double> test_points(train_size);
+    std::vector<double>                                              test_points(train_size);
 
 
     /* ISOTROPIC TRAIN SET */
@@ -219,7 +219,7 @@ void test_ode(double accuracy,
         inp[0] = frac * i;
         out[0] = 0.0;
         data_vec_g.push_back(std::make_pair(inp,
-                                               out));
+                                            out));
 
         test_points[i] = inp[0];
     }
@@ -262,7 +262,7 @@ void test_ode(double accuracy,
     auto start = std::chrono::system_clock::now();
 
     optimize_via_gradient_descent(solver_01,
-                                 accuracy);
+                                  accuracy);
     export_solution(n_test_points,
                     te,
                     ts,
@@ -272,7 +272,7 @@ void test_ode(double accuracy,
                     alpha_2,
                     "gradient_");
 
-    auto end = std::chrono::system_clock::now();
+    auto                          end             = std::chrono::system_clock::now();
     std::chrono::duration<double> elapsed_seconds = end - start;
     std::cout << "elapsed time: " << elapsed_seconds.count() << std::endl;
 }
@@ -280,33 +280,33 @@ void test_ode(double accuracy,
 int main() {
     std::cout << "Running lib4neuro Ordinary Differential Equation example   1" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout << "          Governing equation: y''(x) + 4y'(x) + 4y(x) = 0.0, for x in [0, 4]" << std::endl;
     std::cout << "Dirichlet boundary condition:                  y(0.0) = 1.0" << std::endl;
     std::cout << "  Neumann boundary condition:                 y'(0.0) = 1.0" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout
-            << "Expressing solution as y(x) = sum over [a_i / (1 + exp(bi - wxi*x ))], i in [1, n], where n is the number of hidden neurons"
-            << std::endl;
+        << "Expressing solution as y(x) = sum over [a_i / (1 + exp(bi - wxi*x ))], i in [1, n], where n is the number of hidden neurons"
+        << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
     unsigned int n_inner_neurons = 2;
-    unsigned int train_size = 10;
-    double accuracy = 1e-1;
-    double ds = 0.0;
-    double de = 4.0;
+    unsigned int train_size      = 10;
+    double       accuracy        = 1e-1;
+    double       ds              = 0.0;
+    double       de              = 4.0;
 
     unsigned int test_size = 10;
-    double ts = ds;
-    double te = de + 2;
+    double       ts        = ds;
+    double       te        = de + 2;
 
     size_t particle_swarm_max_iters = 10;
-    size_t n_particles = 2;
+    size_t n_particles              = 2;
 
     test_ode(accuracy,
              n_inner_neurons,
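
For reference, the ODE printed in main() has a closed form the exported network outputs can be compared against. The characteristic equation r^2 + 4r + 4 = (r + 2)^2 = 0 has the double root r = -2, so

    y(x) = (C_1 + C_2 x)\,e^{-2x}, \qquad y(0) = C_1 = 1, \qquad y'(0) = C_2 - 2C_1 = 1 \;\Rightarrow\; C_2 = 3,

i.e. y(x) = (1 + 3x)\,e^{-2x} on [0, 4].
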
diff --git a/src/examples/net_test_pde_1.cpp b/src/examples/net_test_pde_1.cpp
index ffc9eca1..b69cd919 100644
--- a/src/examples/net_test_pde_1.cpp
+++ b/src/examples/net_test_pde_1.cpp
@@ -31,16 +31,16 @@ void optimize_via_particle_swarm(l4n::DESolver& solver,
 
     printf("Solution via the particle swarm optimization!\n");
     std::vector<double> domain_bounds(
-            2 * (solver.get_solution(alpha)->get_n_biases() + solver.get_solution(alpha)->get_n_weights()));
+        2 * (solver.get_solution(alpha)->get_n_biases() + solver.get_solution(alpha)->get_n_weights()));
 
     for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
-        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i]     = -10;
         domain_bounds[2 * i + 1] = 10;
     }
 
     double c1 = 1.7;
     double c2 = 1.7;
-    double w = 0.700;
+    double w  = 0.700;
 
     /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
      * terminating criterion is met */
@@ -49,18 +49,18 @@ void optimize_via_particle_swarm(l4n::DESolver& solver,
     /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
      * terminating criterion is met ('n' is the total number of particles) */
     double epsilon = 0.02;
-    double delta = 0.7;
+    double delta   = 0.7;
 
     l4n::ParticleSwarm swarm(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            max_iters
+        &domain_bounds,
+        c1,
+        c2,
+        w,
+        gamma,
+        epsilon,
+        delta,
+        n_particles,
+        max_iters
     );
 
     solver.solve(swarm);
@@ -84,8 +84,8 @@ void export_solution(size_t n_test_points,
                      l4n::MultiIndex& alpha_01,
                      l4n::MultiIndex& alpha_20,
                      const std::string prefix) {
-    l4n::NeuralNetwork* solution = solver.get_solution(alpha_00);
-    l4n::NeuralNetwork* solution_t = solver.get_solution(alpha_01);
+    l4n::NeuralNetwork* solution    = solver.get_solution(alpha_00);
+    l4n::NeuralNetwork* solution_t  = solver.get_solution(alpha_01);
     l4n::NeuralNetwork* solution_xx = solver.get_solution(alpha_20);
 
     size_t i, j;
@@ -105,13 +105,13 @@ void export_solution(size_t n_test_points,
     std::cout.flush();
 
     std::vector<double> input(2), output(1), output_t(1), output_xx(1);
-    std::ofstream ofs(final_fn,
-                      std::ofstream::out);
-    double frac = (te - ts) / (n_test_points - 1);
+    std::ofstream       ofs(final_fn,
+                            std::ofstream::out);
+    double              frac        = (te - ts) / (n_test_points - 1);
     for (i = 0; i < n_test_points; ++i) {
-        x = i * frac + ts;
+        x      = i * frac + ts;
         for (j = 0; j < n_test_points; ++j) {
-            t = j * frac + ts;
+            t     = j * frac + ts;
             input = {x, t};
 
             solution->eval_single(input,
@@ -142,9 +142,9 @@ void export_solution(size_t n_test_points,
            final_fn.c_str(),
            0.0);
     for (i = 0; i < n_test_points; ++i) {
-        x = i * frac + ts;
+        x      = i * frac + ts;
         for (j = 0; j < n_test_points; ++j) {
-            t = j * frac + ts;
+            t     = j * frac + ts;
             input = {x, t};
 
             solution_t->eval_single(input,
@@ -220,8 +220,8 @@ void export_solution(size_t n_test_points,
     ofs.close();
 
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 }
 
 void test_pde(double accuracy,
@@ -236,8 +236,8 @@ void test_pde(double accuracy,
               size_t n_particles) {
 
     /* do not change below */
-    size_t n_inputs = 2;
-    size_t n_equations = 3;
+    size_t        n_inputs    = 2;
+    size_t        n_equations = 3;
     l4n::DESolver solver_01(n_equations,
                             n_inputs,
                             n_inner_neurons);
@@ -286,10 +286,10 @@ void test_pde(double accuracy,
             inp = {frac * j, frac * i};
             out = {0.0};
             data_vec_zero.emplace_back(std::make_pair(inp,
-                                                   out));
+                                                      out));
         }
     }
-    l4n::DataSet ds_00(&data_vec_zero);
+    l4n::DataSet      ds_00(&data_vec_zero);
 
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_t;
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_x;
@@ -355,37 +355,37 @@ void test_pde(double accuracy,
 int main() {
     std::cout << "Running lib4neuro Partial Differential Equation example   1" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout
-            << "          Governing equation: y_xx - y_t = 0,                                   for (x, t) in [0, 1] x [0, 1]"
-            << std::endl;
+        << "          Governing equation: y_xx - y_t = 0,                                   for (x, t) in [0, 1] x [0, 1]"
+        << std::endl;
     std::cout << "Dirichlet boundary condition:    y(0, t) = sin(t),                              for t in [0, 1]"
               << std::endl;
     std::cout << "Dirichlet boundary condition:    y(x, 0) = exp(-sqrt(0.5)x) * sin(-sqrt(0.5)x), for x in [0, 1]"
               << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout
-            << "Expressing solution as y(x, t) = sum over [a_i / (1 + exp(bi - wxi*x - wti*t))], i in [1, n], where n is the number of hidden neurons"
-            << std::endl;
+        << "Expressing solution as y(x, t) = sum over [a_i / (1 + exp(bi - wxi*x - wti*t))], i in [1, n], where n is the number of hidden neurons"
+        << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
     unsigned int n_inner_neurons = 2;
-    unsigned int train_size = 5;
-    double accuracy = 1e-1;
-    double ds = 0.0;
-    double de = 1.0;
+    unsigned int train_size      = 5;
+    double       accuracy        = 1e-1;
+    double       ds              = 0.0;
+    double       de              = 1.0;
 
     unsigned int test_size = 10;
-    double ts = ds;
-    double te = de + 0;
+    double       ts        = ds;
+    double       te        = de + 0;
 
     size_t particle_swarm_max_iters = 10;
-    size_t n_particles = 5;
+    size_t n_particles              = 5;
     test_pde(accuracy,
              n_inner_neurons,
              train_size,
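[Editor's note] For reference, the problem printed by the strings in main() above, set in standard notation; the closed-form solution on the last line is inferred from the two boundary conditions rather than taken from the patch:

    \[
    y_{xx} - y_t = 0, \qquad (x, t) \in [0, 1] \times [0, 1],
    \]
    \[
    y(0, t) = \sin(t), \qquad
    y(x, 0) = e^{-\sqrt{0.5}\,x} \sin\!\big(\!-\sqrt{0.5}\,x\big),
    \]
    with the solution expressed by the network ansatz
    \[
    y(x, t) = \sum_{i=1}^{n} \frac{a_i}{1 + e^{\,b_i - w_{x,i}\,x - w_{t,i}\,t}},
    \]
    where \(n\) is the number of hidden neurons. Both boundary conditions are
    traces of \(y(x, t) = e^{-\sqrt{0.5}\,x} \sin(t - \sqrt{0.5}\,x)\), which
    indeed satisfies \(y_{xx} = y_t\).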
diff --git a/src/examples/network_serialization.cpp b/src/examples/network_serialization.cpp
index b3a18159..bae85fcc 100644
--- a/src/examples/network_serialization.cpp
+++ b/src/examples/network_serialization.cpp
@@ -12,24 +12,24 @@
 int main() {
     std::cout << "Running lib4neuro Serialization example   1" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout << "First, it finds an approximate solution to the system of equations below:" << std::endl;
     std::cout << "0 * w1 + 1 * w2 = 0.50 + b" << std::endl;
     std::cout << "1 * w1 + 0.5*w2 = 0.75 + b" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout << "Then it stores the network with its weights into a file via serialization" << std::endl;
     std::cout << "Then it loads the network from a file via serialization" << std::endl;
     std::cout << "Finally it tests the loaded network parameters by evaluating the error function" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
     /* TRAIN DATA DEFINITION */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-    std::vector<double> inp, out;
+    std::vector<double>                                              inp, out;
 
     inp = {0, 1};
     out = {0.5};
@@ -97,15 +97,15 @@ int main() {
     std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
 
     for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
-        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i]     = -10;
         domain_bounds[2 * i + 1] = 10;
     }
 
-    double c1 = 1.7;
-    double c2 = 1.7;
-    double w = 0.7;
+    double c1          = 1.7;
+    double c2          = 1.7;
+    double w           = 0.7;
     size_t n_particles = 5;
-    size_t iter_max = 10;
+    size_t iter_max    = 10;
 
     /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
      * terminating criterion is met */
@@ -114,18 +114,18 @@ int main() {
     /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
      * terminating criterion is met ('n' is the total number of particles) */
     double epsilon = 0.02;
-    double delta = 0.7;
+    double delta   = 0.7;
 
     l4n::ParticleSwarm swarm_01(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            iter_max
+        &domain_bounds,
+        c1,
+        c2,
+        w,
+        gamma,
+        epsilon,
+        delta,
+        n_particles,
+        iter_max
     );
     swarm_01.optimize(mse);
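[Editor's note] The two comments above fully specify the swarm's stopping logic. Below is a minimal self-contained sketch of those two tests; the helper functions and the direct access to particle velocities/positions are assumptions for illustration only, not the l4n::ParticleSwarm API:

    // Sketch of the two termination criteria described in the comments above.
    // These helpers are hypothetical; l4n::ParticleSwarm evaluates the
    // equivalent tests internally.
    #include <cmath>
    #include <cstddef>
    #include <vector>

    bool velocity_criterion(double prev_max_velocity,
                            double curr_max_velocity,
                            double gamma) {
        // First criterion, as the comment states it: the maximal velocity of
        // the previous step fell below 'gamma' times the current maximum.
        return prev_max_velocity < gamma * curr_max_velocity;
    }

    bool centroid_criterion(const std::vector<std::vector<double>>& positions,
                            double epsilon,
                            double delta) {
        // Second criterion: at least 'delta * n' of the n particles lie
        // inside the epsilon-ball around the centroid of the whole swarm.
        size_t n   = positions.size();       // assumes a non-empty swarm
        size_t dim = positions.front().size();

        std::vector<double> centroid(dim, 0.0);
        for (const auto& p : positions)
            for (size_t d = 0; d < dim; ++d)
                centroid[d] += p[d] / n;

        size_t inside = 0;
        for (const auto& p : positions) {
            double dist2 = 0.0;
            for (size_t d = 0; d < dim; ++d)
                dist2 += (p[d] - centroid[d]) * (p[d] - centroid[d]);
            if (std::sqrt(dist2) <= epsilon)
                ++inside;
        }
        return inside >= static_cast<size_t>(delta * n);
    }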
 
@@ -142,14 +142,14 @@ int main() {
 
     /* SAVE NETWORK TO THE FILE */
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout << "Network generated by the example" << std::endl;
     net.write_stats();
     net.save_text("saved_network.4nt");
     std::cout
-            << "--------------------------------------------------------------------------------------------------------------------------------------------"
-            << std::endl;
+        << "--------------------------------------------------------------------------------------------------------------------------------------------"
+        << std::endl;
     double error = 0.0;
     inp = {0, 1};
     net.eval_single(inp,
@@ -164,17 +164,17 @@ int main() {
     std::cout << "x = (1, 0.5), expected output: 0.75, real output: " << out[0] << std::endl;
     std::cout << "Error of the network: " << 0.5 * error << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
     std::cout << "Network loaded from a file" << std::endl;
     l4n::NeuralNetwork net2("saved_network.4nt");
     net2.write_stats();
     std::cout
-            << "--------------------------------------------------------------------------------------------------------------------------------------------"
-            << std::endl;
+        << "--------------------------------------------------------------------------------------------------------------------------------------------"
+        << std::endl;
     error = 0.0;
-    inp = {0, 1};
+    inp   = {0, 1};
     net2.eval_single(inp,
                      out);
     error += (0.5 - out[0]) * (0.5 - out[0]);
@@ -187,7 +187,7 @@ int main() {
     std::cout << "x = (1, 0.5), expected output: 0.75, real output: " << out[0] << std::endl;
     std::cout << "Error of the network: " << 0.5 * error << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     return 0;
 }
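[Editor's note] The save/load pair this example exercises reduces to two calls; a condensed sketch, assuming a trained network 'net' in scope as above (the file name is the one the example itself uses):

    // Condensed round-trip of the two serialization APIs used above.
    net.save_text("saved_network.4nt");                // serialize to a text file
    l4n::NeuralNetwork restored("saved_network.4nt");  // deserialize via the file-name constructor
    std::vector<double> in = {0, 1}, res;
    restored.eval_single(in, res);                     // expect res[0] close to 0.5, as above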
diff --git a/src/examples/seminar.cpp b/src/examples/seminar.cpp
index c883afd4..11926803 100644
--- a/src/examples/seminar.cpp
+++ b/src/examples/seminar.cpp
@@ -16,26 +16,26 @@ int main() {
 
     std::cout << std::endl << "Running lib4neuro Moldyn Seminar example" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
 
-    l4n::NeuralNetwork XOR;
+    l4n::NeuralNetwork                 XOR;
     std::shared_ptr<l4n::NeuronLinear> in1 = std::make_shared<l4n::NeuronLinear>();
     std::shared_ptr<l4n::NeuronLinear> in2 = std::make_shared<l4n::NeuronLinear>();
-    size_t i1 = XOR.add_neuron(in1,
-                               l4n::BIAS_TYPE::NO_BIAS);
-    size_t i2 = XOR.add_neuron(in2,
-                               l4n::BIAS_TYPE::NO_BIAS);
+    size_t                             i1  = XOR.add_neuron(in1,
+                                                            l4n::BIAS_TYPE::NO_BIAS);
+    size_t                             i2  = XOR.add_neuron(in2,
+                                                            l4n::BIAS_TYPE::NO_BIAS);
 
     std::shared_ptr<l4n::NeuronLogistic> hn1 = std::make_shared<l4n::NeuronLogistic>();
     std::shared_ptr<l4n::NeuronLogistic> hn2 = std::make_shared<l4n::NeuronLogistic>();
-    size_t h1 = XOR.add_neuron(hn1);
-    size_t h2 = XOR.add_neuron(hn2);
+    size_t                               h1  = XOR.add_neuron(hn1);
+    size_t                               h2  = XOR.add_neuron(hn2);
 
     std::shared_ptr<l4n::NeuronLinear> on1 = std::make_shared<l4n::NeuronLinear>();
-    size_t o1 = XOR.add_neuron(on1,
-                               l4n::BIAS_TYPE::NO_BIAS);
+    size_t                             o1  = XOR.add_neuron(on1,
+                                                            l4n::BIAS_TYPE::NO_BIAS);
 
     XOR.add_connection_simple(i1,
                               h1);
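[Editor's note] The hunk shows only the first of the simple connections; for orientation, the remaining wiring of this 2-2-1 XOR topology presumably follows the same pattern. The diff context elides it, so the list below is an assumption, not the patch's content:

    // Hypothetical continuation of the 2-2-1 wiring; the connections after
    // (i1, h1) are elided by the diff context and assumed here for illustration.
    XOR.add_connection_simple(i1, h2);
    XOR.add_connection_simple(i2, h1);
    XOR.add_connection_simple(i2, h2);
    XOR.add_connection_simple(h1, o1);
    XOR.add_connection_simple(h2, o1);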
@@ -54,7 +54,7 @@ int main() {
 
     /* TRAIN DATA DEFINITION */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-    std::vector<double> inp, out;
+    std::vector<double>                                              inp, out;
 
     inp = {0, 0};
     out = {0};
@@ -100,15 +100,15 @@ int main() {
     std::vector<double> domain_bounds(2 * (XOR.get_n_weights() + XOR.get_n_biases()));
 
     for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
-        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i]     = -10;
         domain_bounds[2 * i + 1] = 10;
     }
 
-    double c1 = 1.7;
-    double c2 = 1.7;
-    double w = 0.7;
+    double c1          = 1.7;
+    double c2          = 1.7;
+    double w           = 0.7;
     size_t n_particles = 5;
-    size_t iter_max = 10;
+    size_t iter_max    = 10;
 
     /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
      * terminating criterion is met */
@@ -117,18 +117,18 @@ int main() {
     /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
      * terminating criterion is met ('n' is the total number of particles) */
     double epsilon = 0.02;
-    double delta = 0.7;
+    double delta   = 0.7;
 
     l4n::ParticleSwarm swarm_01(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            iter_max
+        &domain_bounds,
+        c1,
+        c2,
+        w,
+        gamma,
+        epsilon,
+        delta,
+        n_particles,
+        iter_max
     );
     swarm_01.optimize(mse);
 
diff --git a/src/examples/x2_fitting.cpp b/src/examples/x2_fitting.cpp
index 86936ae3..82821541 100644
--- a/src/examples/x2_fitting.cpp
+++ b/src/examples/x2_fitting.cpp
@@ -9,15 +9,15 @@ int main() {
                           true);
     reader.read();
 
-    std::vector<unsigned int> input_ind = {0};
-    std::vector<unsigned int> output_ind = {1};
-    std::shared_ptr<l4n::DataSet> ds = reader.get_data_set(&input_ind,
-                                                           &output_ind);
-
-    std::vector<unsigned int> neuron_numbers_in_layers = {1, 15, 1};
-    std::vector<l4n::NEURON_TYPE> hidden_type_v = {l4n::NEURON_TYPE::LOGISTIC};
-    l4n::FullyConnectedFFN net(&neuron_numbers_in_layers,
-                               &hidden_type_v);
+    std::vector<unsigned int>     input_ind  = {0};
+    std::vector<unsigned int>     output_ind = {1};
+    std::shared_ptr<l4n::DataSet> ds         = reader.get_data_set(&input_ind,
+                                                                   &output_ind);
+
+    std::vector<unsigned int>     neuron_numbers_in_layers = {1, 15, 1};
+    std::vector<l4n::NEURON_TYPE> hidden_type_v            = {l4n::NEURON_TYPE::LOGISTIC};
+    l4n::FullyConnectedFFN        net(&neuron_numbers_in_layers,
+                                      &hidden_type_v);
 
     l4n::MSE mse(&net,
                  ds.get());
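[Editor's note] Given the column mapping above (input_ind = {0}, output_ind = {1}) and the example's name, the CSV presumably holds (x, x^2) pairs. A hypothetical generator for such a file; the file name and the sampling range are illustrative, not taken from the example:

    // Hypothetical generator for the two-column CSV this example expects:
    // column 0 is x (input_ind = {0}), column 1 is x*x (output_ind = {1}).
    #include <fstream>

    int main() {
        std::ofstream ofs("x2_data.csv");  // illustrative name; the real path is passed to l4n::CSVReader
        for (int i = -10; i <= 10; ++i) {
            double x = 0.1 * i;            // integer loop avoids accumulating floating-point error
            ofs << x << "," << x * x << "\n";
        }
        return 0;
    }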
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt
index 3e9198fc..cd405224 100644
--- a/src/tests/CMakeLists.txt
+++ b/src/tests/CMakeLists.txt
@@ -2,57 +2,57 @@
 # UNIT TESTS #
 ##############
 
-add_executable(linear_neuron_test NeuronLinear_test.cpp)
-target_link_libraries(linear_neuron_test lib4neuro boost_unit_test)
-target_include_directories(linear_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(linear_neuron_test NeuronLinear_test.cpp)
+TARGET_LINK_LIBRARIES(linear_neuron_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(linear_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(constant_neuron_test NeuronConstant_test.cpp)
-target_link_libraries(constant_neuron_test lib4neuro boost_unit_test)
-target_include_directories(constant_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(constant_neuron_test NeuronConstant_test.cpp)
+TARGET_LINK_LIBRARIES(constant_neuron_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(constant_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(binary_neuron_test NeuronBinary_test.cpp)
-target_link_libraries(binary_neuron_test lib4neuro boost_unit_test)
-target_include_directories(binary_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(binary_neuron_test NeuronBinary_test.cpp)
+TARGET_LINK_LIBRARIES(binary_neuron_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(binary_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(logistic_neuron_test NeuronLogistic_test.cpp)
-target_link_libraries(logistic_neuron_test lib4neuro boost_unit_test)
-target_include_directories(logistic_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(logistic_neuron_test NeuronLogistic_test.cpp)
+TARGET_LINK_LIBRARIES(logistic_neuron_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(logistic_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(connectionFunctionGeneral_test ConnectionFunctionGeneral_test.cpp)
-target_link_libraries(connectionFunctionGeneral_test lib4neuro boost_unit_test)
-target_include_directories(connectionFunctionGeneral_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(connectionFunctionGeneral_test ConnectionFunctionGeneral_test.cpp)
+TARGET_LINK_LIBRARIES(connectionFunctionGeneral_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(connectionFunctionGeneral_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(neural_network_test NeuralNetwork_test.cpp)
-target_link_libraries(neural_network_test lib4neuro boost_unit_test)
-target_include_directories(neural_network_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(neural_network_test NeuralNetwork_test.cpp)
+TARGET_LINK_LIBRARIES(neural_network_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(neural_network_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(connection_Function_identity_test ConnectionFunctionIdentity_test.cpp)
-target_link_libraries(connection_Function_identity_test lib4neuro boost_unit_test)
-target_include_directories(connection_Function_identity_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(connection_Function_identity_test ConnectionFunctionIdentity_test.cpp)
+TARGET_LINK_LIBRARIES(connection_Function_identity_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(connection_Function_identity_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(dataset_test DataSet_test.cpp)
-target_link_libraries(dataset_test lib4neuro boost_unit_test)
-target_include_directories(dataset_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(dataset_test DataSet_test.cpp)
+TARGET_LINK_LIBRARIES(dataset_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(dataset_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(errorfunction_test ErrorFunctions_test.cpp)
-target_link_libraries(errorfunction_test lib4neuro boost_unit_test)
-target_include_directories(errorfunction_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(errorfunction_test ErrorFunctions_test.cpp)
+TARGET_LINK_LIBRARIES(errorfunction_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(errorfunction_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(particle_swarm_test ParticleSwarm_test.cpp)
-target_link_libraries(particle_swarm_test lib4neuro boost_unit_test)
-target_include_directories(particle_swarm_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(particle_swarm_test ParticleSwarm_test.cpp)
+TARGET_LINK_LIBRARIES(particle_swarm_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(particle_swarm_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(particle_test Particle_test.cpp)
-target_link_libraries(particle_test lib4neuro boost_unit_test)
-target_include_directories(particle_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(particle_test Particle_test.cpp)
+TARGET_LINK_LIBRARIES(particle_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(particle_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(NeuralNetworkSum_test NeuralNetworkSum_test.cpp)
-target_link_libraries(NeuralNetworkSum_test lib4neuro boost_unit_test)
-target_include_directories(NeuralNetworkSum_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(NeuralNetworkSum_test NeuralNetworkSum_test.cpp)
+TARGET_LINK_LIBRARIES(NeuralNetworkSum_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(NeuralNetworkSum_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(DESolver_test DESolver_test.cpp)
-target_link_libraries(DESolver_test lib4neuro boost_unit_test)
-target_include_directories(DESolver_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(DESolver_test DESolver_test.cpp)
+TARGET_LINK_LIBRARIES(DESolver_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(DESolver_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
 #TODO fix GradientDescent test
 #add_executable(GradientDescent_test GradientDescent_test.cpp)
@@ -60,41 +60,41 @@ target_include_directories(DESolver_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_
 #target_include_directories(GradientDescent_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
 
-set(TEST_OUTPUT_DIR ${PROJECT_BINARY_DIR}/tests)
-
-SET( CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR})
-SET( CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR})
-SET( CMAKE_LIBRARY_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR})
-SET( CMAKE_LIBRARY_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR})
-SET( CMAKE_ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR})
-SET( CMAKE_ARCHIVE_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR})
-
-set_target_properties(
-        linear_neuron_test
-        constant_neuron_test
-        binary_neuron_test
-        logistic_neuron_test
-        connectionFunctionGeneral_test
-        connection_Function_identity_test
-        neural_network_test
-        dataset_test
-        particle_swarm_test
-        particle_test
-        NeuralNetworkSum_test
-        errorfunction_test
-        DESolver_test
-        #    GradientDescent_test
-
-
-        PROPERTIES
-        ARCHIVE_OUTPUT_DIRECTORY $<1:${TEST_OUTPUT_DIR}> 
-        LIBRARY_OUTPUT_DIRECTORY $<1:${TEST_OUTPUT_DIR}>
-        RUNTIME_OUTPUT_DIRECTORY $<1:${TEST_OUTPUT_DIR}>
-        #CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR}
-        #CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR}
-        #CMAKE_LIBRARY_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR}
-        #CMAKE_LIBRARY_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR}
-        #CMAKE_ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR}
-        #CMAKE_ARCHIVE_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR}
+SET(TEST_OUTPUT_DIR ${PROJECT_BINARY_DIR}/tests)
+
+SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR})
+SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR})
+SET(CMAKE_LIBRARY_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR})
+SET(CMAKE_LIBRARY_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR})
+SET(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR})
+SET(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR})
+
+SET_TARGET_PROPERTIES(
+    linear_neuron_test
+    constant_neuron_test
+    binary_neuron_test
+    logistic_neuron_test
+    connectionFunctionGeneral_test
+    connection_Function_identity_test
+    neural_network_test
+    dataset_test
+    particle_swarm_test
+    particle_test
+    NeuralNetworkSum_test
+    errorfunction_test
+    DESolver_test
+    #    GradientDescent_test
+
+
+    PROPERTIES
+    ARCHIVE_OUTPUT_DIRECTORY $<1:${TEST_OUTPUT_DIR}>
+    LIBRARY_OUTPUT_DIRECTORY $<1:${TEST_OUTPUT_DIR}>
+    RUNTIME_OUTPUT_DIRECTORY $<1:${TEST_OUTPUT_DIR}>
+    #CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR}
+    #CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR}
+    #CMAKE_LIBRARY_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR}
+    #CMAKE_LIBRARY_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR}
+    #CMAKE_ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR}
+    #CMAKE_ARCHIVE_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR}
 )
 
diff --git a/src/tests/ConnectionFunctionGeneral_test.cpp b/src/tests/ConnectionFunctionGeneral_test.cpp
index f85ec129..36d6f86b 100644
--- a/src/tests/ConnectionFunctionGeneral_test.cpp
+++ b/src/tests/ConnectionFunctionGeneral_test.cpp
@@ -35,7 +35,7 @@ BOOST_AUTO_TEST_SUITE(Connection_test)
         param_indices.push_back(0);
         std::string paramToFunction = "this do nothing! Why is it here?";
         BOOST_CHECK_NO_THROW(ConnectionFunctionGeneral* functionGeneral = new ConnectionFunctionGeneral(param_indices,
-                                     paramToFunction));
+                                 paramToFunction));
     }
 
 
diff --git a/src/tests/DESolver_test.cpp b/src/tests/DESolver_test.cpp
index f2c5ca0e..aaf1bd52 100644
--- a/src/tests/DESolver_test.cpp
+++ b/src/tests/DESolver_test.cpp
@@ -126,7 +126,7 @@ BOOST_AUTO_TEST_SUITE(DESolver_test)
                           std::invalid_argument);
         BOOST_CHECK_NO_THROW(DESolver deSolver(1,
                                                1,
-                                     1));
+                                 1));
 
         //TODO fix it
         //std::stringstream buffer1;
-- 
GitLab