From 2c433965578c503cede626e0e5a3c5a6f6b29511 Mon Sep 17 00:00:00 2001
From: Martin Beseda <martin.beseda@vsb.cz>
Date: Fri, 29 Mar 2019 16:07:51 +0100
Subject: [PATCH] [CODE] Reformatted source code.

---
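Notes for reviewers (this section is stripped by 'git am'): the hunks below
are intended as formatting-only; no functional changes are intended. Two
mechanical conventions are applied across the code base: vertical alignment
of assignment operators and declared names within a statement group, and
placing 'catch' on its own line rather than cuddling it with the closing
brace. A representative before/after pair, taken verbatim from the
GradientDescent::GradientDescent hunk in
src/LearningMethods/GradientDescent.cpp:

    /* before */
    this->tolerance = epsilon;
    this->restart_frequency = n_to_restart;
    this->maximum_niters = max_iters;
    this->batch = batch;

    /* after */
    this->tolerance         = epsilon;
    this->restart_frequency = n_to_restart;
    this->maximum_niters    = max_iters;
    this->batch             = batch;
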
 src/CSVReader/CSVReader.cpp                   |  24 +--
 src/CrossValidator/CrossValidator.cpp         |   2 +-
 src/DataSet/DataSet.cpp                       |  45 ++---
 src/ErrorFunction/ErrorFunctions.cpp          |  42 ++---
 src/ErrorFunction/ErrorFunctions.h            |   6 +-
 src/ErrorFunction/ErrorFunctionsMock.h        |  12 +-
 src/General/ExprtkWrapperSerialization.h      |   4 +-
 src/LearningMethods/GradientDescent.cpp       |  56 +++----
 src/LearningMethods/GradientDescent.h         |  12 +-
 src/LearningMethods/GradientDescentBB.cpp     |  38 ++---
 .../GradientDescentSingleItem.cpp             |  18 +-
 src/LearningMethods/LearningSequence.cpp      |   6 +-
 src/LearningMethods/LevenbergMarquardt.cpp    |  52 +++---
 src/LearningMethods/ParticleSwarm.cpp         |  84 +++++-----
 src/LearningMethods/ParticleSwarm.h           |  36 ++--
 .../ConnectionFunctionIdentity.cpp            |   2 +-
 src/Network/NeuralNetwork.cpp                 |  93 +++++------
 src/Network/NeuralNetworkSum.cpp              |   2 +-
 src/Neuron/NeuronLogistic.cpp                 |  42 ++---
 src/Solvers/DESolver.cpp                      |  54 +++---
 src/Solvers/DESolver.h                        |   6 +-
 src/constants.h                               |   2 +-
 src/examples/CMakeLists.txt                   | 154 +++++++++---------
 src/examples/dev_sandbox.cpp                  |   8 +-
 src/examples/net_test_1.cpp                   |  52 +++---
 src/examples/net_test_2.cpp                   |  52 +++---
 src/examples/net_test_3.cpp                   |  20 +--
 src/examples/net_test_harmonic_oscilator.cpp  |  70 ++++----
 src/examples/net_test_ode_1.cpp               |  76 ++++-----
 src/examples/net_test_pde_1.cpp               |  88 +++++-----
 src/examples/network_serialization.cpp        |  66 ++++----
 src/examples/seminar.cpp                      |  54 +++---
 src/examples/x2_fitting.cpp                   |  18 +-
 src/tests/CMakeLists.txt                      | 134 +++++++--------
 src/tests/ConnectionFunctionGeneral_test.cpp  |   2 +-
 src/tests/DESolver_test.cpp                   |   2 +-
 src/tests/DataSet_test.cpp                    |   2 +-
 src/tests/ErrorFunctions_test.cpp             |  24 +--
 src/tests/NeuralNetworkSum_test.cpp           |  12 +-
 src/tests/NeuralNetwork_test.cpp              |  10 +-
 src/tests/ParticleSwarm_test.cpp              |   2 +-
 src/tests/Particle_test.cpp                   |   6 +-
 42 files changed, 747 insertions(+), 743 deletions(-)

diff --git a/src/CSVReader/CSVReader.cpp b/src/CSVReader/CSVReader.cpp
index 68eed16f..e51807ba 100644
--- a/src/CSVReader/CSVReader.cpp
+++ b/src/CSVReader/CSVReader.cpp
@@ -22,15 +22,15 @@ namespace lib4neuro {
             THROW_RUNTIME_ERROR("The file path \'" + file_path + "\' specified in CSVReader is not accessible!");
         }
 
-        this->file_path = file_path;
-        this->delimiter = delimiter;
+        this->file_path         = file_path;
+        this->delimiter         = delimiter;
         this->ignore_first_line = ignore_first_line;
-        this->header_included = ignore_first_line;
+        this->header_included   = ignore_first_line;
     }
 
     void CSVReader::read() {
         std::ifstream ifs(this->file_path);
-        std::string line;
+        std::string   line;
 
         if (this->ignore_first_line) {
             std::getline(ifs,
@@ -47,8 +47,8 @@ namespace lib4neuro {
             }
 
             /* Separate elements of the line according to the delimiter */
-            size_t last = 0;
-            size_t next = 0;
+            size_t                   last = 0;
+            size_t                   next = 0;
             std::vector<std::string> separated_line;
             while ((next = line.find(this->delimiter,
                                      last)) != std::string::npos) {
@@ -91,7 +91,7 @@ namespace lib4neuro {
         for (auto line : this->data) {
             //TODO check empty values in data
             std::vector<double> input;
-            for (auto ind : *input_col_indices) {
+            for (auto           ind : *input_col_indices) {
                 std::string s;
 
                 try {
@@ -113,17 +113,19 @@ namespace lib4neuro {
                     /* Add loaded number to the vector of inputs */
                     input.push_back(tmp);
 
-                } catch (const std::out_of_range& e) {
+                }
+                catch (const std::out_of_range& e) {
                     THROW_OUT_OF_RANGE_ERROR("Non-existing index specified (" + std::to_string(ind) + ")!");
 
-                } catch (const boost::bad_lexical_cast& e) {
+                }
+                catch (const boost::bad_lexical_cast& e) {
                     THROW_RUNTIME_ERROR(
-                            "Value \"" + s + "\" is not numerical and so it cannot be used in Data Set!");
+                        "Value \"" + s + "\" is not numerical and so it cannot be used in Data Set!");
                 }
             }
 
             std::vector<double> output;
-            for (auto ind : *output_col_indices) {
+            for (auto           ind : *output_col_indices) {
                 output.emplace_back(std::stod(line.at(ind)));
             }
 
diff --git a/src/CrossValidator/CrossValidator.cpp b/src/CrossValidator/CrossValidator.cpp
index 0d255bb8..5ba768f5 100644
--- a/src/CrossValidator/CrossValidator.cpp
+++ b/src/CrossValidator/CrossValidator.cpp
@@ -6,7 +6,7 @@ namespace lib4neuro {
     LIB4NEURO_API CrossValidator::CrossValidator(LearningMethod* optimizer,
                                                  ErrorFunction* ef) {
         this->optimizer = optimizer;
-        this->ef = ef;
+        this->ef        = ef;
     }
 
     LIB4NEURO_API void CrossValidator::run_k_fold_test(unsigned int k,
diff --git a/src/DataSet/DataSet.cpp b/src/DataSet/DataSet.cpp
index e9046569..6b0b117f 100644
--- a/src/DataSet/DataSet.cpp
+++ b/src/DataSet/DataSet.cpp
@@ -10,9 +10,9 @@ BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::DataSet);
 namespace lib4neuro {
 
     DataSet::DataSet() {
-        this->n_elements = 0;
-        this->input_dim = 0;
-        this->output_dim = 0;
+        this->n_elements             = 0;
+        this->input_dim              = 0;
+        this->output_dim             = 0;
         this->normalization_strategy = std::make_shared<DoubleUnitStrategy>(DoubleUnitStrategy());
     }
 
@@ -22,10 +22,11 @@ namespace lib4neuro {
             try {
                 boost::archive::text_iarchive ia(ifs);
                 ia >> *this;
-            } catch (boost::archive::archive_exception& e) {
+            }
+            catch (boost::archive::archive_exception& e) {
                 THROW_RUNTIME_ERROR(
-                        "Serialized archive error: '" + e.what() + "'! Please, check if your file is really "
-                                                                   "the serialized DataSet.");
+                    "Serialized archive error: '" + e.what() + "'! Please, check if your file is really "
+                                                               "the serialized DataSet.");
             }
             ifs.close();
         } else {
@@ -40,8 +41,8 @@ namespace lib4neuro {
                      NormalizationStrategy* ns) {
         this->data.clear();
         this->n_elements = data_ptr->size();
-        this->data = *data_ptr;
-        this->input_dim = this->data[0].first.size();
+        this->data       = *data_ptr;
+        this->input_dim  = this->data[0].first.size();
         this->output_dim = this->data[0].second.size();
 
         if (ns) {
@@ -59,9 +60,9 @@ namespace lib4neuro {
                      double output,
                      NormalizationStrategy* ns) {
         std::vector<std::pair<std::vector<double>, std::vector<double>>> new_data_vec;
-        this->data = new_data_vec;
+        this->data       = new_data_vec;
         this->n_elements = 0;
-        this->input_dim = 1;
+        this->input_dim  = 1;
         this->output_dim = 1;
 
         if (ns) {
@@ -81,8 +82,8 @@ namespace lib4neuro {
                      unsigned int output_dim,
                      NormalizationStrategy* ns) {
         std::vector<std::pair<std::vector<double>, std::vector<double>>> new_data_vec;
-        this->data = new_data_vec;
-        this->input_dim = bounds.size() / 2;
+        this->data       = new_data_vec;
+        this->input_dim  = bounds.size() / 2;
         this->output_dim = output_dim;
         this->n_elements = 0;
 
@@ -104,7 +105,7 @@ namespace lib4neuro {
     void DataSet::add_data_pair(std::vector<double>& inputs,
                                 std::vector<double>& outputs) {
         if (this->n_elements == 0 && this->input_dim == 0 && this->output_dim == 0) {
-            this->input_dim = inputs.size();
+            this->input_dim  = inputs.size();
             this->output_dim = outputs.size();
         }
 
@@ -157,8 +158,8 @@ namespace lib4neuro {
         // TODO add check of dataset dimensions
 
         std::vector<std::vector<double>> grid;
-        std::vector<double> tmp;
-        double frac;
+        std::vector<double>              tmp;
+        double                           frac;
         if (no_elems_in_one_dim < 1) {
             THROW_INVALID_ARGUMENT_ERROR("Number of elements in one dimension has to be >=1 !");
         }
@@ -276,7 +277,7 @@ namespace lib4neuro {
     template<class T>
     std::vector<std::vector<T>> DataSet::cartesian_product(const std::vector<std::vector<T>>* v) {
         std::vector<std::vector<double>> v_combined_old, v_combined, v_tmp;
-        std::vector<double> tmp;
+        std::vector<double>              tmp;
 
         for (const auto& e : v->at(0)) {
             tmp = {e};
@@ -318,12 +319,12 @@ namespace lib4neuro {
             this->max_min_inp_val.emplace_back(this->data.at(0).first.at(0));
         }
 
-        double tmp, tmp2;
+        double    tmp, tmp2;
         for (auto pair : this->data) {
             /* Finding maximum */
             //TODO make more efficiently
-            tmp = *std::max_element(pair.first.begin(),
-                                    pair.first.end());
+            tmp  = *std::max_element(pair.first.begin(),
+                                     pair.first.end());
             tmp2 = *std::max_element(pair.second.begin(),
                                      pair.second.end());
 
@@ -336,8 +337,8 @@ namespace lib4neuro {
             }
 
             /* Finding minimum */
-            tmp = *std::min_element(pair.first.begin(),
-                                    pair.first.end());
+            tmp  = *std::min_element(pair.first.begin(),
+                                     pair.first.end());
             tmp2 = *std::min_element(pair.second.begin(),
                                      pair.second.end());
 
@@ -467,7 +468,7 @@ namespace lib4neuro {
                                                 this->data.size()) + 1;
             n_chosen = max;
             std::vector<size_t> chosens;
-            size_t chosen;
+            size_t              chosen;
 
             for (size_t i = 0; i < n_chosen; i++) {
                 chosen = rand() % this->data.size();
diff --git a/src/ErrorFunction/ErrorFunctions.cpp b/src/ErrorFunction/ErrorFunctions.cpp
index 7d520471..3a6dd334 100644
--- a/src/ErrorFunction/ErrorFunctions.cpp
+++ b/src/ErrorFunction/ErrorFunctions.cpp
@@ -22,7 +22,7 @@ namespace lib4neuro {
         this->ds_full = this->ds;
 
         /* Choose random subset of the DataSet for training and the remaining part for validation */
-        boost::random::mt19937 gen;
+        boost::random::mt19937                    gen;
         boost::random::uniform_int_distribution<> dist(0,
                                                        ds_size - 1);
 
@@ -75,7 +75,7 @@ namespace lib4neuro {
                   0.0);
 
         std::vector<std::vector<double>> jac_loc;
-        for (auto item: *this->ds->get_data()) {
+        for (auto                        item: *this->ds->get_data()) {
 
             this->nets[0]->get_jacobian(jac_loc,
                                         item,
@@ -97,7 +97,7 @@ namespace lib4neuro {
     MSE::MSE(NeuralNetwork* net,
              DataSet* ds) {
         this->nets.push_back(net);
-        this->ds = ds;
+        this->ds        = ds;
         this->dimension = net->get_n_weights() + net->get_n_biases();
     }
 
@@ -124,16 +124,16 @@ namespace lib4neuro {
                                  std::vector<double>* weights,
                                  bool verbose
     ) {
-        size_t dim_in = data_set->get_input_dim();
+        size_t dim_in  = data_set->get_input_dim();
         size_t dim_out = data_set->get_output_dim();
-        double error = 0.0, val, output_norm = 0;
+        double error   = 0.0, val, output_norm = 0;
 
         std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = data_set->get_data();
         size_t n_elements = data->size();
 
         //TODO instead use something smarter
         std::vector<std::vector<double>> outputs(data->size());
-        std::vector<double> output(dim_out);
+        std::vector<double>              output(dim_out);
 
         if (verbose) {
             COUT_DEBUG("Evaluation of the error function MSE on the given data-set" << std::endl);
@@ -170,12 +170,12 @@ namespace lib4neuro {
         double denormalized_real_output;
 
         std::string separator = "";
-        for (size_t i = 0; i < data->size(); i++) {
+        for (size_t i         = 0; i < data->size(); i++) {
 
             /* Compute difference for every element of the output vector */
 #ifdef L4N_DEBUG
             std::stringstream ss_input;
-            for (size_t j = 0; j < dim_in; j++) {
+            for (size_t       j = 0; j < dim_in; j++) {
                 denormalized_real_input = data_set->get_denormalized_value(data->at(i).first.at(j));
                 ss_input << separator << denormalized_real_input;
                 separator = ",";
@@ -187,10 +187,10 @@ namespace lib4neuro {
 
             double loc_error = 0;
             output_norm = 0;
-            separator = "";
+            separator   = "";
             for (size_t j = 0; j < dim_out; ++j) {
                 denormalized_real_output = data_set->get_denormalized_value(data->at(i).second.at(j));
-                denormalized_output = data_set->get_denormalized_value(outputs.at(i).at(j));
+                denormalized_output      = data_set->get_denormalized_value(outputs.at(i).at(j));
 
 #ifdef L4N_DEBUG
                 ss_real_output << separator << denormalized_real_output;
@@ -316,7 +316,7 @@ namespace lib4neuro {
                                   double alpha,
                                   size_t batch) {
 
-        size_t dim_out = this->ds->get_output_dim();
+        size_t dim_out    = this->ds->get_output_dim();
         size_t n_elements = this->ds->get_n_elements();
         std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = this->ds->get_data();
 
@@ -362,8 +362,8 @@ namespace lib4neuro {
 
         //TODO check input vector sizes - they HAVE TO be allocated before calling this function
 
-        size_t n_parameters = this->get_dimension();
-        std::vector<double> parameters = this->get_parameters();
+        size_t              n_parameters = this->get_dimension();
+        std::vector<double> parameters   = this->get_parameters();
 
         double delta;  // Complete step size
         double former_parameter_value;
@@ -371,7 +371,7 @@ namespace lib4neuro {
         double f_val2;  // f(x - delta)
 
         for (size_t i = 0; i < n_parameters; i++) {
-            delta = h * (1 + std::abs(parameters.at(i)));
+            delta                  = h * (1 + std::abs(parameters.at(i)));
             former_parameter_value = parameters.at(i);
 
             if (delta != 0) {
@@ -413,7 +413,7 @@ namespace lib4neuro {
                                 double alpha,
                                 size_t batch) {
 
-        size_t dim_out = this->ds->get_output_dim();
+        size_t dim_out    = this->ds->get_output_dim();
         size_t n_elements = this->ds->get_n_elements();
         std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = this->ds->get_data();
 
@@ -503,7 +503,7 @@ namespace lib4neuro {
 
         for (size_t j = 0; j < error_vector.size(); ++j) {
             error_vector.at(j) =
-                    2.0 * (error_vector.at(j) - this->ds->get_data()->at(i).second.at(j)); //real - expected result
+                2.0 * (error_vector.at(j) - this->ds->get_data()->at(i).second.at(j)); //real - expected result
         }
 
         return sqrt(output);
@@ -512,10 +512,10 @@ namespace lib4neuro {
 
     std::vector<double> MSE::get_parameters() {
         std::vector<double> output(this->get_dimension());
-        for (size_t i = 0; i < this->nets[0]->get_n_weights(); ++i) {
+        for (size_t         i = 0; i < this->nets[0]->get_n_weights(); ++i) {
             output[i] = this->nets[0]->get_parameter_ptr_weights()->at(i);
         }
-        for (size_t i = 0; i < this->nets[0]->get_n_biases(); ++i) {
+        for (size_t         i = 0; i < this->nets[0]->get_n_biases(); ++i) {
             output[i + this->nets[0]->get_n_weights()] = this->nets[0]->get_parameter_ptr_biases()->at(i);
         }
         return output;
@@ -543,7 +543,7 @@ namespace lib4neuro {
     }
 
     ErrorSum::ErrorSum() {
-        this->summand = nullptr;
+        this->summand   = nullptr;
         this->dimension = 0;
     }
 
@@ -639,13 +639,13 @@ namespace lib4neuro {
                                              std::vector<double>* parameter_vector,
                                              std::vector<double>& error_vector) {
         double output = 0.0;
-        ErrorFunction* ef = nullptr;
+        ErrorFunction* ef     = nullptr;
         std::fill(error_vector.begin(),
                   error_vector.end(),
                   0);
 
         std::vector<double> error_vector_mem(error_vector.size());
-        for (size_t j = 0; j < this->summand->size(); ++j) {
+        for (size_t         j = 0; j < this->summand->size(); ++j) {
             ef = this->summand->at(i);
 
             if (ef) {
diff --git a/src/ErrorFunction/ErrorFunctions.h b/src/ErrorFunction/ErrorFunctions.h
index 36911e94..f15e10d2 100644
--- a/src/ErrorFunction/ErrorFunctions.h
+++ b/src/ErrorFunction/ErrorFunctions.h
@@ -682,8 +682,8 @@ namespace lib4neuro {
          * @param rhs
          */
         LIB4NEURO_API virtual void get_jacobian_and_rhs(
-                std::vector<std::vector<double>>& jacobian,
-                std::vector<double>& rhs) override;
+            std::vector<std::vector<double>>& jacobian,
+            std::vector<double>& rhs) override;
 
         /**
          *
@@ -693,7 +693,7 @@ namespace lib4neuro {
 
     protected:
         std::vector<ErrorFunction*>* summand;
-        std::vector<double> summand_coefficient;
+        std::vector<double>        summand_coefficient;
     };
 }
 
diff --git a/src/ErrorFunction/ErrorFunctionsMock.h b/src/ErrorFunction/ErrorFunctionsMock.h
index 489867c0..b1735436 100644
--- a/src/ErrorFunction/ErrorFunctionsMock.h
+++ b/src/ErrorFunction/ErrorFunctionsMock.h
@@ -55,37 +55,37 @@ MOCK_BASE_CLASS(mock_ErrorFunction,
     MOCK_METHOD(eval_on_test_data,
                 2,
                 double(std::vector<double>
-                        *, bool),
+                    *, bool),
                 id1)
 
     MOCK_METHOD(eval_on_test_data,
                 3,
                 double(std::string, std::vector<double>
-                        *, bool),
+                    *, bool),
                 id2)
 
     MOCK_METHOD(eval_on_test_data,
                 3,
                 double(std::ofstream
-                        *, std::vector<double> *, bool),
+                    *, std::vector<double> *, bool),
                 id3)
 
     MOCK_METHOD(eval_on_data_set,
                 3,
                 double(DataSet
-                        *, std::vector<double> *, bool),
+                    *, std::vector<double> *, bool),
                 id4)
 
     MOCK_METHOD(eval_on_data_set,
                 4,
                 double(DataSet
-                        *, std::string, std::vector<double> *, bool),
+                    *, std::string, std::vector<double> *, bool),
                 id5)
 
     MOCK_METHOD(eval_on_data_set,
                 4,
                 double(DataSet
-                        *, std::ofstream *, std::vector<double> *, bool),
+                    *, std::ofstream *, std::vector<double> *, bool),
                 id6)
 
 
diff --git a/src/General/ExprtkWrapperSerialization.h b/src/General/ExprtkWrapperSerialization.h
index a57a8823..15304812 100644
--- a/src/General/ExprtkWrapperSerialization.h
+++ b/src/General/ExprtkWrapperSerialization.h
@@ -13,8 +13,8 @@
 BOOST_CLASS_EXPORT_KEY(ExprtkWrapper);
 
 typedef exprtk::symbol_table<double> symbol_table_t;
-typedef exprtk::expression<double> expression_t;
-typedef exprtk::parser<double> parser_t;
+typedef exprtk::expression<double>   expression_t;
+typedef exprtk::parser<double>       parser_t;
 
 /**
  * Class implementing the private properties
diff --git a/src/LearningMethods/GradientDescent.cpp b/src/LearningMethods/GradientDescent.cpp
index 3d6d71e4..d9a2c55b 100644
--- a/src/LearningMethods/GradientDescent.cpp
+++ b/src/LearningMethods/GradientDescent.cpp
@@ -14,10 +14,10 @@ namespace lib4neuro {
                                      size_t n_to_restart,
                                      int max_iters,
                                      size_t batch) {
-        this->tolerance = epsilon;
+        this->tolerance         = epsilon;
         this->restart_frequency = n_to_restart;
-        this->maximum_niters = max_iters;
-        this->batch = batch;
+        this->maximum_niters    = max_iters;
+        this->batch             = batch;
     }
 
     GradientDescent::~GradientDescent() {
@@ -44,25 +44,25 @@ namespace lib4neuro {
     }
 
     bool GradientDescent::perform_feasible_1D_step(
-            lib4neuro::ErrorFunction& ef,
-            double error_previous,
-            double step_coefficient,
-            std::shared_ptr<std::vector<double>> direction,
-            std::shared_ptr<std::vector<double>> parameters_before,
-            std::shared_ptr<std::vector<double>> parameters_after
+        lib4neuro::ErrorFunction& ef,
+        double error_previous,
+        double step_coefficient,
+        std::shared_ptr<std::vector<double>> direction,
+        std::shared_ptr<std::vector<double>> parameters_before,
+        std::shared_ptr<std::vector<double>> parameters_after
     ) {
 
         size_t i;
 
-        boost::random::mt19937 gen(std::time(0));
+        boost::random::mt19937                    gen(std::time(0));
         boost::random::uniform_int_distribution<> dis(0,
                                                       direction->size());
-        size_t max_dir_idx = dis(gen);
+        size_t                                    max_dir_idx = dis(gen);
 
         double error_current = error_previous + 1.0;
         while (error_current >= error_previous) {
             (*parameters_after)[max_dir_idx] =
-                    (*parameters_before)[max_dir_idx] - step_coefficient * (*direction)[max_dir_idx];
+                (*parameters_before)[max_dir_idx] - step_coefficient * (*direction)[max_dir_idx];
 
             error_current = ef.eval(parameters_after.get());
             if (step_coefficient < 1e-32) {
@@ -92,16 +92,16 @@ namespace lib4neuro {
             *ofs << "Initial error: " << ef.eval() << std::endl;
         }
 
-        double grad_norm = this->tolerance * 10.0, gamma, sx, beta;
-        double grad_norm_prev;
-        size_t i;
-        long long int iter_idx = this->maximum_niters;
-        size_t iter_counter = 0;
+        double        grad_norm    = this->tolerance * 10.0, gamma, sx, beta;
+        double        grad_norm_prev;
+        size_t        i;
+        long long int iter_idx     = this->maximum_niters;
+        size_t        iter_counter = 0;
 
-        gamma = 1.0;
+        gamma                = 1.0;
         double prev_val, val = 0.0, c = 1.25;
 
-        size_t n_parameters = ef.get_dimension();
+        size_t n_parameters                 = ef.get_dimension();
 
 
         std::vector<double>* gradient_current(new std::vector<double>(n_parameters));
@@ -120,11 +120,11 @@ namespace lib4neuro {
 
         val = ef.eval(params_current);
         size_t counter_good_guesses = 0, counter_bad_guesses = 0, counter_simplified_direction_good = 0, counter_simplified_direction_bad = 0;
-        double cooling = 1.0;
+        double cooling              = 1.0;
         while (grad_norm > this->tolerance && (iter_idx != 0)) {
             iter_idx--;
             iter_counter++;
-            prev_val = val;
+            prev_val       = val;
             grad_norm_prev = grad_norm;
 
             /* reset of the current gradient */
@@ -147,11 +147,11 @@ namespace lib4neuro {
             /* step length calculation */
             if (iter_counter < 10 || iter_counter % this->restart_frequency == 0) {
                 /* fixed step length */
-                gamma = 0.1 * this->tolerance;
+                gamma   = 0.1 * this->tolerance;
                 cooling = 1.0;
             } else {
                 /* angle between two consecutive gradients */
-                sx = 0.0;
+                sx     = 0.0;
                 for (i = 0; i < gradient_current->size(); ++i) {
                     sx += (gradient_current->at(i) * gradient_prev->at(i));
                 }
@@ -161,7 +161,7 @@ namespace lib4neuro {
                 } else if (sx > 1.0 - 5e-12) {
                     sx = 1 - 5e-12;
                 }
-                beta = std::sqrt(std::acos(sx) / lib4neuro::PI);
+                beta   = std::sqrt(std::acos(sx) / lib4neuro::PI);
 
                 eval_step_size_mk(gamma,
                                   beta,
@@ -181,12 +181,12 @@ namespace lib4neuro {
 
 
             /* switcheroo */
-            ptr_mem = gradient_prev;
-            gradient_prev = gradient_current;
+            ptr_mem          = gradient_prev;
+            gradient_prev    = gradient_current;
             gradient_current = ptr_mem;
 
-            ptr_mem = params_prev;
-            params_prev = params_current;
+            ptr_mem        = params_prev;
+            params_prev    = params_current;
             params_current = ptr_mem;
 
 
diff --git a/src/LearningMethods/GradientDescent.h b/src/LearningMethods/GradientDescent.h
index 7d76d1b1..52ff7580 100644
--- a/src/LearningMethods/GradientDescent.h
+++ b/src/LearningMethods/GradientDescent.h
@@ -78,12 +78,12 @@ namespace lib4neuro {
          * @param parameters_after[out] suggested state of the parameters after the analysis completes
          */
         virtual bool perform_feasible_1D_step(
-                lib4neuro::ErrorFunction& ef,
-                double error_previous,
-                double step_coefficient,
-                std::shared_ptr<std::vector<double>> direction,
-                std::shared_ptr<std::vector<double>> parameters_before,
-                std::shared_ptr<std::vector<double>> parameters_after
+            lib4neuro::ErrorFunction& ef,
+            double error_previous,
+            double step_coefficient,
+            std::shared_ptr<std::vector<double>> direction,
+            std::shared_ptr<std::vector<double>> parameters_before,
+            std::shared_ptr<std::vector<double>> parameters_after
         );
 
     public:
diff --git a/src/LearningMethods/GradientDescentBB.cpp b/src/LearningMethods/GradientDescentBB.cpp
index 4269e10a..971d15ef 100644
--- a/src/LearningMethods/GradientDescentBB.cpp
+++ b/src/LearningMethods/GradientDescentBB.cpp
@@ -13,10 +13,10 @@ namespace lib4neuro {
                                          size_t n_to_restart,
                                          int max_iters,
                                          size_t batch) {
-        this->tolerance = epsilon;
+        this->tolerance         = epsilon;
         this->restart_frequency = n_to_restart;
-        this->maximum_niters = max_iters;
-        this->batch = batch;
+        this->maximum_niters    = max_iters;
+        this->batch             = batch;
     }
 
     GradientDescentBB::~GradientDescentBB() {
@@ -35,16 +35,16 @@ namespace lib4neuro {
             *ofs << "Initial error: " << ef.eval() << std::endl;
         }
 
-        double grad_norm = this->tolerance * 10.0, gamma, sx, beta;
-        double grad_norm_prev;
-        size_t i;
-        long long int iter_idx = this->maximum_niters;
-        size_t iter_counter = 0;
+        double        grad_norm    = this->tolerance * 10.0, gamma, sx, beta;
+        double        grad_norm_prev;
+        size_t        i;
+        long long int iter_idx     = this->maximum_niters;
+        size_t        iter_counter = 0;
 
-        gamma = 1.0;
+        gamma                = 1.0;
         double prev_val, val = 0.0, c = 1.25, val_best;
 
-        size_t n_parameters = ef.get_dimension();
+        size_t n_parameters                 = ef.get_dimension();
 
 
         std::vector<double>* gradient_current(new std::vector<double>(n_parameters));
@@ -55,9 +55,9 @@ namespace lib4neuro {
 
         std::vector<double>* ptr_mem;
 
-        double alpha = -1.0, cc, gg;
+        double              alpha = -1.0, cc, gg;
         std::vector<double> dot__(3);
-        double d1 = 0.0, d2 = 0.0, d3 = 0.0;
+        double              d1    = 0.0, d2 = 0.0, d3 = 0.0;
 
 
         std::fill(gradient_current->begin(),
@@ -66,14 +66,14 @@ namespace lib4neuro {
         std::fill(gradient_prev->begin(),
                   gradient_prev->end(),
                   0.0);
-        val = ef.eval(params_current);
+        val      = ef.eval(params_current);
         val_best = val;
 
         double cooling_factor = 1.0;
         while (grad_norm > this->tolerance && (iter_idx != 0)) {
             iter_idx--;
             iter_counter++;
-            prev_val = val;
+            prev_val       = val;
             grad_norm_prev = grad_norm;
 
             /* reset of the current gradient */
@@ -97,7 +97,7 @@ namespace lib4neuro {
             /* step length calculation */
             if (iter_counter < 10 || iter_counter % this->restart_frequency < 10) {
                 /* fixed step length */
-                gamma = 0.1 * this->tolerance;
+                gamma          = 0.1 * this->tolerance;
                 cooling_factor = 1.0;
             } else {
 
@@ -131,12 +131,12 @@ namespace lib4neuro {
 
 
             /* switcheroo */
-            ptr_mem = gradient_prev;
-            gradient_prev = gradient_current;
+            ptr_mem          = gradient_prev;
+            gradient_prev    = gradient_current;
             gradient_current = ptr_mem;
 
-            ptr_mem = params_prev;
-            params_prev = params_current;
+            ptr_mem        = params_prev;
+            params_prev    = params_current;
             params_current = ptr_mem;
 
             val = ef.eval(params_current);
diff --git a/src/LearningMethods/GradientDescentSingleItem.cpp b/src/LearningMethods/GradientDescentSingleItem.cpp
index 784f43c9..1f9cdc30 100644
--- a/src/LearningMethods/GradientDescentSingleItem.cpp
+++ b/src/LearningMethods/GradientDescentSingleItem.cpp
@@ -15,10 +15,10 @@ namespace lib4neuro {
                                                          size_t n_to_restart,
                                                          int max_iters,
                                                          size_t batch) {
-        this->tolerance = epsilon;
+        this->tolerance         = epsilon;
         this->restart_frequency = n_to_restart;
-        this->maximum_niters = max_iters;
-        this->batch = batch;
+        this->maximum_niters    = max_iters;
+        this->batch             = batch;
     }
 
     GradientDescentSingleItem::~GradientDescentSingleItem() {
@@ -36,7 +36,7 @@ namespace lib4neuro {
 
         double alpha = 10.0 / n_elems;
         alpha = 1.0;
-        double value = f.eval();
+        double value         = f.eval();
         double value_shifted = value + 1.0;
 
 
@@ -58,13 +58,13 @@ namespace lib4neuro {
                                              std::ofstream* ofs) {
 
         COUT_INFO("Finding a solution via a Gradient Descent [Single Item] method with adaptive step-length..."
-                          << std::endl);
+                      << std::endl);
         COUT_INFO("Initial error: " << ef.eval() << std::endl);
 
         size_t total_elements = ef.get_n_data_set(), updated_elements = 0, iter = 0;
-        double max_error = 1.0, error, gamma;
-        size_t iter_idx = this->maximum_niters;
-        size_t dim = ef.get_dimension();
+        double max_error      = 1.0, error, gamma;
+        size_t iter_idx       = this->maximum_niters;
+        size_t dim            = ef.get_dimension();
 
         std::vector<double> parameter_vector = ef.get_parameters();
         std::vector<double> gradient_vector(dim);
@@ -74,7 +74,7 @@ namespace lib4neuro {
             iter_idx--;
             iter++;
 
-            max_error = 0.0;
+            max_error        = 0.0;
             updated_elements = 0;
             std::fill(search_direction.begin(),
                       search_direction.end(),
diff --git a/src/LearningMethods/LearningSequence.cpp b/src/LearningMethods/LearningSequence.cpp
index 274e8cf9..44e56707 100644
--- a/src/LearningMethods/LearningSequence.cpp
+++ b/src/LearningMethods/LearningSequence.cpp
@@ -12,7 +12,7 @@ namespace lib4neuro {
 
     LearningSequence::LearningSequence(double tolerance,
                                        int max_n_cycles) {
-        this->tol = tolerance;
+        this->tol                  = tolerance;
         this->max_number_of_cycles = max_n_cycles;
     }
 
@@ -27,7 +27,7 @@ namespace lib4neuro {
         double error = ef.eval();
         this->optimal_parameters = ef.get_parameters();
         double the_best_error = error;
-        int mcycles = this->max_number_of_cycles, cycle_idx = 0;
+        int    mcycles        = this->max_number_of_cycles, cycle_idx = 0;
 
         std::vector<double> params;
         while (error > this->tol && mcycles != 0) {
@@ -40,7 +40,7 @@ namespace lib4neuro {
 
                 //TODO do NOT copy vectors if not needed
                 params = *m->get_parameters();
-                error = ef.eval(&params);
+                error  = ef.eval(&params);
 
                 ef.set_parameters(params);
 
diff --git a/src/LearningMethods/LevenbergMarquardt.cpp b/src/LearningMethods/LevenbergMarquardt.cpp
index a55879a5..b2c46aed 100644
--- a/src/LearningMethods/LevenbergMarquardt.cpp
+++ b/src/LearningMethods/LevenbergMarquardt.cpp
@@ -43,21 +43,21 @@ struct lib4neuro::LevenbergMarquardt::LevenbergMarquardtImpl {
 };
 
 void lib4neuro::LevenbergMarquardt::LevenbergMarquardtImpl::get_jacobian_and_rhs(
-        lib4neuro::ErrorFunction& ef,
-        arma::Mat<double>& J,
-        arma::Col<double>& rhs,
-        size_t data_subset_size) {
+    lib4neuro::ErrorFunction& ef,
+    arma::Mat<double>& J,
+    arma::Col<double>& rhs,
+    size_t data_subset_size) {
 
 
     std::vector<std::vector<double>> jacobian;
-    std::vector<double> rhs_vec;
+    std::vector<double>              rhs_vec;
 
     ef.divide_data_train_test((double) data_subset_size / (double) ef.get_n_data_set());
     ef.get_jacobian_and_rhs(jacobian,
                             rhs_vec);
     ef.return_full_data_set_for_training();
 
-    size_t dim_out = jacobian.size();
+    size_t dim_out      = jacobian.size();
     size_t n_parameters = rhs_vec.size();
 
     J.reshape(dim_out,
@@ -88,15 +88,15 @@ namespace lib4neuro {
                                            double lambda_increase,
                                            double lambda_decrease) : p_impl(new LevenbergMarquardtImpl()) {
 
-        this->p_impl->batch_size = bs;
-        this->p_impl->tolerance = tolerance;
-        this->p_impl->tolerance_gradient = tolerance_gradient;
-        this->p_impl->tolerance_parameters = tolerance_parameters;
+        this->p_impl->batch_size                   = bs;
+        this->p_impl->tolerance                    = tolerance;
+        this->p_impl->tolerance_gradient           = tolerance_gradient;
+        this->p_impl->tolerance_parameters         = tolerance_parameters;
         this->p_impl->LM_step_acceptance_threshold = LM_step_acceptance_threshold;
-        this->p_impl->lambda_initial = lambda_initial;
-        this->p_impl->lambda_increase = lambda_increase;
-        this->p_impl->lambda_decrease = lambda_decrease;
-        this->p_impl->maximum_niters = max_iters;
+        this->p_impl->lambda_initial               = lambda_initial;
+        this->p_impl->lambda_increase              = lambda_increase;
+        this->p_impl->lambda_decrease              = lambda_decrease;
+        this->p_impl->maximum_niters               = max_iters;
     }
 
     void LevenbergMarquardt::optimize(lib4neuro::ErrorFunction& ef,
@@ -112,13 +112,13 @@ namespace lib4neuro {
         double current_err = ef.eval();
 
         COUT_INFO(
-                "Finding a solution via a Levenberg-Marquardt method... Starting error: " << current_err << std::endl);
+            "Finding a solution via a Levenberg-Marquardt method... Starting error: " << current_err << std::endl);
         if (ofs && ofs->is_open()) {
             *ofs << "Finding a solution via a Levenberg-Marquardt method... Starting error: " << current_err
                  << std::endl;
         }
 
-        size_t n_parameters = ef.get_dimension();
+        size_t n_parameters  = ef.get_dimension();
         size_t n_data_points = ef.get_n_data_set();
         if (this->p_impl->batch_size > 0) {
             n_data_points = this->p_impl->batch_size;
@@ -134,21 +134,21 @@ namespace lib4neuro {
         arma::Mat<double> H_new(n_data_points,
                                 n_parameters);
 
-        double lambda = this->p_impl->lambda_initial;  // Dumping parameter
+        double lambda   = this->p_impl->lambda_initial;  // Damping parameter
         double prev_err = 0, update_norm = 0, gradient_norm = 0, mem_double = 0, jacobian_norm = 1;
 
 
-        bool update_J = true;
-        arma::Col<double> update;
-        arma::Col<double> rhs;
+        bool                update_J = true;
+        arma::Col<double>   update;
+        arma::Col<double>   rhs;
         std::vector<double> d_prep(n_data_points);
-        arma::Col<double> d;
+        arma::Col<double>   d;
 
         double slowdown_coeff = 0.25;
         //-------------------//
         // Solver iterations //
         //-------------------//
-        size_t iter_counter = 0;
+        size_t iter_counter   = 0;
         do {
 
             if (update_J) {
@@ -166,7 +166,7 @@ namespace lib4neuro {
                     mem_double *= mem_double;
                     gradient_norm += mem_double;
                 }
-                gradient_norm = std::sqrt(gradient_norm) / J.n_rows;
+                gradient_norm  = std::sqrt(gradient_norm) / J.n_rows;
 
                 /* Get approximation of Hessian (H ~ J'*J) */
                 H = J.t() * J;
@@ -179,7 +179,7 @@ namespace lib4neuro {
                                                          ci);
                     }
                 }
-                jacobian_norm = std::sqrt(jacobian_norm);
+                jacobian_norm  = std::sqrt(jacobian_norm);
 
                 /* Evaluate the error before updating parameters */
                 prev_err = ef.eval();
@@ -200,8 +200,8 @@ namespace lib4neuro {
                 params_tmp->at(i) = params_current->at(i) + update.at(i);
                 update_norm += update.at(i) * update.at(i);
             }
-            update_norm = std::sqrt(update_norm);
-            current_err = ef.eval(params_tmp.get());
+            update_norm   = std::sqrt(update_norm);
+            current_err   = ef.eval(params_tmp.get());
 
             /* Check, if the parameter update improved the function */
             if (current_err < prev_err) {
diff --git a/src/LearningMethods/ParticleSwarm.cpp b/src/LearningMethods/ParticleSwarm.cpp
index 123f0f42..8d376841 100644
--- a/src/LearningMethods/ParticleSwarm.cpp
+++ b/src/LearningMethods/ParticleSwarm.cpp
@@ -35,8 +35,8 @@
 void Particle::randomize_coordinates() {
 
     std::random_device seeder;
-    std::mt19937 gen(seeder());
-    for (unsigned int i = 0; i < this->coordinate_dim; ++i) {
+    std::mt19937       gen(seeder());
+    for (unsigned int  i = 0; i < this->coordinate_dim; ++i) {
         std::uniform_real_distribution<double> dist_coord(this->domain_bounds->at(2 * i),
                                                           this->domain_bounds->at(2 * i + 1));
         (*this->coordinate)[i] = dist_coord(gen);
@@ -45,8 +45,8 @@ void Particle::randomize_coordinates() {
 
 void Particle::randomize_parameters() {
 
-    std::random_device seeder;
-    std::mt19937 gen(seeder());
+    std::random_device                     seeder;
+    std::mt19937                           gen(seeder());
     std::uniform_real_distribution<double> dist_vel(0.5,
                                                     1.0);
     this->r1 = dist_vel(gen);
@@ -55,11 +55,11 @@ void Particle::randomize_parameters() {
 }
 
 void Particle::randomize_velocity() {
-    std::random_device seeder;
-    std::mt19937 gen(seeder());
+    std::random_device                     seeder;
+    std::mt19937                           gen(seeder());
     std::uniform_real_distribution<double> dist_vel(0.5,
                                                     1.0);
-    for (unsigned int i = 0; i < this->coordinate_dim; ++i) {
+    for (unsigned int                      i = 0; i < this->coordinate_dim; ++i) {
         (*this->velocity)[i] = dist_vel(gen);
     }
 }
@@ -67,13 +67,13 @@ void Particle::randomize_velocity() {
 Particle::Particle(lib4neuro::ErrorFunction* ef,
                    std::vector<double>* domain_bounds) {
 
-    this->ef = ef;
-    this->domain_bounds = new std::vector<double>(*domain_bounds);
+    this->ef             = ef;
+    this->domain_bounds  = new std::vector<double>(*domain_bounds);
     this->coordinate_dim = ef->get_dimension();
-    this->ef = ef;
+    this->ef             = ef;
 
-    this->coordinate = new std::vector<double>(this->coordinate_dim);
-    this->velocity = new std::vector<double>(this->coordinate_dim);
+    this->coordinate         = new std::vector<double>(this->coordinate_dim);
+    this->velocity           = new std::vector<double>(this->coordinate_dim);
     this->optimal_coordinate = new std::vector<double>(this->coordinate_dim);
 
 
@@ -103,15 +103,15 @@ Particle::Particle(lib4neuro::ErrorFunction* ef,
 
 
     for (size_t i = 0; i < central_system->size(); ++i) {
-        this->domain_bounds->at(2 * i) = central_system->at(i) - dispersion_coeff;
+        this->domain_bounds->at(2 * i)     = central_system->at(i) - dispersion_coeff;
         this->domain_bounds->at(2 * i + 1) = central_system->at(i) + dispersion_coeff;
     }
 
     this->coordinate_dim = ef->get_dimension();
-    this->ef = ef;
+    this->ef             = ef;
 
-    this->coordinate = new std::vector<double>(this->coordinate_dim);
-    this->velocity = new std::vector<double>(this->coordinate_dim);
+    this->coordinate         = new std::vector<double>(this->coordinate_dim);
+    this->velocity           = new std::vector<double>(this->coordinate_dim);
     this->optimal_coordinate = new std::vector<double>(this->coordinate_dim);
 
 
@@ -185,8 +185,8 @@ double Particle::change_coordinate(double w,
 
     /* Choose random global minima */
     std::vector<double>* random_global_best;
-    std::random_device rand_dev;
-    std::mt19937 engine{rand_dev()};
+    std::random_device                    rand_dev;
+    std::mt19937                          engine{rand_dev()};
     std::uniform_int_distribution<size_t> dist(0,
                                                global_min_vec.size() - 1);
     random_global_best = &global_min_vec[dist(engine)];
@@ -260,13 +260,13 @@ namespace lib4neuro {
 
         if (epsilon < 0 || gamma < 0 || delta < 0) {
             THROW_INVALID_ARGUMENT_ERROR(
-                    "Parameters 'gamma', 'epsilon' and 'delta' must be greater than or equal to zero!");
+                "Parameters 'gamma', 'epsilon' and 'delta' must be greater than or equal to zero!");
         }
 
-        this->gamma = gamma;
+        this->gamma   = gamma;
         this->epsilon = epsilon;
-        this->delta = delta;
-        this->pst = PARTICLE_SWARM_TYPE::GENERAL;
+        this->delta   = delta;
+        this->pst     = PARTICLE_SWARM_TYPE::GENERAL;
 
         this->init_constructor(domain_bounds,
                                c1,
@@ -298,15 +298,15 @@ namespace lib4neuro {
 
         if (this->epsilon < 0 || this->gamma < 0 || this->delta < 0) {
             THROW_INVALID_ARGUMENT_ERROR(
-                    "Parameters 'gamma', 'epsilon' and 'delta' must be greater than or equal to zero!");
+                "Parameters 'gamma', 'epsilon' and 'delta' must be greater than or equal to zero!");
         }
 
-        this->func_dim = ef.get_dimension();
+        this->func_dim         = ef.get_dimension();
 
 
         /* initialize the particles */
         std::vector<double> centroids(ef.get_parameters());
-        for (size_t pi = 0; pi < this->particle_swarm.size(); ++pi) {
+        for (size_t         pi = 0; pi < this->particle_swarm.size(); ++pi) {
             if (this->particle_swarm.at(pi)) {
                 delete this->particle_swarm.at(pi);
             }
@@ -318,29 +318,29 @@ namespace lib4neuro {
 
         this->optimal_parameters.resize(this->func_dim);
 
-        size_t outer_it = 0;
+        size_t outer_it                                = 0;
         Particle* particle;
 
         std::vector<std::vector<double>> global_best_vec;
-        double optimal_value = 0.0;
+        double                           optimal_value = 0.0;
 
         std::set<Particle*> cluster; //!< Particles in a cluster
         std::vector<double>* centroid = new std::vector<double>(this->func_dim);//<! Centroid coordinates
 
         double tmp_velocity;
-        double prev_max_velocity = 0;
+        double prev_max_velocity      = 0;
         double max_velocity;
-        double max_vel_step = 0;
+        double max_vel_step           = 0;
         double prev_max_vel_step;
         double euclidean_dist;
-        double current_err = -1;
+        double current_err            = -1;
 
         this->determine_optimal_coordinate_and_value(this->optimal_parameters,
                                                      optimal_value);
         COUT_INFO("Initial best value: " << optimal_value << std::endl);
 
         while (outer_it < this->iter_max) {
-            max_velocity = 0;
+            max_velocity   = 0;
             euclidean_dist = 0;
 
             //////////////////////////////////////////////////
@@ -377,7 +377,7 @@ namespace lib4neuro {
             }
 
             for (size_t pi = 0; pi < this->n_particles; pi++) {
-                particle = this->particle_swarm.at(pi);
+                particle     = this->particle_swarm.at(pi);
                 tmp_velocity = particle->change_coordinate(this->w,
                                                            this->c1,
                                                            this->c2,
@@ -386,7 +386,7 @@ namespace lib4neuro {
 
                 if (tmp_velocity > max_velocity) {
                     prev_max_velocity = max_velocity;
-                    max_velocity = tmp_velocity;
+                    max_velocity      = tmp_velocity;
                 }
 
                 /* Looking for nearby particles */
@@ -405,7 +405,7 @@ namespace lib4neuro {
             //}
 
             prev_max_vel_step = max_vel_step;
-            max_vel_step = max_velocity - prev_max_velocity;
+            max_vel_step      = max_velocity - prev_max_velocity;
 
             //TODO only in verbose mode
             euclidean_dist /= this->n_particles;
@@ -480,7 +480,7 @@ namespace lib4neuro {
         }
 
         this->err_thresh = err_thresh;
-        this->pst = pst;
+        this->pst        = pst;
 
         this->init_constructor(domain_bounds,
                                c1,
@@ -534,8 +534,8 @@ namespace lib4neuro {
 
     double ParticleSwarm::get_euclidean_distance(std::vector<double>* a,
                                                  std::vector<double>* b) {
-        double dist = 0, m;
-        for (size_t i = 0; i < a->size(); i++) {
+        double      dist = 0, m;
+        for (size_t i    = 0; i < a->size(); i++) {
             m = (*a)[i] - (*b)[i];
             m *= m;
             dist += m;
@@ -549,12 +549,12 @@ namespace lib4neuro {
                                          double w,
                                          size_t n_particles,
                                          size_t iter_max) {
-        this->c1 = c1;
-        this->c2 = c2;
-        this->c3 = (c1 + c2) / 2.0;
-        this->w = w;
+        this->c1          = c1;
+        this->c2          = c2;
+        this->c3          = (c1 + c2) / 2.0;
+        this->w           = w;
         this->n_particles = n_particles;
-        this->iter_max = iter_max;
+        this->iter_max    = iter_max;
         this->particle_swarm.resize(this->n_particles);
         std::fill(this->particle_swarm.begin(),
                   this->particle_swarm.end(),
diff --git a/src/LearningMethods/ParticleSwarm.h b/src/LearningMethods/ParticleSwarm.h
index 57ce75c8..642b925c 100644
--- a/src/LearningMethods/ParticleSwarm.h
+++ b/src/LearningMethods/ParticleSwarm.h
@@ -21,7 +21,7 @@ private:
 
     size_t coordinate_dim;
     std::vector<double>* coordinate = nullptr;
-    std::vector<double>* velocity = nullptr;
+    std::vector<double>* velocity   = nullptr;
 
     std::vector<double>* optimal_coordinate = nullptr;
     double optimal_value;
@@ -256,15 +256,15 @@ namespace lib4neuro {
          * @param iter_max Maximal number of iterations - optimization will stop after that, even if not converged
          */
         LIB4NEURO_API explicit ParticleSwarm(
-                std::vector<double>* domain_bounds,
-                double c1 = 1.711897,
-                double c2 = 1.711897,
-                double w = 0.711897,
-                double gamma = 0.5,
-                double epsilon = 0.02,
-                double delta = 0.7,
-                size_t n_particles = 50,
-                size_t iter_max = 1000
+            std::vector<double>* domain_bounds,
+            double c1 = 1.711897,
+            double c2 = 1.711897,
+            double w = 0.711897,
+            double gamma = 0.5,
+            double epsilon = 0.02,
+            double delta = 0.7,
+            size_t n_particles = 50,
+            size_t iter_max = 1000
         );
 
         /**
@@ -286,14 +286,14 @@ namespace lib4neuro {
          *                   ErrorFunction
          */
         LIB4NEURO_API explicit ParticleSwarm(
-                std::vector<double>* domain_bounds,
-                double err_thresh,
-                PARTICLE_SWARM_TYPE,
-                double c1 = 1.711897,
-                double c2 = 1.711897,
-                double w = 0.711897,
-                size_t n_particles = 50,
-                size_t iter_max = 1000
+            std::vector<double>* domain_bounds,
+            double err_thresh,
+            PARTICLE_SWARM_TYPE,
+            double c1 = 1.711897,
+            double c2 = 1.711897,
+            double w = 0.711897,
+            size_t n_particles = 50,
+            size_t iter_max = 1000
         );
 
         /**
diff --git a/src/NetConnection/ConnectionFunctionIdentity.cpp b/src/NetConnection/ConnectionFunctionIdentity.cpp
index e5bb8ab6..298cb7d6 100644
--- a/src/NetConnection/ConnectionFunctionIdentity.cpp
+++ b/src/NetConnection/ConnectionFunctionIdentity.cpp
@@ -18,7 +18,7 @@ ConnectionFunctionIdentity::ConnectionFunctionIdentity() {
 }
 
 ConnectionFunctionIdentity::ConnectionFunctionIdentity(size_t pidx) {
-    this->param_idx = pidx;
+    this->param_idx  = pidx;
     this->is_unitary = false;
 }
 
diff --git a/src/Network/NeuralNetwork.cpp b/src/Network/NeuralNetwork.cpp
index 5ba0aa99..fe8bc379 100644
--- a/src/Network/NeuralNetwork.cpp
+++ b/src/Network/NeuralNetwork.cpp
@@ -19,8 +19,8 @@ namespace lib4neuro {
     NeuralNetwork::NeuralNetwork() {
 
 
-        this->delete_weights = true;
-        this->delete_biases = true;
+        this->delete_weights  = true;
+        this->delete_biases   = true;
         this->layers_analyzed = false;
     }
 
@@ -30,10 +30,11 @@ namespace lib4neuro {
             try {
                 boost::archive::text_iarchive ia(ifs);
                 ia >> *this;
-            } catch (boost::archive::archive_exception& e) {
+            }
+            catch (boost::archive::archive_exception& e) {
                 THROW_RUNTIME_ERROR(
-                        "Serialized archive error: '" + e.what() + "'! Please, check if your file is really "
-                                                                   "the serialized DataSet.");
+                    "Serialized archive error: '" + e.what() + "'! Please, check if your file is really "
+                                                               "the serialized NeuralNetwork.");
             }
             ifs.close();
         } else {
@@ -95,7 +96,7 @@ namespace lib4neuro {
         }
 
         double potential, bias;
-        int bias_idx;
+        int    bias_idx;
 
         this->copy_parameter_space(custom_weights_and_biases);
 
@@ -123,8 +124,8 @@ namespace lib4neuro {
             /* we iterate through all neurons in this layer and propagate the signal to the neighboring neurons */
 
             for (auto si: *layer) {
-                bias = 0.0;
-                bias_idx = this->neuron_bias_indices.at(si);
+                bias      = 0.0;
+                bias_idx  = this->neuron_bias_indices.at(si);
                 if (bias_idx >= 0) {
                     bias = this->neuron_biases.at(bias_idx);
                 }
@@ -138,7 +139,7 @@ namespace lib4neuro {
                     size_t ci = c.second;
 
                     this->neuron_potentials.at(ti) +=
-                            this->connection_list.at(ci)->eval(this->connection_weights) * potential;
+                        this->connection_list.at(ci)->eval(this->connection_weights) * potential;
 
                     std::cout << "  adding input to neuron " << ti << " += "
                               << this->connection_list.at(ci)->eval(this->connection_weights) << "*" << potential
@@ -148,8 +149,8 @@ namespace lib4neuro {
         }
 
         unsigned int i = 0;
-        for (auto oi: this->output_neuron_indices) {
-            bias = 0.0;
+        for (auto    oi: this->output_neuron_indices) {
+            bias     = 0.0;
             bias_idx = this->neuron_bias_indices.at(oi);
             if (bias_idx >= 0) {
                 bias = this->neuron_biases.at(bias_idx);
@@ -256,9 +257,9 @@ namespace lib4neuro {
         this->neuron_biases.clear();
 
         this->connection_weights = parent_network.connection_weights;
-        this->neuron_biases = parent_network.neuron_biases;
+        this->neuron_biases      = parent_network.neuron_biases;
 
-        this->delete_biases = false;
+        this->delete_biases  = false;
         this->delete_weights = false;
     }
 
@@ -279,7 +280,7 @@ namespace lib4neuro {
         }
 
         double potential, bias;
-        int bias_idx;
+        int    bias_idx;
 
         this->copy_parameter_space(custom_weights_and_biases);
 
@@ -303,8 +304,8 @@ namespace lib4neuro {
             /* we iterate through all neurons in this layer and propagate the signal to the neighboring neurons */
 
             for (auto si: *layer) {
-                bias = 0.0;
-                bias_idx = this->neuron_bias_indices.at(si);
+                bias      = 0.0;
+                bias_idx  = this->neuron_bias_indices.at(si);
                 if (bias_idx >= 0) {
                     bias = this->neuron_biases.at(bias_idx);
                 }
@@ -316,14 +317,14 @@ namespace lib4neuro {
                     size_t ci = c.second;
 
                     this->neuron_potentials.at(ti) +=
-                            this->connection_list.at(ci)->eval(this->connection_weights) * potential;
+                        this->connection_list.at(ci)->eval(this->connection_weights) * potential;
                 }
             }
         }
 
         unsigned int i = 0;
-        for (auto oi: this->output_neuron_indices) {
-            bias = 0.0;
+        for (auto    oi: this->output_neuron_indices) {
+            bias     = 0.0;
             bias_idx = this->neuron_bias_indices.at(oi);
             if (bias_idx >= 0) {
                 bias = this->neuron_biases.at(bias_idx);
@@ -346,16 +347,16 @@ namespace lib4neuro {
 
         size_t bias_shift = this->get_n_weights();
         size_t neuron_idx;
-        int bias_idx;
+        int    bias_idx;
         double neuron_potential, neuron_potential_t, neuron_bias, connection_weight;
 
         NeuronDifferentiable* active_neuron;
 
         /* initial error propagation */
         std::shared_ptr<::std::vector<size_t>> current_layer = this->neuron_layers_feedforward.at(
-                this->neuron_layers_feedforward.size() - 1);
+            this->neuron_layers_feedforward.size() - 1);
         //TODO might not work in the future as the output neurons could be permuted
-        for (size_t i = 0; i < current_layer->size(); ++i) {
+        for (size_t                            i             = 0; i < current_layer->size(); ++i) {
             neuron_idx = current_layer->at(i);
             scaling_backprog[neuron_idx] = error_derivative[i] * error_scaling;
         }
@@ -367,22 +368,22 @@ namespace lib4neuro {
 
             for (size_t i = 0; i < current_layer->size(); ++i) {
 
-                neuron_idx = current_layer->at(i);
+                neuron_idx    = current_layer->at(i);
                 active_neuron = dynamic_cast<NeuronDifferentiable*> (this->neurons.at(neuron_idx).get());
 
                 if (active_neuron) {
-                    bias_idx = this->neuron_bias_indices.at(neuron_idx);
+                    bias_idx         = this->neuron_bias_indices.at(neuron_idx);
                     neuron_potential = this->neuron_potentials.at(neuron_idx);
 
                     if (bias_idx >= 0) {
                         neuron_bias = this->neuron_biases.at(bias_idx);
                         gradient[bias_shift + bias_idx] += scaling_backprog[neuron_idx] *
                                                            active_neuron->activation_function_eval_derivative_bias(
-                                                                   neuron_potential,
-                                                                   neuron_bias);
+                                                               neuron_potential,
+                                                               neuron_bias);
                         scaling_backprog[neuron_idx] *= active_neuron->activation_function_eval_derivative(
-                                neuron_potential,
-                                neuron_bias);
+                            neuron_potential,
+                            neuron_bias);
                     }
 
                     /* connections to lower level neurons */
@@ -391,7 +392,7 @@ namespace lib4neuro {
                         size_t ci = c.second;
 
                         neuron_potential_t = this->neurons.at(ti)->get_last_activation_value();
-                        connection_weight = this->connection_list.at(ci)->eval(this->connection_weights);
+                        connection_weight  = this->connection_list.at(ci)->eval(this->connection_weights);
 
                         this->connection_list.at(ci)->eval_partial_derivative(*this->get_parameter_ptr_weights(),
                                                                               gradient,
@@ -402,7 +403,7 @@ namespace lib4neuro {
                     }
                 } else {
                     THROW_INVALID_ARGUMENT_ERROR(
-                            "Neuron used in backpropagation does not contain differentiable activation function!\n");
+                        "Neuron used in backpropagation does not contain differentiable activation function!\n");
                 }
             }
         }
@@ -420,14 +421,14 @@ namespace lib4neuro {
 
         size_t bias_shift = this->get_n_weights();
         size_t neuron_idx;
-        int bias_idx;
+        int    bias_idx;
         double neuron_potential, neuron_activation_t, neuron_bias, connection_weight;
 
         NeuronDifferentiable* active_neuron;
 
         /* initial error propagation */
         std::shared_ptr<::std::vector<size_t>> current_layer = this->neuron_layers_feedforward.at(
-                this->neuron_layers_feedforward.size() - 1);
+            this->neuron_layers_feedforward.size() - 1);
         //TODO might not work in the future as the output neurons could be permuted
         std::cout << "Error scaling on the output layer: ";
         for (size_t i = 0; i < current_layer->size(); ++i) {
@@ -445,24 +446,24 @@ namespace lib4neuro {
 
             for (size_t i = 0; i < current_layer->size(); ++i) {
 
-                neuron_idx = current_layer->at(i);
+                neuron_idx    = current_layer->at(i);
                 active_neuron = dynamic_cast<NeuronDifferentiable*> (this->neurons.at(neuron_idx).get());
 
                 if (active_neuron) {
                     std::cout << "  [backpropagation] active neuron: " << neuron_idx << std::endl;
 
-                    bias_idx = this->neuron_bias_indices.at(neuron_idx);
+                    bias_idx         = this->neuron_bias_indices.at(neuron_idx);
                     neuron_potential = this->neuron_potentials.at(neuron_idx);
 
                     if (bias_idx >= 0) {
                         neuron_bias = this->neuron_biases.at(bias_idx);
                         gradient[bias_shift + bias_idx] += scaling_backprog[neuron_idx] *
                                                            active_neuron->activation_function_eval_derivative_bias(
-                                                                   neuron_potential,
-                                                                   neuron_bias);
+                                                               neuron_potential,
+                                                               neuron_bias);
                         scaling_backprog[neuron_idx] *= active_neuron->activation_function_eval_derivative(
-                                neuron_potential,
-                                neuron_bias);
+                            neuron_potential,
+                            neuron_bias);
                     }
 
                     std::cout << "      [backpropagation] scaling coefficient: " << scaling_backprog[neuron_idx]
@@ -474,7 +475,7 @@ namespace lib4neuro {
                         size_t ci = c.second;
 
                         neuron_activation_t = this->neurons.at(ti)->get_last_activation_value();
-                        connection_weight = this->connection_list.at(ci)->eval(this->connection_weights);
+                        connection_weight   = this->connection_list.at(ci)->eval(this->connection_weights);
 
                         std::cout << "      [backpropagation] value (" << ti << "): " << neuron_activation_t
                                   << ", scaling: " << scaling_backprog[neuron_idx] << std::endl;
@@ -488,7 +489,7 @@ namespace lib4neuro {
                     }
                 } else {
                     THROW_INVALID_ARGUMENT_ERROR(
-                            "Neuron used in backpropagation does not contain differentiable activation function!\n");
+                        "Neuron used in backpropagation does not contain differentiable activation function!\n");
                 }
             }
         }
@@ -517,7 +518,7 @@ namespace lib4neuro {
         // Init weight guess ("optimal" for logistic activation functions)
         boost::random::uniform_real_distribution<> dist(-1,
                                                         1);
-        for (size_t i = 0; i < this->neuron_biases.size(); i++) {
+        for (size_t                                i = 0; i < this->neuron_biases.size(); i++) {
             this->neuron_biases.at(i) = dist(gen);
         }
     }
@@ -780,7 +781,7 @@ namespace lib4neuro {
 
 
         ::std::vector<size_t> active_eval_set(2 * n);
-        size_t active_set_size[2];
+        size_t                active_set_size[2];
 
         /* feedforward analysis */
         active_set_size[0] = 0;
@@ -881,8 +882,8 @@ namespace lib4neuro {
         }
 
 
-        this->delete_weights = true;
-        this->delete_biases = true;
+        this->delete_weights  = true;
+        this->delete_biases   = true;
         this->layers_analyzed = false;
 
         unsigned int inp_dim = neuron_numbers->at(0);  //!< Network input dimension
@@ -1004,7 +1005,7 @@ namespace lib4neuro {
 
         /* Init variables containing indices of INPUT and OUTPUT neurons */
 
-        this->input_neuron_indices = input_layer_neuron_indices;
+        this->input_neuron_indices  = input_layer_neuron_indices;
         this->output_neuron_indices = current_layer_neuron_indices;
 
         this->analyze_layer_structure();
@@ -1039,7 +1040,7 @@ namespace lib4neuro {
                                          error_partial,
                                          1.0,
                                          jacobian[i]);
-            error[i] = data.second[i] - fv[i];
+            error[i]         = data.second[i] - fv[i];
             error_partial[i] = 0;
         }
     }
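
The backpropagation hunks above follow the usual chain rule: each differentiable neuron adds scaling * d(activation)/d(bias) to the bias part of the gradient, then multiplies its error-scaling coefficient by d(activation)/d(potential) before pushing it through the incoming connections. A minimal sketch of that per-neuron step, assuming the logistic activation sigma(x, b) = 1 / (1 + e^(b - x)) used elsewhere in this patch; the names are illustrative, not the lib4neuro API:

#include <cmath>
#include <cstddef>
#include <vector>

// sigma(x, b) = 1 / (1 + e^(b - x)); d sigma/dx = sigma * (1 - sigma),
// and d sigma/db = -d sigma/dx.
double sigma(double x, double b)     { return 1.0 / (1.0 + std::exp(b - x)); }
double dsigma_dx(double x, double b) { double s = sigma(x, b); return s * (1.0 - s); }
double dsigma_db(double x, double b) { return -dsigma_dx(x, b); }

// One backward step: accumulate the bias component of the gradient, then
// rescale the error signal before it is propagated to the lower layer.
void backprop_step(double potential, double bias, double& scaling,
                   std::vector<double>& gradient, std::size_t bias_slot) {
    gradient[bias_slot] += scaling * dsigma_db(potential, bias);
    scaling             *= dsigma_dx(potential, bias);
}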
diff --git a/src/Network/NeuralNetworkSum.cpp b/src/Network/NeuralNetworkSum.cpp
index c9c0b616..2ce253c5 100644
--- a/src/Network/NeuralNetworkSum.cpp
+++ b/src/Network/NeuralNetworkSum.cpp
@@ -16,7 +16,7 @@ BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::NeuralNetworkSum);
 namespace lib4neuro {
 
     NeuralNetworkSum::NeuralNetworkSum() {
-        this->summand = nullptr;
+        this->summand             = nullptr;
         this->summand_coefficient = nullptr;
     }
 
diff --git a/src/Neuron/NeuronLogistic.cpp b/src/Neuron/NeuronLogistic.cpp
index b8b296f2..8dcbca44 100644
--- a/src/Neuron/NeuronLogistic.cpp
+++ b/src/Neuron/NeuronLogistic.cpp
@@ -17,10 +17,10 @@ namespace lib4neuro {
                                        double b) {
         //(e^(b + x) (e^b - e^x))/(e^b + e^x)^3
 
-        double ex = std::pow(lib4neuro::E,
-                             x);
-        double eb = std::pow(E,
-                             b);
+        double ex    = std::pow(lib4neuro::E,
+                                x);
+        double eb    = std::pow(E,
+                                b);
         double denom = (eb + ex);
 
         this->activation_val = (eb * ex * (eb - ex)) / (denom * denom * denom);
@@ -31,11 +31,11 @@ namespace lib4neuro {
                                                                        double b) {
         //-(e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
 
-        double eb = std::pow(E,
-                             b);
-        double ex = std::pow(E,
-                             x);
-        double ebex = eb * ex;
+        double eb    = std::pow(E,
+                                b);
+        double ex    = std::pow(E,
+                                x);
+        double ebex  = eb * ex;
         double denom = (eb + ex);
 
         return -(ebex * (-4 * ebex + eb * eb + ex * ex)) / (denom * denom * denom * denom);
@@ -60,11 +60,11 @@ namespace lib4neuro {
                                        double b) {
         //e^(b - x)/(e^(b - x) + 1)^2
 
-        double ex = std::pow(E,
-                             x);
-        double eb = std::pow(E,
-                             b);
-        double d = (eb / ex);
+        double ex    = std::pow(E,
+                                x);
+        double eb    = std::pow(E,
+                                b);
+        double d     = (eb / ex);
         double denom = (d + 1);
 
         this->activation_val = d / (denom * denom);
@@ -75,10 +75,10 @@ namespace lib4neuro {
                                                                        double b) {
         //(e^(b + x) (e^x - e^b))/(e^b + e^x)^3
 
-        double ex = std::pow(E,
-                             x);
-        double eb = std::pow(E,
-                             b);
+        double ex    = std::pow(E,
+                                x);
+        double eb    = std::pow(E,
+                                b);
         double denom = (eb + ex);
 
         return (eb * ex * (ex - eb)) / (denom * denom * denom);
@@ -115,10 +115,10 @@ namespace lib4neuro {
 
     double NeuronLogistic::activation_function_eval_derivative_bias(double x,
                                                                     double b) {
-        double ex = std::pow(E,
-                             b - x);
+        double ex    = std::pow(E,
+                                b - x);
         double denom = (ex + 1);
-        double res = -ex / (denom * denom);
+        double res   = -ex / (denom * denom);
 
         return res;
     }
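
The closed forms above are easy to spot-check numerically; a minimal central-difference check of activation_function_eval_derivative_bias (the test point, step size, and helper names are illustrative):

#include <cmath>
#include <cstdio>

int main() {
    const double E = 2.7182818284590;   // same constant as src/constants.h
    auto f = [&](double x, double b) { return 1.0 / (1.0 + std::pow(E, b - x)); };
    double x = 0.3, b = -0.7, h = 1e-6;
    double ex       = std::pow(E, b - x);
    double analytic = -ex / ((ex + 1) * (ex + 1));              // closed form above
    double numeric  = (f(x, b + h) - f(x, b - h)) / (2.0 * h);  // central difference
    std::printf("d/db: analytic = %.10f, numeric = %.10f\n", analytic, numeric);
    return 0;
}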
diff --git a/src/Solvers/DESolver.cpp b/src/Solvers/DESolver.cpp
index 6ce5c990..b76065d2 100644
--- a/src/Solvers/DESolver.cpp
+++ b/src/Solvers/DESolver.cpp
@@ -47,7 +47,7 @@ namespace lib4neuro {
 
     std::string MultiIndex::to_string() const {
         std::string output;
-        char buff[255];
+        char        buff[255];
 
         for (size_t i = 0; i < this->dim - 1; ++i) {
             sprintf(buff,
@@ -88,9 +88,9 @@ namespace lib4neuro {
                (int) n_inputs,
                (int) m);
 
-        this->dim_i = n_inputs;
-        this->dim_inn = m;
-        this->n_equations = n_equations;
+        this->dim_i           = n_inputs;
+        this->dim_inn         = m;
+        this->n_equations     = n_equations;
         this->errors_functions_types.resize(n_equations);
         this->errors_functions_data_sets.resize(n_equations);
 
@@ -98,8 +98,8 @@ namespace lib4neuro {
 
         /* input neurons */
         std::vector<size_t> input_set(this->dim_i);
-        size_t idx;
-        for (size_t i = 0; i < this->dim_i; ++i) {
+        size_t              idx;
+        for (size_t         i = 0; i < this->dim_i; ++i) {
             std::shared_ptr<Neuron> new_neuron;
             new_neuron.reset(new NeuronLinear());
             idx = this->solution->add_neuron(new_neuron,
@@ -110,7 +110,7 @@ namespace lib4neuro {
         size_t first_input_neuron = input_set[0];
 
         /* output neuron */
-        std::vector<size_t> output_set(1);
+        std::vector<size_t>     output_set(1);
         std::shared_ptr<Neuron> new_neuron;
         new_neuron.reset(new NeuronLinear());
         idx = this->solution->add_neuron(new_neuron,
@@ -120,8 +120,8 @@ namespace lib4neuro {
         size_t first_output_neuron = idx;
 
         /* inner neurons */
-        size_t first_inner_neuron = 0;
-        for (size_t i = 0; i < this->dim_inn; ++i) {
+        size_t      first_inner_neuron = 0;
+        for (size_t i                  = 0; i < this->dim_inn; ++i) {
             std::shared_ptr<NeuronLogistic> new_neuron2;
             new_neuron2.reset(new NeuronLogistic());
             this->solution_inner_neurons.push_back(new_neuron2);
@@ -134,8 +134,8 @@ namespace lib4neuro {
         }
 
         /* connections between input neurons and inner neurons */
-        size_t weight_idx;
-        for (size_t i = 0; i < this->dim_i; ++i) {
+        size_t      weight_idx;
+        for (size_t i                  = 0; i < this->dim_i; ++i) {
             for (size_t j = 0; j < this->dim_inn; ++j) {
                 weight_idx = this->solution->add_connection_simple(first_input_neuron + i,
                                                                    first_inner_neuron + j,
@@ -201,7 +201,7 @@ namespace lib4neuro {
         /* retrieve indices of the variables with respect to which we differentiate (applicable to any order, not just 2 or less) */
         std::vector<size_t> partial_derivative_indices;
         partial_derivative_indices.reserve(derivative_degree);
-        for (size_t i = 0; i < alpha.get_partial_derivatives_degrees()->size(); ++i) {
+        for (size_t i         = 0; i < alpha.get_partial_derivatives_degrees()->size(); ++i) {
             size_t degree = alpha.get_partial_derivatives_degrees()->at(i);
 
             while (degree > 0) {
@@ -235,8 +235,8 @@ namespace lib4neuro {
 
         /* input neurons */
         std::vector<size_t> input_set(this->dim_i);
-        size_t idx;
-        for (size_t i = 0; i < this->dim_i; ++i) {
+        size_t              idx;
+        for (size_t         i = 0; i < this->dim_i; ++i) {
             std::shared_ptr<Neuron> new_neuron;
             new_neuron.reset(new NeuronLinear());
             idx = new_net->add_neuron(new_neuron,
@@ -248,7 +248,7 @@ namespace lib4neuro {
 
 
         /* output neurons */
-        std::vector<size_t> output_set(1);
+        std::vector<size_t>     output_set(1);
         std::shared_ptr<Neuron> new_neuron;
         new_neuron.reset(new NeuronLinear());
         idx = new_net->add_neuron(new_neuron,
@@ -258,10 +258,10 @@ namespace lib4neuro {
         size_t first_output_neuron = idx;
 
         /* the new partial derivative has degree of at least one */
-        size_t first_inner_neuron = 0;
+        size_t                          first_inner_neuron = 0;
         std::shared_ptr<NeuronLogistic> n_ptr;
         std::shared_ptr<NeuronLogistic> n_ptr2;
-        for (size_t i = 0; i < this->dim_inn; ++i) {
+        for (size_t                     i                  = 0; i < this->dim_inn; ++i) {
             n_ptr = this->solution_inner_neurons.at(i);
 
             for (size_t j = 0; j < derivative_degree; ++j) {
@@ -280,8 +280,8 @@ namespace lib4neuro {
         }
 
         /* identity neurons serving as 'glue' */
-        size_t first_glue_neuron = idx + 1;
-        for (size_t i = 0; i < derivative_degree * this->dim_inn; ++i) {
+        size_t      first_glue_neuron = idx + 1;
+        for (size_t i                 = 0; i < derivative_degree * this->dim_inn; ++i) {
             std::shared_ptr<Neuron> new_neuron;
             new_neuron.reset(new NeuronLinear());
             idx = new_net->add_neuron(new_neuron,
@@ -289,8 +289,8 @@ namespace lib4neuro {
         }
 
         /* connections between input neurons and inner neurons */
-        size_t connection_idx = 0;
-        for (size_t i = 0; i < this->dim_i; ++i) {
+        size_t      connection_idx = 0;
+        for (size_t i              = 0; i < this->dim_i; ++i) {
             for (size_t j = 0; j < this->dim_inn; ++j) {
                 printf("  adding a connection between input neuron %2d[%2d] and inner neuron  %2d[%2d], connection index: %3d\n",
                        (int) i,
@@ -308,7 +308,7 @@ namespace lib4neuro {
         printf("----------------------------------------------------------------------------------------------------\n");
 
         /* connections between inner neurons and the first set of 'gluing' neurons */
-        for (size_t i = 0; i < this->dim_inn; ++i) {
+        for (size_t i  = 0; i < this->dim_inn; ++i) {
             printf("  adding a connection between inner neuron %2d[%2d] and glue neuron   %2d[%2d], connection index: %3d\n",
                    (int) i,
                    (int) (first_inner_neuron + i),
@@ -323,7 +323,7 @@ namespace lib4neuro {
         }
         printf("----------------------------------------------------------------------------------------------------\n");
 
-        size_t pd_idx;
+        size_t      pd_idx;
         /* connections between gluing neurons */
         for (size_t di = 0; di < derivative_degree - 1; ++di) {
             pd_idx = partial_derivative_indices[di];/* partial derivative index */
@@ -395,7 +395,7 @@ namespace lib4neuro {
 
 
         /* DEFINITION OF THE GLOBAL ERROR FUNCTION */
-        ErrorSum total_error;
+        ErrorSum    total_error;
         for (size_t i = 0; i < this->n_equations; ++i) {
             if (this->errors_functions_types.at(i) == ErrorFunctionType::ErrorFuncMSE) {
                 total_error.add_error_function(new MSE(this->differential_equations.at(i).get(),
@@ -450,7 +450,7 @@ namespace lib4neuro {
 
         ///* DEFINITION OF THE PARTIAL ERROR FUNCTIONS */
         std::vector<ErrorFunction*> error_functions(this->n_equations);
-        for (size_t i = 0; i < this->n_equations; ++i) {
+        for (size_t                 i = 0; i < this->n_equations; ++i) {
             if (this->errors_functions_types.at(i) == ErrorFunctionType::ErrorFuncMSE) {
                 error_functions[i] = new MSE(this->differential_equations.at(i).get(),
                                              &this->errors_functions_data_sets.at(i));
@@ -462,8 +462,8 @@ namespace lib4neuro {
         }
 
         /* DEFINITION OF THE GLOBAL ERROR FUNCTION */
-        ErrorSum total_error;
-        for (size_t i = 0; i < this->n_equations; ++i) {
+        ErrorSum    total_error;
+        for (size_t i                 = 0; i < this->n_equations; ++i) {
             total_error.add_error_function(error_functions[i],
                                            1.0);
         }
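
Both solve paths above end with the same construction: the global error is a weighted sum of per-equation error functions, each registered with weight 1.0. A minimal sketch of that reduction, with plain std::function objects standing in for the lib4neuro ErrorFunction instances:

#include <cstddef>
#include <functional>
#include <vector>

// Mirrors total_error.add_error_function(error_functions[i], 1.0) followed
// by an evaluation of the accumulated sum.
double total_error(const std::vector<std::function<double()>>& equation_errors,
                   const std::vector<double>& weights) {
    double sum = 0.0;
    for (std::size_t i = 0; i < equation_errors.size(); ++i) {
        sum += weights[i] * equation_errors[i]();
    }
    return sum;
}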
diff --git a/src/Solvers/DESolver.h b/src/Solvers/DESolver.h
index 05c1c0fb..b1f4781c 100644
--- a/src/Solvers/DESolver.h
+++ b/src/Solvers/DESolver.h
@@ -98,14 +98,14 @@ namespace lib4neuro {
 
         /* Error functions for differential equations */
         std::vector<ErrorFunctionType> errors_functions_types; // = nullptr;
-        std::vector<DataSet> errors_functions_data_sets; // = nullptr;
+        std::vector<DataSet>           errors_functions_data_sets; // = nullptr;
 
         /* NN as the unknown function */
-        std::shared_ptr<NeuralNetwork> solution = std::make_shared<NeuralNetwork>(NeuralNetwork());
+        std::shared_ptr<NeuralNetwork> solution            = std::make_shared<NeuralNetwork>(NeuralNetwork());
 
         /* auxiliary variables */
         std::vector<std::shared_ptr<NeuronLogistic>> solution_inner_neurons; // = nullptr;
-        size_t dim_i = 0, dim_inn = 0, n_equations = 0;
+        size_t                                       dim_i = 0, dim_inn = 0, n_equations = 0;
 
     public:
         /**
diff --git a/src/constants.h b/src/constants.h
index 84120a54..b2e7e81e 100644
--- a/src/constants.h
+++ b/src/constants.h
@@ -3,7 +3,7 @@
 #define INC_4NEURO_CONSTANTS_H
 
 namespace lib4neuro {
-    const double E = 2.7182818284590;
+    const double E  = 2.7182818284590;
     const double PI = 3.14159265358979323846;
 }
 
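
Note that E above is slightly truncated (std::exp(1.0) = 2.718281828459045...), a deviation of about 4.5e-14 that is harmless at the accuracies used in this library; a two-line check:

#include <cmath>
#include <cstdio>

int main() {
    std::printf("E  off by %.1e\n", std::fabs(2.7182818284590 - std::exp(1.0)));
    std::printf("PI off by %.1e\n", std::fabs(3.14159265358979323846 - std::acos(-1.0)));
    return 0;
}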
diff --git a/src/examples/CMakeLists.txt b/src/examples/CMakeLists.txt
index d96a9d91..1a7fae38 100644
--- a/src/examples/CMakeLists.txt
+++ b/src/examples/CMakeLists.txt
@@ -2,113 +2,113 @@
 # EXAMPLES #
 ############
 
-add_executable(seminar seminar.cpp)
-target_link_libraries(seminar PUBLIC lib4neuro)
+ADD_EXECUTABLE(seminar seminar.cpp)
+TARGET_LINK_LIBRARIES(seminar PUBLIC lib4neuro)
 
-add_executable(dev_sandbox dev_sandbox.cpp)
-target_link_libraries(dev_sandbox PUBLIC lib4neuro)
+ADD_EXECUTABLE(dev_sandbox dev_sandbox.cpp)
+TARGET_LINK_LIBRARIES(dev_sandbox PUBLIC lib4neuro)
 
-add_executable(net_test_1 net_test_1.cpp)
-target_link_libraries(net_test_1 PUBLIC lib4neuro)
+ADD_EXECUTABLE(net_test_1 net_test_1.cpp)
+TARGET_LINK_LIBRARIES(net_test_1 PUBLIC lib4neuro)
 
-add_executable(net_test_2 net_test_2.cpp)
-target_link_libraries(net_test_2 PUBLIC lib4neuro)
+ADD_EXECUTABLE(net_test_2 net_test_2.cpp)
+TARGET_LINK_LIBRARIES(net_test_2 PUBLIC lib4neuro)
 
-add_executable(net_test_3 net_test_3.cpp)
-target_link_libraries(net_test_3 PUBLIC lib4neuro)
+ADD_EXECUTABLE(net_test_3 net_test_3.cpp)
+TARGET_LINK_LIBRARIES(net_test_3 PUBLIC lib4neuro)
 
-add_executable(net_test_ode_1 net_test_ode_1.cpp)
-target_link_libraries(net_test_ode_1 PUBLIC lib4neuro)
+ADD_EXECUTABLE(net_test_ode_1 net_test_ode_1.cpp)
+TARGET_LINK_LIBRARIES(net_test_ode_1 PUBLIC lib4neuro)
 
-add_executable(net_test_pde_1 net_test_pde_1.cpp)
-target_link_libraries(net_test_pde_1 PUBLIC lib4neuro)
+ADD_EXECUTABLE(net_test_pde_1 net_test_pde_1.cpp)
+TARGET_LINK_LIBRARIES(net_test_pde_1 PUBLIC lib4neuro)
 
-add_executable(network_serialization network_serialization.cpp)
-target_link_libraries(network_serialization PUBLIC lib4neuro)
+ADD_EXECUTABLE(network_serialization network_serialization.cpp)
+TARGET_LINK_LIBRARIES(network_serialization PUBLIC lib4neuro)
 
-add_executable(test_harmonic_oscilator net_test_harmonic_oscilator.cpp)
-target_link_libraries(test_harmonic_oscilator PUBLIC lib4neuro)
+ADD_EXECUTABLE(test_harmonic_oscilator net_test_harmonic_oscilator.cpp)
+TARGET_LINK_LIBRARIES(test_harmonic_oscilator PUBLIC lib4neuro)
 
-add_executable(x2_fitting x2_fitting.cpp)
-target_link_libraries(x2_fitting PUBLIC lib4neuro)
+ADD_EXECUTABLE(x2_fitting x2_fitting.cpp)
+TARGET_LINK_LIBRARIES(x2_fitting PUBLIC lib4neuro)
 
-set(EXAMPLES_OUTPUT_DIR ${PROJECT_BINARY_DIR}/examples)
+SET(EXAMPLES_OUTPUT_DIR ${PROJECT_BINARY_DIR}/examples)
 
-set_target_properties(
-        dev_sandbox
-        net_test_1
-        net_test_2
-        net_test_3
-        net_test_ode_1
-        net_test_pde_1
-        network_serialization
-        test_harmonic_oscilator
-        seminar
-        x2_fitting
+SET_TARGET_PROPERTIES(
+    dev_sandbox
+    net_test_1
+    net_test_2
+    net_test_3
+    net_test_ode_1
+    net_test_pde_1
+    network_serialization
+    test_harmonic_oscilator
+    seminar
+    x2_fitting
 
-        PROPERTIES
-        ARCHIVE_OUTPUT_DIRECTORY $<1:${EXAMPLES_OUTPUT_DIR}>
-        LIBRARY_OUTPUT_DIRECTORY $<1:${EXAMPLES_OUTPUT_DIR}>
-        RUNTIME_OUTPUT_DIRECTORY $<1:${EXAMPLES_OUTPUT_DIR}>
+    PROPERTIES
+    ARCHIVE_OUTPUT_DIRECTORY $<1:${EXAMPLES_OUTPUT_DIR}>
+    LIBRARY_OUTPUT_DIRECTORY $<1:${EXAMPLES_OUTPUT_DIR}>
+    RUNTIME_OUTPUT_DIRECTORY $<1:${EXAMPLES_OUTPUT_DIR}>
 )
 
-target_include_directories(
-        dev_sandbox
-        PRIVATE
-        ${ROOT_DIR}/include
+TARGET_INCLUDE_DIRECTORIES(
+    dev_sandbox
+    PRIVATE
+    ${ROOT_DIR}/include
 )
 
-target_include_directories(
-        net_test_1
-        PRIVATE
-        ${ROOT_DIR}/include
+TARGET_INCLUDE_DIRECTORIES(
+    net_test_1
+    PRIVATE
+    ${ROOT_DIR}/include
 )
 
-target_include_directories(
-        net_test_2
-        PRIVATE
-        ${ROOT_DIR}/include
+TARGET_INCLUDE_DIRECTORIES(
+    net_test_2
+    PRIVATE
+    ${ROOT_DIR}/include
 )
 
-target_include_directories(
-        net_test_3
-        PRIVATE
-        ${ROOT_DIR}/include
-        ${Boost_INCLUDE_DIRS}
+TARGET_INCLUDE_DIRECTORIES(
+    net_test_3
+    PRIVATE
+    ${ROOT_DIR}/include
+    ${Boost_INCLUDE_DIRS}
 )
 
-target_include_directories(
-        net_test_ode_1
-        PRIVATE
-        ${ROOT_DIR}/include
+TARGET_INCLUDE_DIRECTORIES(
+    net_test_ode_1
+    PRIVATE
+    ${ROOT_DIR}/include
 )
 
-target_include_directories(
-        net_test_pde_1
-        PRIVATE
-        ${ROOT_DIR}/include
+TARGET_INCLUDE_DIRECTORIES(
+    net_test_pde_1
+    PRIVATE
+    ${ROOT_DIR}/include
 )
 
-target_include_directories(
-        network_serialization
-        PRIVATE
-        ${ROOT_DIR}/include
+TARGET_INCLUDE_DIRECTORIES(
+    network_serialization
+    PRIVATE
+    ${ROOT_DIR}/include
 )
 
-target_include_directories(
-        test_harmonic_oscilator
-        PRIVATE
-        ${ROOT_DIR}/include
+TARGET_INCLUDE_DIRECTORIES(
+    test_harmonic_oscilator
+    PRIVATE
+    ${ROOT_DIR}/include
 )
 
-target_include_directories(
-        seminar
-        PRIVATE
-        ${ROOT_DIR}/include
+TARGET_INCLUDE_DIRECTORIES(
+    seminar
+    PRIVATE
+    ${ROOT_DIR}/include
 )
 
-target_include_directories(
-        x2_fitting
-        PRIVATE
-        ${ROOT_DIR}/include
+TARGET_INCLUDE_DIRECTORIES(
+    x2_fitting
+    PRIVATE
+    ${ROOT_DIR}/include
 )
diff --git a/src/examples/dev_sandbox.cpp b/src/examples/dev_sandbox.cpp
index 86d05284..fe257beb 100644
--- a/src/examples/dev_sandbox.cpp
+++ b/src/examples/dev_sandbox.cpp
@@ -19,9 +19,9 @@ int main(int argc,
          char** argv) {
 
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-    std::vector<double> inp, out;
+    std::vector<double>                                              inp, out;
 
-    for (int i = 0; i < 3; i++) {
+    for (int i                  = 0; i < 3; i++) {
         inp.push_back(i);
         out.push_back(i + 4);
     }
@@ -30,8 +30,8 @@ int main(int argc,
                                          out));
 
     lib4neuro::DataSet DataSet(&data_vec);
-    int elements = DataSet.get_n_elements();
-    std::string filename = "/home/martin/4Neuro/build/unit-tests/testDataSet";
+    int                elements = DataSet.get_n_elements();
+    std::string        filename = "/home/martin/4Neuro/build/unit-tests/testDataSet";
     DataSet.store_text(filename);
 
     // Test of correct file creation
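
Reading the stored file back mirrors the boost::archive::text_iarchive pattern from the first hunk of this patch; a minimal sketch, assuming (as the error message there suggests) that store_text writes a Boost text archive:

#include <fstream>
#include <string>
#include <boost/archive/text_iarchive.hpp>

template <class T>
void load_text(T& object, const std::string& filename) {
    std::ifstream ifs(filename);
    boost::archive::text_iarchive ia(ifs);  // throws archive_exception on a bad file
    ia >> object;
}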
diff --git a/src/examples/net_test_1.cpp b/src/examples/net_test_1.cpp
index d106b92c..0816d644 100644
--- a/src/examples/net_test_1.cpp
+++ b/src/examples/net_test_1.cpp
@@ -15,15 +15,15 @@ void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
     std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
 
     for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
-        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i]     = -10;
         domain_bounds[2 * i + 1] = 10;
     }
 
-    double c1 = 1.7;
-    double c2 = 1.7;
-    double w = 0.7;
+    double c1          = 1.7;
+    double c2          = 1.7;
+    double w           = 0.7;
     size_t n_particles = 50;
-    size_t iter_max = 10;
+    size_t iter_max    = 10;
 
     /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
      * terminating criterion is met */
@@ -32,18 +32,18 @@ void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
     /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
      * terminating criterion is met ('n' is the total number of particles) */
     double epsilon = 0.02;
-    double delta = 0.7;
+    double delta   = 0.7;
 
     l4n::ParticleSwarm swarm_01(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            iter_max
+        &domain_bounds,
+        c1,
+        c2,
+        w,
+        gamma,
+        epsilon,
+        delta,
+        n_particles,
+        iter_max
     );
     swarm_01.optimize(ef);
 
@@ -52,16 +52,16 @@ void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
     /* ERROR CALCULATION */
     std::cout << "Run finished! Error of the network[Particle swarm]: " << ef.eval(nullptr) << std::endl;
     std::cout
-            << "***********************************************************************************************************************"
-            << std::endl;
+        << "***********************************************************************************************************************"
+        << std::endl;
 }
 
 void optimize_via_gradient_descent(l4n::NeuralNetwork& net,
                                    l4n::ErrorFunction& ef) {
 
     std::cout
-            << "***********************************************************************************************************************"
-            << std::endl;
+        << "***********************************************************************************************************************"
+        << std::endl;
     l4n::GradientDescentBB gd(1e-6,
                               1000);
 
@@ -76,21 +76,21 @@ void optimize_via_gradient_descent(l4n::NeuralNetwork& net,
 int main() {
 
     std::cout
-            << "Running lib4neuro example   1: Basic use of the particle swarm or gradient method to train a simple network with few linear neurons"
-            << std::endl;
+        << "Running lib4neuro example   1: Basic use of the particle swarm or gradient method to train a simple network with few linear neurons"
+        << std::endl;
     std::cout
-            << "***********************************************************************************************************************"
-            << std::endl;
+        << "***********************************************************************************************************************"
+        << std::endl;
     std::cout << "The code attempts to find an approximate solution to the system of equations below:" << std::endl;
     std::cout << "0 * w1 + 1 * w2 = 0.50" << std::endl;
     std::cout << "1 * w1 + 0.5*w2 = 0.75" << std::endl;
     std::cout
-            << "***********************************************************************************************************************"
-            << std::endl;
+        << "***********************************************************************************************************************"
+        << std::endl;
 
     /* TRAIN DATA DEFINITION */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-    std::vector<double> inp, out;
+    std::vector<double>                                              inp, out;
 
     inp = {0, 1};
     out = {0.5};
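
For reference, the printed system is small enough to solve by hand: the first equation gives w2 = 0.50, and substituting into the second gives w1 = 0.75 - 0.5 * 0.50 = 0.50, so a perfectly trained network reaches zero error at w1 = w2 = 0.5. A two-line check:

#include <cstdio>

int main() {
    double w2 = 0.50;             // from 0 * w1 + 1 * w2 = 0.50
    double w1 = 0.75 - 0.5 * w2;  // from 1 * w1 + 0.5 * w2 = 0.75
    std::printf("w1 = %.2f, w2 = %.2f\n", w1, w2);  // both 0.50
    return 0;
}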
diff --git a/src/examples/net_test_2.cpp b/src/examples/net_test_2.cpp
index b3ef28ec..345bbb88 100644
--- a/src/examples/net_test_2.cpp
+++ b/src/examples/net_test_2.cpp
@@ -14,15 +14,15 @@ void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
     std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
 
     for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
-        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i]     = -10;
         domain_bounds[2 * i + 1] = 10;
     }
 
-    double c1 = 1.7;
-    double c2 = 1.7;
-    double w = 0.7;
+    double c1          = 1.7;
+    double c2          = 1.7;
+    double w           = 0.7;
     size_t n_particles = 50;
-    size_t iter_max = 10;
+    size_t iter_max    = 10;
 
     /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
      * terminating criterion is met */
@@ -31,18 +31,18 @@ void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
     /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
      * terminating criterion is met ('n' is the total number of particles) */
     double epsilon = 0.02;
-    double delta = 0.7;
+    double delta   = 0.7;
 
     l4n::ParticleSwarm swarm_01(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            iter_max
+        &domain_bounds,
+        c1,
+        c2,
+        w,
+        gamma,
+        epsilon,
+        delta,
+        n_particles,
+        iter_max
     );
     swarm_01.optimize(ef);
 
@@ -50,8 +50,8 @@ void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
 
     std::cout << "Run finished! Error of the network[Particle swarm]: " << ef.eval(nullptr) << std::endl;
     std::cout
-            << "***********************************************************************************************************************"
-            << std::endl;
+        << "***********************************************************************************************************************"
+        << std::endl;
 }
 
 void optimize_via_gradient_descent(l4n::NeuralNetwork& net,
@@ -67,28 +67,28 @@ void optimize_via_gradient_descent(l4n::NeuralNetwork& net,
     /* ERROR CALCULATION */
     std::cout << "Run finished! Error of the network[Gradient descent]: " << ef.eval(nullptr) << std::endl;
     std::cout
-            << "***********************************************************************************************************************"
-            << std::endl;
+        << "***********************************************************************************************************************"
+        << std::endl;
 }
 
 int main() {
     std::cout
-            << "Running lib4neuro example   2: Basic use of the particle swarm method to train a network with five linear neurons and repeating edge weights"
-            << std::endl;
+        << "Running lib4neuro example   2: Basic use of the particle swarm method to train a network with five linear neurons and repeating edge weights"
+        << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout << "The code attempts to find an approximate solution to the system of equations below:" << std::endl;
     std::cout << " 0 * w1 + 1 * w2 = 0.50 + b1" << std::endl;
     std::cout << " 1 * w1 + 0.5*w2 = 0.75 + b1" << std::endl;
     std::cout << "(1.25 + b2) * w2 = 0.63 + b3" << std::endl;
     std::cout
-            << "***********************************************************************************************************************"
-            << std::endl;
+        << "***********************************************************************************************************************"
+        << std::endl;
 
     /* TRAIN DATA DEFINITION */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-    std::vector<double> inp, out;
+    std::vector<double>                                              inp, out;
 
     inp = {0, 1, 0};
     out = {0.5, 0};
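
The comments above state the swarm's second termination criterion in words; a minimal sketch of how such a check can be computed (illustrative only, not the actual ParticleSwarm internals):

#include <cmath>
#include <cstddef>
#include <vector>

// True when at least delta * n particles lie within 'epsilon' of the
// swarm centroid; one particle position per row.
bool centroid_criterion(const std::vector<std::vector<double>>& positions,
                        double epsilon, double delta) {
    const std::size_t n = positions.size(), dim = positions[0].size();
    std::vector<double> centroid(dim, 0.0);
    for (const auto& p : positions) {
        for (std::size_t d = 0; d < dim; ++d) centroid[d] += p[d] / n;
    }
    std::size_t inside = 0;
    for (const auto& p : positions) {
        double dist2 = 0.0;
        for (std::size_t d = 0; d < dim; ++d) {
            dist2 += (p[d] - centroid[d]) * (p[d] - centroid[d]);
        }
        if (std::sqrt(dist2) <= epsilon) ++inside;
    }
    return inside >= static_cast<std::size_t>(delta * n);
}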
diff --git a/src/examples/net_test_3.cpp b/src/examples/net_test_3.cpp
index 5d382da8..d87e606c 100644
--- a/src/examples/net_test_3.cpp
+++ b/src/examples/net_test_3.cpp
@@ -41,8 +41,8 @@ void calculate_gradient_analytical(std::vector<double>& input,
                                    size_t n_hidden_neurons,
                                    std::vector<double>& gradient_analytical) {
 
-    double a, b, y, x = input[0];
-    for (size_t i = 0; i < n_hidden_neurons; ++i) {
+    double      a, b, y, x = input[0];
+    for (size_t i          = 0; i < n_hidden_neurons; ++i) {
         a = parameter_weights[i];
         b = parameter_biases[i];
         y = parameter_weights[n_hidden_neurons + i];
@@ -50,7 +50,7 @@ void calculate_gradient_analytical(std::vector<double>& input,
         gradient_analytical[i] += y * x * std::exp(b - a * x) / ((1 + std::exp(b - a * x)) * (1 + std::exp(b - a * x)));
         gradient_analytical[n_hidden_neurons + i] += 1.0 / ((1 + std::exp(b - a * x)));
         gradient_analytical[2 * n_hidden_neurons + i] -=
-                y * std::exp(b - a * x) / ((1 + std::exp(b - a * x)) * (1 + std::exp(b - a * x)));
+            y * std::exp(b - a * x) / ((1 + std::exp(b - a * x)) * (1 + std::exp(b - a * x)));
     }
 
 }
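
The closed forms in calculate_gradient_analytical can be spot-checked against a central difference of f = sum_i [y_i / (1 + exp(b_i - a_i * x))], the model this example differentiates; a one-neuron sketch with illustrative values:

#include <cmath>
#include <cstdio>

int main() {
    double a = 0.8, b = -0.2, y = 1.3, x = 0.5, h = 1e-6;
    auto f = [&](double aa) { return y / (1.0 + std::exp(b - aa * x)); };
    double analytic = y * x * std::exp(b - a * x)
                      / std::pow(1.0 + std::exp(b - a * x), 2.0);
    double numeric  = (f(a + h) - f(a - h)) / (2.0 * h);
    std::printf("df/da: analytic = %.10f, numeric = %.10f\n", analytic, numeric);
    return 0;
}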
@@ -58,7 +58,7 @@ void calculate_gradient_analytical(std::vector<double>& input,
 int main(int argc,
          char** argv) {
 
-    int n_tests = 2;
+    int n_tests          = 2;
     int n_hidden_neurons = 2;
     try {
         /* Numbers of neurons in layers (including input and output layers) */
@@ -71,18 +71,18 @@ int main(int argc,
         std::vector<l4n::NEURON_TYPE> hidden_type_v = {l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC,
                                                        l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC,
                                                        l4n::NEURON_TYPE::LOGISTIC}; // hidden_type_v = {l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LINEAR}
-        l4n::FullyConnectedFFN nn1(&neuron_numbers_in_layers,
-                                   &hidden_type_v);
+        l4n::FullyConnectedFFN        nn1(&neuron_numbers_in_layers,
+                                          &hidden_type_v);
         nn1.randomize_parameters();
 
-        boost::random::mt19937 gen(std::time(0));
+        boost::random::mt19937                     gen(std::time(0));
         boost::random::uniform_real_distribution<> dist(-1,
                                                         1);
 
-        size_t n_parameters = nn1.get_n_weights() + nn1.get_n_biases();
+        size_t              n_parameters = nn1.get_n_weights() + nn1.get_n_biases();
         std::vector<double> gradient_backprogation(n_parameters);
         std::vector<double> gradient_analytical(n_parameters);
-        std::vector<double>* parameter_biases = nn1.get_parameter_ptr_biases();
+        std::vector<double>* parameter_biases  = nn1.get_parameter_ptr_biases();
         std::vector<double>* parameter_weights = nn1.get_parameter_ptr_weights();
         std::vector<double> error_derivative = {1};
 
@@ -93,7 +93,7 @@ int main(int argc,
             std::vector<double> input(1);
             std::vector<double> output(1);
 
-            input[0] = dist(gen);
+            input[0]  = dist(gen);
             output[0] = 0;
 
 
diff --git a/src/examples/net_test_harmonic_oscilator.cpp b/src/examples/net_test_harmonic_oscilator.cpp
index 98f5c5ad..e216200c 100644
--- a/src/examples/net_test_harmonic_oscilator.cpp
+++ b/src/examples/net_test_harmonic_oscilator.cpp
@@ -63,16 +63,16 @@ void optimize_via_particle_swarm(l4n::DESolver& solver,
 
     printf("Solution via the particle swarm optimization!\n");
     std::vector<double> domain_bounds(
-            2 * (solver.get_solution(alpha)->get_n_biases() + solver.get_solution(alpha)->get_n_weights()));
+        2 * (solver.get_solution(alpha)->get_n_biases() + solver.get_solution(alpha)->get_n_weights()));
 
     for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
-        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i]     = -10;
         domain_bounds[2 * i + 1] = 10;
     }
 
     double c1 = 1.7;
     double c2 = 1.7;
-    double w = 0.700;
+    double w  = 0.700;
 
     /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
      * terminating criterion is met */
@@ -81,18 +81,18 @@ void optimize_via_particle_swarm(l4n::DESolver& solver,
     /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
      * terminating criterion is met ('n' is the total number of particles) */
     double epsilon = 0.02;
-    double delta = 0.7;
+    double delta   = 0.7;
 
     l4n::ParticleSwarm swarm(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            max_iters
+        &domain_bounds,
+        c1,
+        c2,
+        w,
+        gamma,
+        epsilon,
+        delta,
+        n_particles,
+        max_iters
     );
 
     solver.solve(swarm);
@@ -122,12 +122,12 @@ void test_harmonic_oscilator_fixed_E(double EE,
                                      size_t n_particles) {
     std::cout << "Finding a solution via the Particle Swarm Optimization" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
     /* SOLVER SETUP */
-    size_t n_inputs = 1;
-    size_t n_equations = 1;
+    size_t        n_inputs    = 1;
+    size_t        n_equations = 1;
     l4n::DESolver solver(n_equations,
                          n_inputs,
                          n_inner_neurons);
@@ -171,8 +171,8 @@ void test_harmonic_oscilator_fixed_E(double EE,
         data_vec_g.emplace_back(std::make_pair(inp,
                                                out));
     }
-    inp = {0.0};
-    out = {1.0};
+    inp                 = {0.0};
+    out                 = {1.0};
     data_vec_g.emplace_back(std::make_pair(inp,
                                            out));
 
@@ -199,32 +199,32 @@ void test_harmonic_oscilator_fixed_E(double EE,
 int main() {
     std::cout << "Running lib4neuro harmonic Oscilator example   1" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout << "          Governing equation: -y''(x) + x^2 * y(x) = E * y(x)" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout
-            << "Expressing solution as y(x) = sum over [a_i / (1 + exp(bi - wxi*x ))], i in [1, n], where n is the number of hidden neurons"
-            << std::endl;
+        << "Expressing solution as y(x) = sum over [a_i / (1 + exp(bi - wxi*x ))], i in [1, n], where n is the number of hidden neurons"
+        << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
-    double EE = -1.0;
+    double       EE              = -1.0;
     unsigned int n_inner_neurons = 2;
-    unsigned int train_size = 10;
-    double accuracy = 1e-3;
-    double ds = -5.0;
-    double de = 5.0;
+    unsigned int train_size      = 10;
+    double       accuracy        = 1e-3;
+    double       ds              = -5.0;
+    double       de              = 5.0;
 
     unsigned int test_size = 300;
-    double ts = -6.0;
-    double te = 6.0;
+    double       ts        = -6.0;
+    double       te        = 6.0;
 
     size_t particle_swarm_max_iters = 1000;
-    size_t n_particles = 100;
+    size_t n_particles              = 100;
     test_harmonic_oscilator_fixed_E(EE,
                                     accuracy,
                                     n_inner_neurons,
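
The ansatz printed by this example, y(x) = sum over [a_i / (1 + exp(b_i - wx_i * x))], is straightforward to evaluate on its own; a minimal sketch with illustrative parameter vectors:

#include <cmath>
#include <cstddef>
#include <vector>

double ansatz(double x,
              const std::vector<double>& a,
              const std::vector<double>& b,
              const std::vector<double>& wx) {
    double y = 0.0;
    for (std::size_t i = 0; i < a.size(); ++i) {
        y += a[i] / (1.0 + std::exp(b[i] - wx[i] * x));
    }
    return y;
}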
diff --git a/src/examples/net_test_ode_1.cpp b/src/examples/net_test_ode_1.cpp
index f35bd0b3..28c48ec4 100644
--- a/src/examples/net_test_ode_1.cpp
+++ b/src/examples/net_test_ode_1.cpp
@@ -28,16 +28,16 @@ void optimize_via_particle_swarm(l4n::DESolver& solver,
 
     printf("Solution via the particle swarm optimization!\n");
     std::vector<double> domain_bounds(
-            2 * (solver.get_solution(alpha)->get_n_biases() + solver.get_solution(alpha)->get_n_weights()));
+        2 * (solver.get_solution(alpha)->get_n_biases() + solver.get_solution(alpha)->get_n_weights()));
 
     for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
-        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i]     = -10;
         domain_bounds[2 * i + 1] = 10;
     }
 
     double c1 = 1.7;
     double c2 = 1.7;
-    double w = 0.700;
+    double w  = 0.700;
 
     /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
      * terminating criterion is met */
@@ -46,18 +46,18 @@ void optimize_via_particle_swarm(l4n::DESolver& solver,
     /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
      * terminating criterion is met ('n' is the total number of particles) */
     double epsilon = 0.02;
-    double delta = 0.7;
+    double delta   = 0.7;
 
     l4n::ParticleSwarm swarm(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            max_iters
+        &domain_bounds,
+        c1,
+        c2,
+        w,
+        gamma,
+        epsilon,
+        delta,
+        n_particles,
+        max_iters
     );
 
     solver.solve(swarm);
@@ -88,8 +88,8 @@ void export_solution(size_t n_test_points,
                      l4n::MultiIndex& alpha_1,
                      l4n::MultiIndex& alpha_2,
                      const std::string prefix) {
-    l4n::NeuralNetwork* solution = solver.get_solution(alpha_0);
-    l4n::NeuralNetwork* solution_d = solver.get_solution(alpha_1);
+    l4n::NeuralNetwork* solution    = solver.get_solution(alpha_0);
+    l4n::NeuralNetwork* solution_d  = solver.get_solution(alpha_1);
     l4n::NeuralNetwork* solution_dd = solver.get_solution(alpha_2);
 
     /* ISOTROPIC TEST SET FOR BOUNDARY CONDITIONS */
@@ -145,8 +145,8 @@ void export_solution(size_t n_test_points,
     ofs.close();
 
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
 }
 
@@ -163,12 +163,12 @@ void test_ode(double accuracy,
 
     std::cout << "Finding a solution via the Particle Swarm Optimization and Gradient descent method!" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
     /* SOLVER SETUP */
-    size_t n_inputs = 1;
-    size_t n_equations = 3;
+    size_t        n_inputs    = 1;
+    size_t        n_equations = 3;
     l4n::DESolver solver_01(n_equations,
                             n_inputs,
                             n_inner_neurons);
@@ -210,7 +210,7 @@ void test_ode(double accuracy,
 
     /* TRAIN DATA FOR THE GOVERNING DE */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_g;
-    std::vector<double> test_points(train_size);
+    std::vector<double>                                              test_points(train_size);
 
 
     /* ISOTROPIC TRAIN SET */
@@ -272,7 +272,7 @@ void test_ode(double accuracy,
                     alpha_2,
                     "gradient_");
 
-    auto end = std::chrono::system_clock::now();
+    auto                          end             = std::chrono::system_clock::now();
     std::chrono::duration<double> elapsed_seconds = end - start;
     std::cout << "elapsed time: " << elapsed_seconds.count() << std::endl;
 }
@@ -280,33 +280,33 @@ void test_ode(double accuracy,
 int main() {
     std::cout << "Running lib4neuro Ordinary Differential Equation example   1" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout << "          Governing equation: y''(x) + 4y'(x) + 4y(x) = 0.0, for x in [0, 4]" << std::endl;
     std::cout << "Dirichlet boundary condition:                  y(0.0) = 1.0" << std::endl;
     std::cout << "  Neumann boundary condition:                 y'(0.0) = 1.0" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout
-            << "Expressing solution as y(x) = sum over [a_i / (1 + exp(bi - wxi*x ))], i in [1, n], where n is the number of hidden neurons"
-            << std::endl;
+        << "Expressing solution as y(x) = sum over [a_i / (1 + exp(bi - wxi*x ))], i in [1, n], where n is the number of hidden neurons"
+        << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
     unsigned int n_inner_neurons = 2;
-    unsigned int train_size = 10;
-    double accuracy = 1e-1;
-    double ds = 0.0;
-    double de = 4.0;
+    unsigned int train_size      = 10;
+    double       accuracy        = 1e-1;
+    double       ds              = 0.0;
+    double       de              = 4.0;
 
     unsigned int test_size = 10;
-    double ts = ds;
-    double te = de + 2;
+    double       ts        = ds;
+    double       te        = de + 2;
 
     size_t particle_swarm_max_iters = 10;
-    size_t n_particles = 2;
+    size_t n_particles              = 2;
 
     test_ode(accuracy,
              n_inner_neurons,
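
The printed ODE admits a closed-form reference solution: the characteristic polynomial r^2 + 4r + 4 = (r + 2)^2 gives y(x) = (C1 + C2 * x) * e^(-2x), and the conditions y(0) = 1, y'(0) = 1 fix C1 = 1, C2 = 3. Handy for judging the trained network on [0, 4]; the function name is illustrative:

#include <cmath>

// y(0) = 1 and y'(0) = 3 - 2 = 1, matching the printed Dirichlet and
// Neumann boundary conditions.
double exact_solution(double x) {
    return (1.0 + 3.0 * x) * std::exp(-2.0 * x);
}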
diff --git a/src/examples/net_test_pde_1.cpp b/src/examples/net_test_pde_1.cpp
index fe26ea79..b69cd919 100644
--- a/src/examples/net_test_pde_1.cpp
+++ b/src/examples/net_test_pde_1.cpp
@@ -31,16 +31,16 @@ void optimize_via_particle_swarm(l4n::DESolver& solver,
 
     printf("Solution via the particle swarm optimization!\n");
     std::vector<double> domain_bounds(
-            2 * (solver.get_solution(alpha)->get_n_biases() + solver.get_solution(alpha)->get_n_weights()));
+        2 * (solver.get_solution(alpha)->get_n_biases() + solver.get_solution(alpha)->get_n_weights()));
 
     for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
-        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i]     = -10;
         domain_bounds[2 * i + 1] = 10;
     }
 
     double c1 = 1.7;
     double c2 = 1.7;
-    double w = 0.700;
+    double w  = 0.700;
 
     /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
      * terminating criterion is met */
@@ -49,18 +49,18 @@ void optimize_via_particle_swarm(l4n::DESolver& solver,
     /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
      * terminating criterion is met ('n' is the total number of particles) */
     double epsilon = 0.02;
-    double delta = 0.7;
+    double delta   = 0.7;
 
     l4n::ParticleSwarm swarm(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            max_iters
+        &domain_bounds,
+        c1,
+        c2,
+        w,
+        gamma,
+        epsilon,
+        delta,
+        n_particles,
+        max_iters
     );
 
     solver.solve(swarm);
@@ -84,8 +84,8 @@ void export_solution(size_t n_test_points,
                      l4n::MultiIndex& alpha_01,
                      l4n::MultiIndex& alpha_20,
                      const std::string prefix) {
-    l4n::NeuralNetwork* solution = solver.get_solution(alpha_00);
-    l4n::NeuralNetwork* solution_t = solver.get_solution(alpha_01);
+    l4n::NeuralNetwork* solution    = solver.get_solution(alpha_00);
+    l4n::NeuralNetwork* solution_t  = solver.get_solution(alpha_01);
     l4n::NeuralNetwork* solution_xx = solver.get_solution(alpha_20);
 
     size_t i, j;
@@ -105,13 +105,13 @@ void export_solution(size_t n_test_points,
     std::cout.flush();
 
     std::vector<double> input(2), output(1), output_t(1), output_xx(1);
-    std::ofstream ofs(final_fn,
-                      std::ofstream::out);
-    double frac = (te - ts) / (n_test_points - 1);
+    std::ofstream       ofs(final_fn,
+                            std::ofstream::out);
+    double              frac        = (te - ts) / (n_test_points - 1);
     for (i = 0; i < n_test_points; ++i) {
-        x = i * frac + ts;
+        x      = i * frac + ts;
         for (j = 0; j < n_test_points; ++j) {
-            t = j * frac + ts;
+            t     = j * frac + ts;
             input = {x, t};
 
             solution->eval_single(input,
@@ -142,9 +142,9 @@ void export_solution(size_t n_test_points,
            final_fn.c_str(),
            0.0);
     for (i = 0; i < n_test_points; ++i) {
-        x = i * frac + ts;
+        x      = i * frac + ts;
         for (j = 0; j < n_test_points; ++j) {
-            t = j * frac + ts;
+            t     = j * frac + ts;
             input = {x, t};
 
             solution_t->eval_single(input,
@@ -220,8 +220,8 @@ void export_solution(size_t n_test_points,
     ofs.close();
 
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 }
 
 void test_pde(double accuracy,
@@ -236,8 +236,8 @@ void test_pde(double accuracy,
               size_t n_particles) {
 
     /* do not change below */
-    size_t n_inputs = 2;
-    size_t n_equations = 3;
+    size_t        n_inputs    = 2;
+    size_t        n_equations = 3;
     l4n::DESolver solver_01(n_equations,
                             n_inputs,
                             n_inner_neurons);
@@ -289,7 +289,7 @@ void test_pde(double accuracy,
                                                       out));
         }
     }
-    l4n::DataSet ds_00(&data_vec_zero);
+    l4n::DataSet      ds_00(&data_vec_zero);
 
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_t;
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_x;
@@ -355,37 +355,37 @@ void test_pde(double accuracy,
 int main() {
     std::cout << "Running lib4neuro Partial Differential Equation example   1" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout
-            << "          Governing equation: y_xx - y_t = 0,                                   for (x, t) in [0, 1] x [0, 1]"
-            << std::endl;
+        << "          Governing equation: y_xx - y_t = 0,                                   for (x, t) in [0, 1] x [0, 1]"
+        << std::endl;
     std::cout << "Dirichlet boundary condition:    y(0, t) = sin(t),                              for t in [0, 1]"
               << std::endl;
     std::cout << "Dirichlet boundary condition:    y(x, 0) = exp(-sqrt(0.5)x) * sin(-sqrt(0.5)x), for x in [0, 1]"
               << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout
-            << "Expressing solution as y(x, t) = sum over [a_i / (1 + exp(bi - wxi*x - wti*t))], i in [1, n], where n is the number of hidden neurons"
-            << std::endl;
+        << "Expressing solution as y(x, t) = sum over [a_i / (1 + exp(bi - wxi*x - wti*t))], i in [1, n], where n is the number of hidden neurons"
+        << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
     unsigned int n_inner_neurons = 2;
-    unsigned int train_size = 5;
-    double accuracy = 1e-1;
-    double ds = 0.0;
-    double de = 1.0;
+    unsigned int train_size      = 5;
+    double       accuracy        = 1e-1;
+    double       ds              = 0.0;
+    double       de              = 1.0;
 
     unsigned int test_size = 10;
-    double ts = ds;
-    double te = de + 0;
+    double       ts        = ds;
+    double       te        = de + 0;
 
     size_t particle_swarm_max_iters = 10;
-    size_t n_particles = 5;
+    size_t n_particles              = 5;
     test_pde(accuracy,
              n_inner_neurons,
              train_size,
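
The banner above expresses the solution as y(x, t) = sum over [a_i / (1 + exp(b_i - wx_i*x - wt_i*t))]. A short sketch of that evaluation; the parameter vectors are hypothetical stand-ins for the weights and biases the solver actually optimizes:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    double ansatz(double x, double t,
                  const std::vector<double>& a,   // output weights a_i
                  const std::vector<double>& b,   // biases b_i
                  const std::vector<double>& wx,  // weights on x
                  const std::vector<double>& wt)  // weights on t
    {
        double y = 0.0;
        for (std::size_t i = 0; i < a.size(); ++i) {
            y += a[i] / (1.0 + std::exp(b[i] - wx[i] * x - wt[i] * t));
        }
        return y;
    }
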
diff --git a/src/examples/network_serialization.cpp b/src/examples/network_serialization.cpp
index 924ecef2..a65ff0cd 100644
--- a/src/examples/network_serialization.cpp
+++ b/src/examples/network_serialization.cpp
@@ -12,24 +12,24 @@
 int main() {
     std::cout << "Running lib4neuro Serialization example   1" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout << "First, it finds an approximate solution to the system of equations below:" << std::endl;
     std::cout << "0 * w1 + 1 * w2 = 0.50 + b" << std::endl;
     std::cout << "1 * w1 + 0.5*w2 = 0.75 + b" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout << "Then it stores the network with its weights into a file via serialization" << std::endl;
     std::cout << "Then it loads the network from a file via serialization" << std::endl;
     std::cout << "Finally it tests the loaded network parameters by evaluating the error function" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
     /* TRAIN DATA DEFINITION */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-    std::vector<double> inp, out;
+    std::vector<double>                                              inp, out;
 
     inp = {0, 1};
     out = {0.5};
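
The printed system is underdetermined: two equations in the three unknowns w1, w2 and b. Worked by hand, the first equation gives w2 = 0.5 + b; substituting into the second gives w1 = 0.75 + b - 0.5 * (0.5 + b) = 0.5 + 0.5 * b. Every exact solution therefore has the form (w1, w2, b) = (0.5 + 0.5b, 0.5 + b, b); for b = 0 this is w1 = w2 = 0.5. Any point on this line makes the error below zero, which is what the particle swarm searches for.
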
@@ -97,15 +97,15 @@ int main() {
     std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
 
     for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
-        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i]     = -10;
         domain_bounds[2 * i + 1] = 10;
     }
 
-    double c1 = 1.7;
-    double c2 = 1.7;
-    double w = 0.7;
+    double c1          = 1.7;
+    double c2          = 1.7;
+    double w           = 0.7;
     size_t n_particles = 5;
-    size_t iter_max = 10;
+    size_t iter_max    = 10;
 
     /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
      * terminating criterion is met */
@@ -114,18 +114,18 @@ int main() {
     /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
      * terminating criterion is met ('n' is the total number of particles) */
     double epsilon = 0.02;
-    double delta = 0.7;
+    double delta   = 0.7;
 
     l4n::ParticleSwarm swarm_01(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            iter_max
+        &domain_bounds,
+        c1,
+        c2,
+        w,
+        gamma,
+        epsilon,
+        delta,
+        n_particles,
+        iter_max
     );
     swarm_01.optimize(mse);
 
@@ -142,14 +142,14 @@ int main() {
 
     /* SAVE NETWORK TO THE FILE */
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     std::cout << "Network generated by the example" << std::endl;
     net.write_stats();
     net.save_text("saved_network.4nt");
     std::cout
-            << "--------------------------------------------------------------------------------------------------------------------------------------------"
-            << std::endl;
+        << "--------------------------------------------------------------------------------------------------------------------------------------------"
+        << std::endl;
     double error = 0.0;
     inp = {0, 1};
     net.eval_single(inp,
@@ -164,17 +164,17 @@ int main() {
     std::cout << "x = (1, 0.5), expected output: 0.75, real output: " << out[0] << std::endl;
     std::cout << "Error of the network: " << 0.5 * error << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
     std::cout << "Network loaded from a file" << std::endl;
     l4n::NeuralNetwork net2("saved_network.4nt");
     net2.write_stats();
     std::cout
-            << "--------------------------------------------------------------------------------------------------------------------------------------------"
-            << std::endl;
+        << "--------------------------------------------------------------------------------------------------------------------------------------------"
+        << std::endl;
     error = 0.0;
-    inp = {0, 1};
+    inp   = {0, 1};
     net2.eval_single(inp,
                      out);
     error += (0.5 - out[0]) * (0.5 - out[0]);
@@ -187,7 +187,7 @@ int main() {
     std::cout << "x = (1, 0.5), expected output: 0.75, real output: " << out[0] << std::endl;
     std::cout << "Error of the network: " << 0.5 * error << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
     return 0;
 }
\ No newline at end of file
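
For reference, the value printed by both halves of this example is 0.5 times the sum of squared residuals over the two training points. With made-up outputs 0.45 and 0.80 (against the targets 0.50 and 0.75) it would be 0.5 * ((0.5 - 0.45)^2 + (0.75 - 0.80)^2) = 0.5 * 0.005 = 0.0025; identical printed errors before saving and after loading are what verify the serialization round trip.
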
diff --git a/src/examples/seminar.cpp b/src/examples/seminar.cpp
index c883afd4..11926803 100644
--- a/src/examples/seminar.cpp
+++ b/src/examples/seminar.cpp
@@ -16,26 +16,26 @@ int main() {
 
     std::cout << std::endl << "Running lib4neuro Moldyn Seminar example" << std::endl;
     std::cout
-            << "********************************************************************************************************************************************"
-            << std::endl;
+        << "********************************************************************************************************************************************"
+        << std::endl;
 
 
-    l4n::NeuralNetwork XOR;
+    l4n::NeuralNetwork                 XOR;
     std::shared_ptr<l4n::NeuronLinear> in1 = std::make_shared<l4n::NeuronLinear>();
     std::shared_ptr<l4n::NeuronLinear> in2 = std::make_shared<l4n::NeuronLinear>();
-    size_t i1 = XOR.add_neuron(in1,
-                               l4n::BIAS_TYPE::NO_BIAS);
-    size_t i2 = XOR.add_neuron(in2,
-                               l4n::BIAS_TYPE::NO_BIAS);
+    size_t                             i1  = XOR.add_neuron(in1,
+                                                            l4n::BIAS_TYPE::NO_BIAS);
+    size_t                             i2  = XOR.add_neuron(in2,
+                                                            l4n::BIAS_TYPE::NO_BIAS);
 
     std::shared_ptr<l4n::NeuronLogistic> hn1 = std::make_shared<l4n::NeuronLogistic>();
     std::shared_ptr<l4n::NeuronLogistic> hn2 = std::make_shared<l4n::NeuronLogistic>();
-    size_t h1 = XOR.add_neuron(hn1);
-    size_t h2 = XOR.add_neuron(hn2);
+    size_t                               h1  = XOR.add_neuron(hn1);
+    size_t                               h2  = XOR.add_neuron(hn2);
 
     std::shared_ptr<l4n::NeuronLinear> on1 = std::make_shared<l4n::NeuronLinear>();
-    size_t o1 = XOR.add_neuron(on1,
-                               l4n::BIAS_TYPE::NO_BIAS);
+    size_t                             o1  = XOR.add_neuron(on1,
+                                                            l4n::BIAS_TYPE::NO_BIAS);
 
     XOR.add_connection_simple(i1,
                               h1);
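
The topology assembled here (together with the add_connection_simple calls that continue past this hunk) is the usual 2-2-1 XOR network; sketched for orientation, assuming full input-to-hidden connectivity:

    //   in1 (linear, no bias)     in2 (linear, no bias)
    //        |         \          /         |
    //        |           \      /           |
    //   hn1 (logistic)         hn2 (logistic)
    //               \             /
    //                \           /
    //            on1 (linear, no bias)
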
@@ -54,7 +54,7 @@ int main() {
 
     /* TRAIN DATA DEFINITION */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-    std::vector<double> inp, out;
+    std::vector<double>                                              inp, out;
 
     inp = {0, 0};
     out = {0};
@@ -100,15 +100,15 @@ int main() {
     std::vector<double> domain_bounds(2 * (XOR.get_n_weights() + XOR.get_n_biases()));
 
     for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
-        domain_bounds[2 * i] = -10;
+        domain_bounds[2 * i]     = -10;
         domain_bounds[2 * i + 1] = 10;
     }
 
-    double c1 = 1.7;
-    double c2 = 1.7;
-    double w = 0.7;
+    double c1          = 1.7;
+    double c2          = 1.7;
+    double w           = 0.7;
     size_t n_particles = 5;
-    size_t iter_max = 10;
+    size_t iter_max    = 10;
 
     /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
      * terminating criterion is met */
@@ -117,18 +117,18 @@ int main() {
     /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
      * terminating criterion is met ('n' is the total number of particles) */
     double epsilon = 0.02;
-    double delta = 0.7;
+    double delta   = 0.7;
 
     l4n::ParticleSwarm swarm_01(
-            &domain_bounds,
-            c1,
-            c2,
-            w,
-            gamma,
-            epsilon,
-            delta,
-            n_particles,
-            iter_max
+        &domain_bounds,
+        c1,
+        c2,
+        w,
+        gamma,
+        epsilon,
+        delta,
+        n_particles,
+        iter_max
     );
     swarm_01.optimize(mse);
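
For orientation, c1 and c2 are the usual cognitive and social acceleration coefficients and w the inertia weight; the textbook particle update they parameterize (lib4neuro's exact update rule is not shown in this patch) is

    v_i <- w * v_i + c1 * r1 * (p_i - x_i) + c2 * r2 * (g - x_i)
    x_i <- x_i + v_i

with r1, r2 drawn uniformly from [0, 1], p_i the particle's best position so far and g the best position found by the whole swarm.
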
 
diff --git a/src/examples/x2_fitting.cpp b/src/examples/x2_fitting.cpp
index 86936ae3..82821541 100644
--- a/src/examples/x2_fitting.cpp
+++ b/src/examples/x2_fitting.cpp
@@ -9,15 +9,15 @@ int main() {
                           true);
     reader.read();
 
-    std::vector<unsigned int> input_ind = {0};
-    std::vector<unsigned int> output_ind = {1};
-    std::shared_ptr<l4n::DataSet> ds = reader.get_data_set(&input_ind,
-                                                           &output_ind);
-
-    std::vector<unsigned int> neuron_numbers_in_layers = {1, 15, 1};
-    std::vector<l4n::NEURON_TYPE> hidden_type_v = {l4n::NEURON_TYPE::LOGISTIC};
-    l4n::FullyConnectedFFN net(&neuron_numbers_in_layers,
-                               &hidden_type_v);
+    std::vector<unsigned int>     input_ind  = {0};
+    std::vector<unsigned int>     output_ind = {1};
+    std::shared_ptr<l4n::DataSet> ds         = reader.get_data_set(&input_ind,
+                                                                   &output_ind);
+
+    std::vector<unsigned int>     neuron_numbers_in_layers = {1, 15, 1};
+    std::vector<l4n::NEURON_TYPE> hidden_type_v            = {l4n::NEURON_TYPE::LOGISTIC};
+    l4n::FullyConnectedFFN        net(&neuron_numbers_in_layers,
+                                      &hidden_type_v);
 
     l4n::MSE mse(&net,
                  ds.get());
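
This example reads column 0 as the input and column 1 as the output, i.e. a two-column table of x and (judging by the file name) x^2 samples. A hypothetical generator for such a file; the file name, delimiter, and value range below are assumptions, with a header row to match the reader's skip-first-line setting:

    #include <fstream>

    int main() {
        std::ofstream ofs("x2_data.csv");
        ofs << "x,x2\n";                       // header row, skipped by the reader
        for (int i = -10; i <= 10; ++i) {
            double x = i / 2.0;
            ofs << x << ',' << x * x << '\n';  // column 0: x, column 1: x^2
        }
        return 0;
    }
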
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt
index 66ea48d7..cd405224 100644
--- a/src/tests/CMakeLists.txt
+++ b/src/tests/CMakeLists.txt
@@ -2,57 +2,57 @@
 # UNIT TESTS #
 ##############
 
-add_executable(linear_neuron_test NeuronLinear_test.cpp)
-target_link_libraries(linear_neuron_test lib4neuro boost_unit_test)
-target_include_directories(linear_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(linear_neuron_test NeuronLinear_test.cpp)
+TARGET_LINK_LIBRARIES(linear_neuron_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(linear_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(constant_neuron_test NeuronConstant_test.cpp)
-target_link_libraries(constant_neuron_test lib4neuro boost_unit_test)
-target_include_directories(constant_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(constant_neuron_test NeuronConstant_test.cpp)
+TARGET_LINK_LIBRARIES(constant_neuron_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(constant_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(binary_neuron_test NeuronBinary_test.cpp)
-target_link_libraries(binary_neuron_test lib4neuro boost_unit_test)
-target_include_directories(binary_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(binary_neuron_test NeuronBinary_test.cpp)
+TARGET_LINK_LIBRARIES(binary_neuron_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(binary_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(logistic_neuron_test NeuronLogistic_test.cpp)
-target_link_libraries(logistic_neuron_test lib4neuro boost_unit_test)
-target_include_directories(logistic_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(logistic_neuron_test NeuronLogistic_test.cpp)
+TARGET_LINK_LIBRARIES(logistic_neuron_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(logistic_neuron_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(connectionFunctionGeneral_test ConnectionFunctionGeneral_test.cpp)
-target_link_libraries(connectionFunctionGeneral_test lib4neuro boost_unit_test)
-target_include_directories(connectionFunctionGeneral_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(connectionFunctionGeneral_test ConnectionFunctionGeneral_test.cpp)
+TARGET_LINK_LIBRARIES(connectionFunctionGeneral_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(connectionFunctionGeneral_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(neural_network_test NeuralNetwork_test.cpp)
-target_link_libraries(neural_network_test lib4neuro boost_unit_test)
-target_include_directories(neural_network_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(neural_network_test NeuralNetwork_test.cpp)
+TARGET_LINK_LIBRARIES(neural_network_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(neural_network_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(connection_Function_identity_test ConnectionFunctionIdentity_test.cpp)
-target_link_libraries(connection_Function_identity_test lib4neuro boost_unit_test)
-target_include_directories(connection_Function_identity_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(connection_Function_identity_test ConnectionFunctionIdentity_test.cpp)
+TARGET_LINK_LIBRARIES(connection_Function_identity_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(connection_Function_identity_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(dataset_test DataSet_test.cpp)
-target_link_libraries(dataset_test lib4neuro boost_unit_test)
-target_include_directories(dataset_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(dataset_test DataSet_test.cpp)
+TARGET_LINK_LIBRARIES(dataset_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(dataset_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(errorfunction_test ErrorFunctions_test.cpp)
-target_link_libraries(errorfunction_test lib4neuro boost_unit_test)
-target_include_directories(errorfunction_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(errorfunction_test ErrorFunctions_test.cpp)
+TARGET_LINK_LIBRARIES(errorfunction_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(errorfunction_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(particle_swarm_test ParticleSwarm_test.cpp)
-target_link_libraries(particle_swarm_test lib4neuro boost_unit_test)
-target_include_directories(particle_swarm_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(particle_swarm_test ParticleSwarm_test.cpp)
+TARGET_LINK_LIBRARIES(particle_swarm_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(particle_swarm_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(particle_test Particle_test.cpp)
-target_link_libraries(particle_test lib4neuro boost_unit_test)
-target_include_directories(particle_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(particle_test Particle_test.cpp)
+TARGET_LINK_LIBRARIES(particle_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(particle_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(NeuralNetworkSum_test NeuralNetworkSum_test.cpp)
-target_link_libraries(NeuralNetworkSum_test lib4neuro boost_unit_test)
-target_include_directories(NeuralNetworkSum_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(NeuralNetworkSum_test NeuralNetworkSum_test.cpp)
+TARGET_LINK_LIBRARIES(NeuralNetworkSum_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(NeuralNetworkSum_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
-add_executable(DESolver_test DESolver_test.cpp)
-target_link_libraries(DESolver_test lib4neuro boost_unit_test)
-target_include_directories(DESolver_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
+ADD_EXECUTABLE(DESolver_test DESolver_test.cpp)
+TARGET_LINK_LIBRARIES(DESolver_test lib4neuro boost_unit_test)
+TARGET_INCLUDE_DIRECTORIES(DESolver_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
 #TODO fix GradientDescent test
 #add_executable(GradientDescent_test GradientDescent_test.cpp)
@@ -60,7 +60,7 @@ target_include_directories(DESolver_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_
 #target_include_directories(GradientDescent_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
 
-set(TEST_OUTPUT_DIR ${PROJECT_BINARY_DIR}/tests)
+SET(TEST_OUTPUT_DIR ${PROJECT_BINARY_DIR}/tests)
 
 SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR})
 SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR})
@@ -69,32 +69,32 @@ SET(CMAKE_LIBRARY_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR})
 SET(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR})
 SET(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR})
 
-set_target_properties(
-        linear_neuron_test
-        constant_neuron_test
-        binary_neuron_test
-        logistic_neuron_test
-        connectionFunctionGeneral_test
-        connection_Function_identity_test
-        neural_network_test
-        dataset_test
-        particle_swarm_test
-        particle_test
-        NeuralNetworkSum_test
-        errorfunction_test
-        DESolver_test
-        #    GradientDescent_test
-
-
-        PROPERTIES
-        ARCHIVE_OUTPUT_DIRECTORY $<1:${TEST_OUTPUT_DIR}>
-        LIBRARY_OUTPUT_DIRECTORY $<1:${TEST_OUTPUT_DIR}>
-        RUNTIME_OUTPUT_DIRECTORY $<1:${TEST_OUTPUT_DIR}>
-        #CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR}
-        #CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR}
-        #CMAKE_LIBRARY_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR}
-        #CMAKE_LIBRARY_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR}
-        #CMAKE_ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR}
-        #CMAKE_ARCHIVE_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR}
+SET_TARGET_PROPERTIES(
+    linear_neuron_test
+    constant_neuron_test
+    binary_neuron_test
+    logistic_neuron_test
+    connectionFunctionGeneral_test
+    connection_Function_identity_test
+    neural_network_test
+    dataset_test
+    particle_swarm_test
+    particle_test
+    NeuralNetworkSum_test
+    errorfunction_test
+    DESolver_test
+    #    GradientDescent_test
+
+
+    PROPERTIES
+    ARCHIVE_OUTPUT_DIRECTORY $<1:${TEST_OUTPUT_DIR}>
+    LIBRARY_OUTPUT_DIRECTORY $<1:${TEST_OUTPUT_DIR}>
+    RUNTIME_OUTPUT_DIRECTORY $<1:${TEST_OUTPUT_DIR}>
+    #CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR}
+    #CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR}
+    #CMAKE_LIBRARY_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR}
+    #CMAKE_LIBRARY_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR}
+    #CMAKE_ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${TEST_OUTPUT_DIR}
+    #CMAKE_ARCHIVE_OUTPUT_DIRECTORY_RELEASE ${TEST_OUTPUT_DIR}
 )
 
diff --git a/src/tests/ConnectionFunctionGeneral_test.cpp b/src/tests/ConnectionFunctionGeneral_test.cpp
index 7e34861f..2afafe68 100644
--- a/src/tests/ConnectionFunctionGeneral_test.cpp
+++ b/src/tests/ConnectionFunctionGeneral_test.cpp
@@ -35,7 +35,7 @@ BOOST_AUTO_TEST_SUITE(Connection_test)
         param_indices.push_back(0);
         std::string paramToFunction = "this do nothing! Why is it here?";
         BOOST_CHECK_NO_THROW(ConnectionFunctionGeneral* functionGeneral = new ConnectionFunctionGeneral(param_indices,
-                                     paramToFunction));
+                                 paramToFunction));
     }
 
 
diff --git a/src/tests/DESolver_test.cpp b/src/tests/DESolver_test.cpp
index f2c5ca0e..aaf1bd52 100644
--- a/src/tests/DESolver_test.cpp
+++ b/src/tests/DESolver_test.cpp
@@ -126,7 +126,7 @@ BOOST_AUTO_TEST_SUITE(DESolver_test)
                           std::invalid_argument);
         BOOST_CHECK_NO_THROW(DESolver deSolver(1,
                                                1,
-                                     1));
+                                 1));
 
         //TODO fix it
         //std::stringstream buffer1;
diff --git a/src/tests/DataSet_test.cpp b/src/tests/DataSet_test.cpp
index 5197f4a1..15762e12 100644
--- a/src/tests/DataSet_test.cpp
+++ b/src/tests/DataSet_test.cpp
@@ -49,7 +49,7 @@ BOOST_AUTO_TEST_SUITE(DataSet_test)
  */
     BOOST_AUTO_TEST_CASE(DataSet_construction_from_vector_test) {
         std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-        std::vector<double> inp, out;
+        std::vector<double>                                              inp, out;
 
         for (int i = 0; i < 3; i++) {
             inp.push_back(i);
diff --git a/src/tests/ErrorFunctions_test.cpp b/src/tests/ErrorFunctions_test.cpp
index 94e1f3a6..2364dfda 100644
--- a/src/tests/ErrorFunctions_test.cpp
+++ b/src/tests/ErrorFunctions_test.cpp
@@ -100,7 +100,7 @@ MOCK_BASE_CLASS(mock_network,
     MOCK_METHOD(write_weights,
                 1,
                 void(std::ofstream
-                        *),
+                    *),
                 id3)
 
     MOCK_METHOD(write_biases,
@@ -117,7 +117,7 @@ MOCK_BASE_CLASS(mock_network,
     MOCK_METHOD(write_biases,
                 1,
                 void(std::ofstream
-                        *),
+                    *),
                 id6)
 
     MOCK_METHOD(write_stats,
@@ -134,7 +134,7 @@ MOCK_BASE_CLASS(mock_network,
     MOCK_METHOD(write_stats,
                 1,
                 void(std::ofstream
-                        *),
+                    *),
                 id9)
 };
 
@@ -143,8 +143,8 @@ MOCK_BASE_CLASS(mock_dataSet,
 ) {
     mock_dataSet(std::vector<std::pair<std::vector<double>, std::vector<double>>>
                  * i)
-            :
-            lib4neuro::DataSet(i) {
+        :
+        lib4neuro::DataSet(i) {
 
     }
 
@@ -175,7 +175,7 @@ MOCK_BASE_CLASS(mock_dataSet,
     MOCK_METHOD(store_data_text,
                 1,
                 void(std::ofstream
-                        *),
+                    *),
                 id2)
 };
 
@@ -190,7 +190,7 @@ BOOST_AUTO_TEST_SUITE(ErrorFunctions_test);
         mock_network network;
         MOCK_EXPECT(network.get_n_biases).returns(1);
         MOCK_EXPECT(network.get_n_weights).returns(1);
-        std::vector<double> inp, out;
+        std::vector<double>                                              inp, out;
         std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_dy;
         inp = {0.0};
         out = {8.0};
@@ -202,7 +202,7 @@ BOOST_AUTO_TEST_SUITE(ErrorFunctions_test);
         mock_dataSet dataSet(&data_vec_dy);
 
         BOOST_CHECK_NO_THROW(MSE mse(&network,
-                                     &dataSet));
+                                 &dataSet));
     }
 
     BOOST_AUTO_TEST_CASE(ErrorFunction_MSE_Eval_Test) {
@@ -211,8 +211,8 @@ BOOST_AUTO_TEST_SUITE(ErrorFunctions_test);
         MOCK_EXPECT(network.get_n_weights).returns(1);
         MOCK_EXPECT(network.eval_single);
         std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-        std::vector<double> inp, out;
-        for (int i = 0; i < 1; i++) {
+        std::vector<double>                                              inp, out;
+        for (int i = 0; i < 1; i++) {
             inp.push_back(i);
             out.push_back(i + 4);
         }
@@ -236,8 +236,8 @@ BOOST_AUTO_TEST_SUITE(ErrorFunctions_test);
         MOCK_EXPECT(network.get_n_biases).returns(1);
         MOCK_EXPECT(network.get_n_weights).returns(1);
         std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-        std::vector<double> inp, out;
-        for (int i = 0; i < 1; i++) {
+        std::vector<double>                                              inp, out;
+        for (int i = 0; i < 1; i++) {
             inp.push_back(i);
             out.push_back(i + 4);
         }
diff --git a/src/tests/NeuralNetworkSum_test.cpp b/src/tests/NeuralNetworkSum_test.cpp
index 9fcfe501..5436159c 100644
--- a/src/tests/NeuralNetworkSum_test.cpp
+++ b/src/tests/NeuralNetworkSum_test.cpp
@@ -102,7 +102,7 @@ MOCK_BASE_CLASS(mock_network,
     MOCK_METHOD(write_weights,
                 1,
                 void(std::ofstream
-                        *),
+                    *),
                 id3)
 
     MOCK_METHOD(write_biases,
@@ -119,7 +119,7 @@ MOCK_BASE_CLASS(mock_network,
     MOCK_METHOD(write_biases,
                 1,
                 void(std::ofstream
-                        *),
+                    *),
                 id6)
 
     MOCK_METHOD(write_stats,
@@ -136,7 +136,7 @@ MOCK_BASE_CLASS(mock_network,
     MOCK_METHOD(write_stats,
                 1,
                 void(std::ofstream
-                        *),
+                    *),
                 id9)
 };
 
@@ -153,10 +153,10 @@ BOOST_AUTO_TEST_SUITE(NeuralNetworkSum_test)
     }
 
     BOOST_AUTO_TEST_CASE(NeuralNetworkSum_add_network_test) {
-        mock_network network;
+        mock_network     network;
         //NeuralNetwork network;
         NeuralNetworkSum networkSum;
-        std::string po = "f(x,y,z,t) =x+y+z+t";
+        std::string      po = "f(x,y,z,t) =x+y+z+t";
         BOOST_CHECK_NO_THROW(networkSum.add_network(&network,
                                                     po));
     }
@@ -171,7 +171,7 @@ BOOST_AUTO_TEST_SUITE(NeuralNetworkSum_test)
         std::vector<double> output;
         output.push_back(1);
 
-        double weights = 5;
+        double           weights = 5;
         NeuralNetworkSum networkSum;
         networkSum.add_network(&network,
                                "f(x) =x");
diff --git a/src/tests/NeuralNetwork_test.cpp b/src/tests/NeuralNetwork_test.cpp
index d61f2d1a..a2eaa16c 100644
--- a/src/tests/NeuralNetwork_test.cpp
+++ b/src/tests/NeuralNetwork_test.cpp
@@ -86,7 +86,7 @@ BOOST_AUTO_TEST_SUITE(NeuralNetwork_test)
     BOOST_AUTO_TEST_CASE(NeuralNetwork_add_connection_simple_test) {
         std::shared_ptr<mock_NeuronLinear> n1(new mock_NeuronLinear);
         std::shared_ptr<mock_NeuronLinear> n2(new mock_NeuronLinear);
-        NeuralNetwork network;
+        NeuralNetwork                      network;
         network.add_neuron(n1,
                            BIAS_TYPE::NO_BIAS);
         network.add_neuron(n2,
@@ -124,8 +124,8 @@ BOOST_AUTO_TEST_SUITE(NeuralNetwork_test)
  * Test of add_connection_general method
  */
     BOOST_AUTO_TEST_CASE(NeuralNetwork_specify_inputs_neurons_test) {
-        NeuralNetwork network;
-        mock_NeuronLinear po;
+        NeuralNetwork                      network;
+        mock_NeuronLinear                  po;
         std::shared_ptr<mock_NeuronLinear> n1(new mock_NeuronLinear());
 
         network.add_neuron(n1,
@@ -142,7 +142,7 @@ BOOST_AUTO_TEST_SUITE(NeuralNetwork_test)
     }
 
     BOOST_AUTO_TEST_CASE(NeuralNetwork_specify_outputs_neurons_test) {
-        NeuralNetwork network;
+        NeuralNetwork                      network;
         std::shared_ptr<mock_NeuronLinear> n1(new mock_NeuronLinear);
         network.add_neuron(n1,
                            BIAS_TYPE::NO_BIAS);
@@ -218,7 +218,7 @@ BOOST_AUTO_TEST_SUITE(NeuralNetwork_test)
         for (int i = 0; i < 100; i++) {
             sum += weights->at(i);
         }
-        sum = sum / 100;
+        sum        = sum / 100;
         BOOST_CHECK(sum < 0.15 && sum > -0.15);
     }
 
diff --git a/src/tests/ParticleSwarm_test.cpp b/src/tests/ParticleSwarm_test.cpp
index 613d4272..db758f8e 100644
--- a/src/tests/ParticleSwarm_test.cpp
+++ b/src/tests/ParticleSwarm_test.cpp
@@ -41,7 +41,7 @@ BOOST_AUTO_TEST_SUITE(ParticleSwarm_test)
                                                  0.05,
                                                  0.5,
                                                  0,
-                                     20));
+                                 20));
     }
 
     BOOST_AUTO_TEST_CASE(ParticleSwarm_optimalize_and_get_parameters_test) {
diff --git a/src/tests/Particle_test.cpp b/src/tests/Particle_test.cpp
index 37f3904d..084706a6 100644
--- a/src/tests/Particle_test.cpp
+++ b/src/tests/Particle_test.cpp
@@ -26,7 +26,7 @@ BOOST_AUTO_TEST_SUITE(Particle_test)
 
     BOOST_AUTO_TEST_CASE(Particle_construction_test) {
         std::vector<double> domain_bound{1, 2, 3, 4, 5};
-        mock_ErrorFunction error;
+        mock_ErrorFunction  error;
         MOCK_EXPECT(error.get_dimension).once().returns(5);
         MOCK_EXPECT(error.eval).once().returns(0.8);
         BOOST_CHECK_NO_THROW(Particle(&error,
@@ -35,7 +35,7 @@ BOOST_AUTO_TEST_SUITE(Particle_test)
 
     BOOST_AUTO_TEST_CASE(Particle_get_coordinate_test) {
         std::vector<double> domain_bound{1, 2, 3, 4, 5};
-        mock_ErrorFunction error;
+        mock_ErrorFunction  error;
 
         MOCK_EXPECT(error.get_dimension).returns(5);
         MOCK_EXPECT(error.eval).returns(0.8);
@@ -50,7 +50,7 @@ BOOST_AUTO_TEST_SUITE(Particle_test)
 
     BOOST_AUTO_TEST_CASE(Particle_get_optimal_value_test) {
         std::vector<double> domain_bound{1, 2, 3, 4, 5};
-        mock_ErrorFunction error;
+        mock_ErrorFunction  error;
 
         MOCK_EXPECT(error.get_dimension).returns(5);
         MOCK_EXPECT(error.eval).returns(0.8);
-- 
GitLab