From 8435d840e74d25994c6e3ba4bf51ec6b879d7e07 Mon Sep 17 00:00:00 2001
From: Martin Beseda <martin.beseda@vsb.cz>
Date: Fri, 22 Mar 2019 12:33:49 +0100
Subject: [PATCH] [CODE] Removed inactive (commented-out) parts of the code.

---
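Note: most of the dead code removed below is leftover manual memory
management -- `new std::vector<double>(0)` in constructors and guarded
`delete` chains in destructors -- that became obsolete once members such as
`optimal_parameters` were switched to value or smart-pointer types. For
orientation, a minimal illustrative C++ sketch of that pattern (the
`Optimizer` class and `shared_state` member are hypothetical stand-ins, not
lib4neuro code):

    #include <memory>
    #include <vector>

    class Optimizer {            // hypothetical stand-in for the learning methods below
    public:
        Optimizer() = default;   // no `new std::vector<double>(0)` needed
        ~Optimizer() = default;  // no `if (ptr) { delete ptr; }` chains needed

    private:
        std::vector<double> optimal_parameters;             // value member, frees itself
        std::shared_ptr<std::vector<double>> shared_state;  // hypothetical; shared ownership, no manual delete
    };

    int main() {
        Optimizer o;             // construction and destruction need no bookkeeping
        (void) o;
        return 0;
    }

With ownership handled by the types themselves, the commented-out
allocation/deallocation blocks carry no information and are dropped here.
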
 src/CSVReader/CSVReader.cpp                   |   4 -
 src/CSVReader/CSVReader.h                     |   4 -
 src/CrossValidator/CrossValidator.cpp         |   3 -
 src/CrossValidator/CrossValidator.h           |   3 -
 src/DataSet/DataSet.cpp                       |   7 -
 src/DataSet/DataSet.h                         |  16 -
 src/DataSet/DataSetSerialization.h            |   3 -
 src/ErrorFunction/ErrorFunctions.cpp          |  30 --
 src/ErrorFunction/ErrorFunctions.h            |   8 -
 src/ErrorFunction/ErrorFunctionsMock.h        |   3 -
 src/General/ExprtkWrapperSerialization.h      |   3 -
 src/LearningMethods/GradientDescent.cpp       |  74 ---
 src/LearningMethods/GradientDescentBB.cpp     |  31 --
 .../GradientDescentSingleItem.cpp             |   4 -
 src/LearningMethods/LevenbergMarquardt.cpp    |   7 -
 src/LearningMethods/LevenbergMarquardt.h      |   3 -
 src/LearningMethods/ParticleSwarm.cpp         |  41 --
 src/LearningMethods/RandomSolution.cpp        |   1 -
 .../ConnectionFunctionGeneralSerialization.h  |   3 -
 .../ConnectionFunctionIdentity.cpp            |   2 -
 .../ConnectionFunctionIdentity.h              |   9 -
 .../ConnectionFunctionIdentitySerialization.h |   3 -
 src/Network/NeuralNetwork.cpp                 | 437 +-----------------
 src/Network/NeuralNetwork.h                   |   1 -
 src/Network/NeuralNetworkSerialization.h      |   3 -
 src/Network/NeuralNetworkSumSerialization.h   |   3 -
 src/Neuron/Neuron.cpp                         |   1 -
 src/Neuron/NeuronBinary.cpp                   |   3 -
 src/Neuron/NeuronBinarySerialization.h        |   3 -
 src/Neuron/NeuronConstant.cpp                 |   2 -
 src/Neuron/NeuronConstantSerialization.h      |   3 -
 src/Neuron/NeuronLinear.cpp                   |   3 -
 src/Neuron/NeuronLinearSerialization.h        |   3 -
 src/Neuron/NeuronLogistic.cpp                 |   3 -
 src/Neuron/NeuronLogisticSerialization.h      |   3 -
 src/Neuron/NeuronSerialization.h              |   3 -
 .../NormalizationStrategy.cpp                 |   4 -
 .../NormalizationStrategy.h                   |  13 -
 .../NormalizationStrategySerialization.h      |   3 -
 src/Simulator/Simulator.cpp                   |  20 +-
 src/Solvers/DESolver.cpp                      |  62 +--
 src/boost_test_lib_dummy.cpp                  |   3 -
 src/constants.h                               |   3 -
 src/examples/net_test_1.cpp                   |  11 -
 src/examples/net_test_2.cpp                   |  13 -
 src/examples/net_test_3.cpp                   |   3 -
 src/examples/net_test_ode_1.cpp               |   9 -
 src/examples/network_serialization.cpp        |   3 -
 src/examples/seminar.cpp                      |   2 -
 src/examples/simulator.cpp                    |  23 -
 src/examples/simulator_1_2.cpp                |   3 -
 src/examples/x2_fitting.cpp                   |   3 -
 src/exceptions.h                              |   3 -
 src/exprtk.cpp                                |   3 -
 src/message.h                                 |   5 -
 src/settings.h                                |   1 -
 src/tests/DataSet_test.cpp                    | 155 -------
 57 files changed, 10 insertions(+), 1068 deletions(-)

diff --git a/src/CSVReader/CSVReader.cpp b/src/CSVReader/CSVReader.cpp
index b04e2f75..50fcc4dd 100644
--- a/src/CSVReader/CSVReader.cpp
+++ b/src/CSVReader/CSVReader.cpp
@@ -1,6 +1,3 @@
-//
-// Created by martin on 14.11.18.
-//
 
 #include <string>
 #include <fstream>
@@ -26,7 +23,6 @@ namespace lib4neuro {
         this->delimiter = delimiter;
         this->ignore_first_line = ignore_first_line;
         this->header_included = ignore_first_line;
-//        this->data = std::make_unique<std::vector<std::vector<std::string>>>();
     }
 
     void CSVReader::read() {
diff --git a/src/CSVReader/CSVReader.h b/src/CSVReader/CSVReader.h
index 993ea53b..01661ad7 100644
--- a/src/CSVReader/CSVReader.h
+++ b/src/CSVReader/CSVReader.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 14.11.18.
-//
 
 #ifndef LIB4NEURO_CSVREADER_H
 #define LIB4NEURO_CSVREADER_H
@@ -64,7 +61,6 @@ namespace lib4neuro {
          *
          * @return
          */
-//         LIB4NEURO_API std::shared_ptr<std::vector<std::vector<std::string>>> get_data();
         LIB4NEURO_API std::vector<std::vector<std::string>>* get_data();
 
         /**
diff --git a/src/CrossValidator/CrossValidator.cpp b/src/CrossValidator/CrossValidator.cpp
index b345f36c..7af7faa2 100644
--- a/src/CrossValidator/CrossValidator.cpp
+++ b/src/CrossValidator/CrossValidator.cpp
@@ -1,6 +1,3 @@
-//
-// Created by martin on 14.11.18.
-//
 
 #include "CrossValidator.h"
 #include "message.h"
diff --git a/src/CrossValidator/CrossValidator.h b/src/CrossValidator/CrossValidator.h
index 269dcec5..1dc1da6b 100644
--- a/src/CrossValidator/CrossValidator.h
+++ b/src/CrossValidator/CrossValidator.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 14.11.18.
-//
 
 #ifndef LIB4NEURO_CROSSVALIDATOR_H
 #define LIB4NEURO_CROSSVALIDATOR_H
diff --git a/src/DataSet/DataSet.cpp b/src/DataSet/DataSet.cpp
index 90984583..b2903a38 100644
--- a/src/DataSet/DataSet.cpp
+++ b/src/DataSet/DataSet.cpp
@@ -1,6 +1,3 @@
-//
-// Created by martin on 7/13/18.
-//
 
 #include <algorithm>
 #include <experimental/filesystem>
@@ -50,8 +47,6 @@ namespace lib4neuro {
             std::shared_ptr<NormalizationStrategy> ns_tmp;
             ns_tmp.reset(ns);
             this->normalization_strategy = ns_tmp;
-//            this->max_min_inp_val.emplace_back(this->normalization_strategy->get_max_value());
-//            this->max_min_inp_val.emplace_back(this->normalization_strategy->get_min_value());
         }
 
         //TODO check the complete data set for input/output dimensions
@@ -71,8 +66,6 @@ namespace lib4neuro {
         if(ns) {
             std::shared_ptr<NormalizationStrategy> ns_tmp(ns);
             this->normalization_strategy = ns_tmp;
-//            this->max_min_inp_val.emplace_back(this->normalization_strategy->get_max_value());
-//            this->max_min_inp_val.emplace_back(this->normalization_strategy->get_min_value());
         }
 
         this->add_isotropic_data(lower_bound, upper_bound, size, output);
diff --git a/src/DataSet/DataSet.h b/src/DataSet/DataSet.h
index 14b31d47..34c4527f 100644
--- a/src/DataSet/DataSet.h
+++ b/src/DataSet/DataSet.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 7/13/18.
-//
 
 #ifndef INC_4NEURO_DATASET_H
 #define INC_4NEURO_DATASET_H
@@ -42,15 +39,6 @@ namespace lib4neuro {
          */
         size_t output_dim = 0;
 
-//        /**
-//         * Maximum input value
-//         */
-//        double max_inp_val = //std::numeric_limits<double>::quiet_NaN();
-//
-//        /**
-//         * Minimum input value
-//         */
-//        double min_inp_val = std::numeric_limits<double>::quiet_NaN();
 
         bool normalized = false;
 
@@ -80,10 +68,6 @@ namespace lib4neuro {
         //TODO let user choose in the constructor!
         std::shared_ptr<NormalizationStrategy> normalization_strategy;
 
-//        /**
-//         *
-//         */
-//        bool normalized = false;
 
     public:
 
diff --git a/src/DataSet/DataSetSerialization.h b/src/DataSet/DataSetSerialization.h
index 60534586..94488ff4 100644
--- a/src/DataSet/DataSetSerialization.h
+++ b/src/DataSet/DataSetSerialization.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 9/18/18.
-//
 
 #ifndef LIB4NEURO_DATASETSERIALIZATION_H
 #define LIB4NEURO_DATASETSERIALIZATION_H
diff --git a/src/ErrorFunction/ErrorFunctions.cpp b/src/ErrorFunction/ErrorFunctions.cpp
index 0d490a06..7df449cc 100644
--- a/src/ErrorFunction/ErrorFunctions.cpp
+++ b/src/ErrorFunction/ErrorFunctions.cpp
@@ -1,6 +1,3 @@
-//
-// Created by martin on 7/15/18.
-//
 
 #include <vector>
 #include <cmath>
@@ -77,7 +74,6 @@ namespace lib4neuro {
 
     std::vector<double>  ErrorFunction::get_parameters() {
         std::vector<double>  output(this->net->get_n_weights() + this->net->get_n_biases());
-//        std::vector<double>* output = new std::vector<double>(this->net->get_n_weights() + this->net->get_n_biases());
 
         size_t i = 0;
 
@@ -166,13 +162,6 @@ namespace lib4neuro {
         double denormalized_real_input;
         double denormalized_real_output;
 
-//        bool denormalize_output = false;
-//        if (denormalize_data) {
-//            if(data_set->is_normalized()) {
-//                data_set->de_normalize();
-//            }
-//            denormalize_output = true;
-//        }
 
         for (auto i = 0; i < data->size(); i++) {
 
@@ -344,7 +333,6 @@ namespace lib4neuro {
         }
         std::vector<double> error_derivative(dim_out);
 
-//        std::vector<double>* params_tmp = std::make_shared<std::vector<double>>(params);
         for (auto el: *data) {  // Iterate through every element in the test set
 
             this->net->eval_single(el.first, error_derivative,
@@ -431,7 +419,6 @@ namespace lib4neuro {
         std::fill(grad_sum.begin(), grad_sum.end(), 0.0);
         this->net->write_weights();
         this->net->write_biases();
-//        std::vector<double>*  params_tmp = std::make_shared<std::vector<double>>(params);
         for (auto el: *data) {  // Iterate through every element in the test set
 
             this->net->eval_single_debug(el.first, error_derivative,
@@ -504,7 +491,6 @@ namespace lib4neuro {
 
     ErrorSum::ErrorSum() {
         this->summand = nullptr;
-//        this->summand_coefficient = nullptr;
         this->dimension = 0;
     }
 
@@ -512,9 +498,6 @@ namespace lib4neuro {
         if (this->summand) {
             delete this->summand;
         }
-//        if (this->summand_coefficient) {
-//            delete this->summand_coefficient;
-//        }
     }
 
     double ErrorSum::eval_on_test_data(std::vector<double>*  weights,
@@ -652,9 +635,6 @@ namespace lib4neuro {
         }
         this->summand->push_back(F);
 
-//        if (!this->summand_coefficient) {
-//            this->summand_coefficient = new std::vector<double>(0);
-//        }
         this->summand_coefficient.push_back(alpha);
 
         if (F) {
@@ -665,16 +645,6 @@ namespace lib4neuro {
     }
 
     size_t ErrorSum::get_dimension() {
-//    if(!this->dimension) {
-//        size_t max = 0;
-//        for(auto e : *this->summand) {
-//            if(e->get_dimension() > max) {
-//                max = e->get_dimension();
-//            }
-//        };
-//
-//        this->dimension = max;
-//    }
         return this->dimension;
     }
 
diff --git a/src/ErrorFunction/ErrorFunctions.h b/src/ErrorFunction/ErrorFunctions.h
index edc8d6bc..845f7f16 100644
--- a/src/ErrorFunction/ErrorFunctions.h
+++ b/src/ErrorFunction/ErrorFunctions.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 7/15/18.
-//
 
 #ifndef INC_4NEURO_ERRORFUNCTION_H
 #define INC_4NEURO_ERRORFUNCTION_H
@@ -27,8 +24,6 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-//        virtual double eval(std::vector<double>* weights = nullptr, bool denormalize_data=false,
-//                bool verbose = false) = 0;
         virtual double eval(std::vector<double>* weights = nullptr, bool denormalize_data=false,
                             bool verbose = false) = 0;
 
@@ -239,9 +234,6 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-//        LIB4NEURO_API double eval(std::vector<double>* weights = nullptr,
-//                                  bool denormalize_data = false,
-//                                  bool verbose = false) override;
         LIB4NEURO_API double eval(std::vector<double>* weights = nullptr,
                                   bool denormalize_data = false,
                                   bool verbose = false) override;
diff --git a/src/ErrorFunction/ErrorFunctionsMock.h b/src/ErrorFunction/ErrorFunctionsMock.h
index c85a783a..33fe56ce 100644
--- a/src/ErrorFunction/ErrorFunctionsMock.h
+++ b/src/ErrorFunction/ErrorFunctionsMock.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 3.2.19.
-//
 
 #ifndef LIB4NEURO_ERRORFUNCTIONSMOCK_H
 #define LIB4NEURO_ERRORFUNCTIONSMOCK_H
diff --git a/src/General/ExprtkWrapperSerialization.h b/src/General/ExprtkWrapperSerialization.h
index 9996d15b..66f013d1 100644
--- a/src/General/ExprtkWrapperSerialization.h
+++ b/src/General/ExprtkWrapperSerialization.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 9/17/18.
-//
 
 #ifndef LIB4NEURO_EXPRTKWRAPPERSERIALIZATION_H
 #define LIB4NEURO_EXPRTKWRAPPERSERIALIZATION_H
diff --git a/src/LearningMethods/GradientDescent.cpp b/src/LearningMethods/GradientDescent.cpp
index 15cd6c30..f20f17f4 100644
--- a/src/LearningMethods/GradientDescent.cpp
+++ b/src/LearningMethods/GradientDescent.cpp
@@ -13,16 +13,11 @@ namespace lib4neuro {
     GradientDescent::GradientDescent(double epsilon, size_t n_to_restart, int max_iters, size_t batch) {
         this->tolerance = epsilon;
         this->restart_frequency = n_to_restart;
-//        this->optimal_parameters = new std::vector<double>(0);
         this->maximum_niters = max_iters;
         this->batch = batch;
     }
 
     GradientDescent::~GradientDescent() {
-//        if (this->optimal_parameters) {
-//            delete this->optimal_parameters;
-//            this->optimal_parameters = nullptr;
-//        }
     }
 
     void GradientDescent::eval_step_size_mk(double &gamma,
@@ -64,7 +59,6 @@ namespace lib4neuro {
 
             error_current = ef.eval( parameters_after.get() );
             if( step_coefficient < 1e-32){
-//                COUT_DEBUG("    Attempting to find a feasible direction in one dimension was NOT SUCCESSFUL" << std::endl);
                 for (i = 0; i < direction->size(); ++i) {
                     (*parameters_after)[i] = (*parameters_before)[i] - step_coefficient * (*direction)[i];
                 }
@@ -72,11 +66,9 @@ namespace lib4neuro {
             }
             else{
                 if( error_current >=  error_previous ){
-//                    COUT_DEBUG("    Incorrect step size! Reducing it by a factor of 0.5. Errors: " << error_current << ", prev: " << error_previous << std::endl);
                     step_coefficient *= 0.5;
                 }
                 else{
-//                    COUT_DEBUG("    Step OK" << std::endl);
                 }
             }
         }
@@ -117,8 +109,6 @@ namespace lib4neuro {
         std::vector<double>* params_prev(new std::vector<double>(n_parameters));
         std::vector<double>* ptr_mem;
 
-//    std::vector<double> gradient_mem( n_parameters );
-//    std::vector<double> parameters_analytical( n_parameters );
 
 
         std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
@@ -137,14 +127,8 @@ namespace lib4neuro {
 
             /* reset of the current gradient */
             std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
-//        std::fill(gradient_mem.begin(), gradient_mem.end(), 0.0);
             ef.calculate_error_gradient(*params_current, *gradient_current, 1.0, this->batch);
-//        double error_analytical = this->calculate_gradient( ef.get_dataset()->get_data(), (size_t)2, params_current, gradient_current );
 
-//        for(size_t k = 0; k < gradient_mem.size(); ++k){
-//            printf("%f : %f\n", gradient_mem[ k ], gradient_current->at( k ));
-//        }
-//        printf("---------------------\n");
 
             grad_norm = 0.0;
             for (auto v: *gradient_current) {
@@ -157,8 +141,6 @@ namespace lib4neuro {
             if (iter_counter < 10 || iter_counter % this->restart_frequency == 0 ) {
                 /* fixed step length */
                 gamma = 0.1 * this->tolerance;
-//                gamma = 1 / grad_norm;
-//                gamma = 1e-4;
                 cooling = 1.0;
             } else {
                 /* angle between two consecutive gradients */
@@ -183,48 +165,6 @@ namespace lib4neuro {
             }
             val = ef.eval(params_prev);
 
-//            val = prev_val + 1.0;
-//            coeff = 1;
-//            it_analyzed = false;
-//            while(val >= prev_val){
-//                for (i = 0; i < gradient_current->size(); ++i) {
-//                    (*params_prev)[i] = (*params_current)[i] - coeff * gamma * (*gradient_current)[i];
-//                }
-//                val = ef.eval(params_prev);
-//
-//
-//                if( coeff < 1e-32){
-////                    COUT_DEBUG("Error, the advised gradient direction is not feasible. Attempting to find a feasible direction in one dimension" << std::endl);
-//                    if( !this->perform_feasible_1D_step(ef, prev_val, gamma, gradient_current, params_current, params_prev) ){
-//                        gamma = 1;
-//                        counter_simplified_direction_bad++;
-//                    }
-//                    else{
-//                        gamma = 1;
-//                        counter_simplified_direction_good++;
-//                    }
-//
-//                    break;
-//                }
-//                else{
-//                    if( val >=  prev_val ){
-////                        COUT_DEBUG("Incorrect step size! Reducing gamma. Errors: " << val << ", prev: " << prev_val << std::endl);
-//                        coeff *= 0.5;
-//
-//                        if( !it_analyzed ){
-//                            counter_bad_guesses++;
-//                        }
-//                    }
-//                    else{
-////                        COUT_DEBUG("Step OK" << std::endl);
-//                        if( !it_analyzed ){
-//                            counter_good_guesses++;
-//                        }
-//                    }
-//                }
-//                it_analyzed = true;
-//            }
-//            gamma *= coeff;
 
 
 
@@ -252,14 +192,6 @@ namespace lib4neuro {
                                                   << ". Total error: " << val
                                                   << "." << std::endl);
 
-//            if(iter_counter % 100 == 0){
-//                COUT_INFO(std::string("Iteration: ") << (unsigned int)(iter_counter)
-//                                                      << ". Step size: " << gamma
-//                                                      << ". C: " << c
-//                                                      << ". Gradient norm: " << grad_norm
-//                                                      << ". Total error: " << val
-//                                                      << ".\r");
-//            }
 
             cooling *= 0.9999;
 
@@ -296,13 +228,7 @@ namespace lib4neuro {
         }
 
         this->optimal_parameters = *params_current;
-//        std::shared_ptr<std::vector<double>> params;
-//        params.reset(this->optimal_parameters);
         ef.get_network_instance()->copy_parameter_space( &this->optimal_parameters );
 
-//        delete gradient_current;
-//        delete gradient_prev;
-//        delete params_current;
-//        delete params_prev;
     }
 }
diff --git a/src/LearningMethods/GradientDescentBB.cpp b/src/LearningMethods/GradientDescentBB.cpp
index 823edc1f..1a6b54dc 100644
--- a/src/LearningMethods/GradientDescentBB.cpp
+++ b/src/LearningMethods/GradientDescentBB.cpp
@@ -12,16 +12,11 @@ namespace lib4neuro {
     GradientDescentBB::GradientDescentBB(double epsilon, size_t n_to_restart, int max_iters, size_t batch) {
         this->tolerance = epsilon;
         this->restart_frequency = n_to_restart;
-//        this->optimal_parameters = new std::vector<double>(0);
         this->maximum_niters = max_iters;
         this->batch = batch;
     }
 
     GradientDescentBB::~GradientDescentBB() {
-//        if (this->optimal_parameters) {
-//            delete this->optimal_parameters;
-//            this->optimal_parameters = nullptr;
-//        }
     }
 
 
@@ -71,7 +66,6 @@ namespace lib4neuro {
         val = ef.eval(params_current);
         val_best = val;
 
-//        this-> batch = 0;
         double cooling_factor = 1.0;
         while (grad_norm > this->tolerance && (iter_idx != 0)) {
             iter_idx--;
@@ -81,14 +75,8 @@ namespace lib4neuro {
 
             /* reset of the current gradient */
             std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
-//        std::fill(gradient_mem.begin(), gradient_mem.end(), 0.0);
             ef.calculate_error_gradient(*params_current, *gradient_current, 1.0, this->batch);
-//        double error_analytical = this->calculate_gradient( ef.get_dataset()->get_data(), (size_t)2, params_current, gradient_current );
 
-//        for(size_t k = 0; k < gradient_mem.size(); ++k){
-//            printf("%f : %f\n", gradient_mem[ k ], gradient_current->at( k ));
-//        }
-//        printf("---------------------\n");
 
             grad_norm = 0.0;
             for (auto v: *gradient_current) {
@@ -102,8 +90,6 @@ namespace lib4neuro {
             if (iter_counter < 10 || iter_counter % this->restart_frequency < 10 ) {
                 /* fixed step length */
                 gamma = 0.1 * this->tolerance;
-//                gamma = 1 / grad_norm;
-//                gamma = 1e-4;
                 cooling_factor = 1.0;
             } else {
 
@@ -166,14 +152,6 @@ namespace lib4neuro {
                                                   << ". Total error: " << val << ". the lowest error: " << val_best
                                                   << "." << std::endl);
 
-//            if(iter_counter % 100 == 0){
-//                COUT_INFO(std::string("Iteration: ") << (unsigned int)(iter_counter)
-//                                                      << ". Step size: " << gamma
-//                                                      << ". C: " << c
-//                                                      << ". Gradient norm: " << grad_norm
-//                                                      << ". Total error: " << val
-//                                                      << ".\r");
-//            }
 
             cooling_factor *= 0.99999;
 
@@ -211,16 +189,7 @@ namespace lib4neuro {
 
         this->optimal_parameters = *params_best;
 
-//        ef.analyze_error_gradient(*params_current, *gradient_current, 1.0, this->batch);
-//        std::vector<double>* params;
-//        params.reset(this->optimal_parameters);
         ef.get_network_instance()->copy_parameter_space(&this->optimal_parameters);
-//
-//        delete gradient_current;
-//        delete gradient_prev;
-//        delete params_current;
-//        delete params_prev;
-//        delete params_best;
     }
 
 }
diff --git a/src/LearningMethods/GradientDescentSingleItem.cpp b/src/LearningMethods/GradientDescentSingleItem.cpp
index 58d473e6..a107c634 100644
--- a/src/LearningMethods/GradientDescentSingleItem.cpp
+++ b/src/LearningMethods/GradientDescentSingleItem.cpp
@@ -14,7 +14,6 @@ namespace lib4neuro {
     GradientDescentSingleItem::GradientDescentSingleItem(double epsilon, size_t n_to_restart, int max_iters, size_t batch) {
         this->tolerance = epsilon;
         this->restart_frequency = n_to_restart;
-//        this->optimal_parameters = new std::vector<double>(0);
         this->maximum_niters = max_iters;
         this->batch = batch;
     }
@@ -46,7 +45,6 @@ namespace lib4neuro {
 
             value_shifted = f.eval( shifted_x.get() );
         }
-//        std::cout << "Error reduction: " << value - value_shifted << std::endl;
         return alpha;
     }
 
@@ -99,8 +97,6 @@ namespace lib4neuro {
         COUT_DEBUG("Iteration: " << iter << ", Total elements in train set: " << total_elements << ", # of elements with high error: " << updated_elements << ", max. error: " << max_error << std::endl);
 
         this->optimal_parameters = &parameter_vector;
-//        std::shared_ptr<std::vector<double>> opt_params;
-//        opt_params.reset(this->optimal_parameters);
         ef.get_network_instance()->copy_parameter_space( this->optimal_parameters );
 
     }
diff --git a/src/LearningMethods/LevenbergMarquardt.cpp b/src/LearningMethods/LevenbergMarquardt.cpp
index 71bc9019..a83da4a9 100644
--- a/src/LearningMethods/LevenbergMarquardt.cpp
+++ b/src/LearningMethods/LevenbergMarquardt.cpp
@@ -87,7 +87,6 @@ namespace lib4neuro {
         this->p_impl->lambda_increase = lambda_increase;
         this->p_impl->lambda_decrease = lambda_decrease;
         this->p_impl->maximum_niters = max_iters;
-//        this->p_impl->optimal_parameters = new std::vector<double>(5);
     }
 
     void LevenbergMarquardt::optimize(lib4neuro::ErrorFunction& ef,
@@ -121,7 +120,6 @@ namespace lib4neuro {
 
         std::shared_ptr<std::vector<double>> params_tmp;
         params_tmp.reset(new std::vector<double>(n_parameters));
-//        std::vector<double> *params_tmp = new std::vector<double>(n_parameters);
         arma::Mat<double> J(n_data_points, n_parameters);  // Jacobian matrix
         arma::Mat<double> H(n_data_points, n_parameters);  // Hessian matrix
         arma::Mat<double> H_new(n_data_points, n_parameters);
@@ -148,7 +146,6 @@ namespace lib4neuro {
                 std::vector<std::pair<std::vector<double>, std::vector<double>>> subset = ef.get_dataset()->get_random_data_batch(this->p_impl->batch_size);
                 this->p_impl->get_jacobian_and_rhs(*ef.get_network_instance(), J, rhs, subset);
 
-//                this->p_impl->get_jacobian_and_rhs(*ef.get_network_instance(), J, rhs, *ef.get_dataset()->get_data());
 
                 gradient_norm = 0;
 
@@ -175,7 +172,6 @@ namespace lib4neuro {
             }
 
             /* H_new = H + lambda*I */
-//            H_new = H + lambda * arma::diagmat( H );
             H_new = H + lambda * arma::eye( n_parameters, n_parameters );
 
 
@@ -212,7 +208,6 @@ namespace lib4neuro {
                 prev_err = current_err;
                 update_J = true;
 
-//                COUT_DEBUG("Iteration: " << iter_counter << " Current error: " << current_err << ", Current gradient norm: " << gradient_norm << ", Direction norm: " << update_norm << std::endl);
 
             } else {
                 /* If the error after parameters update is not lower, increase the damping factor lambda */
@@ -235,10 +230,8 @@ namespace lib4neuro {
             params_current = nullptr;
         }
 
-//        std::shared_ptr<std::vector<double>> params = std::make_shared<std::vector<double>>(this->p_impl->optimal_parameters);
         ef.get_network_instance()->copy_parameter_space(&this->optimal_parameters);
 
-//        delete params_tmp;
 
     }
 
diff --git a/src/LearningMethods/LevenbergMarquardt.h b/src/LearningMethods/LevenbergMarquardt.h
index 1622b88f..332e2384 100644
--- a/src/LearningMethods/LevenbergMarquardt.h
+++ b/src/LearningMethods/LevenbergMarquardt.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 9.2.19.
-//
 
 #ifndef LIB4NEURO_LEVENBERGMARQUARDT_H
 #define LIB4NEURO_LEVENBERGMARQUARDT_H
diff --git a/src/LearningMethods/ParticleSwarm.cpp b/src/LearningMethods/ParticleSwarm.cpp
index af97eb13..a10a0c3d 100644
--- a/src/LearningMethods/ParticleSwarm.cpp
+++ b/src/LearningMethods/ParticleSwarm.cpp
@@ -81,7 +81,6 @@ Particle::Particle(lib4neuro::ErrorFunction *ef, std::vector<double> *domain_bou
         (*this->optimal_coordinate)[i] = (*this->coordinate)[i];
     }
 
-//    std::shared_ptr<std::vector<double>> coord(this->coordinate);
     this->optimal_value = this->ef->eval(this->coordinate);
 
 }
@@ -95,7 +94,6 @@ Particle::Particle(lib4neuro::ErrorFunction *ef, std::vector<double> *central_sy
     }
 
     this->domain_bounds = new std::vector<double>(2 * central_system->size());
-//    return;
 
 
     for( size_t i = 0; i < central_system->size(); ++i ){
@@ -119,7 +117,6 @@ Particle::Particle(lib4neuro::ErrorFunction *ef, std::vector<double> *central_sy
         (*this->optimal_coordinate)[i] = (*this->coordinate)[i];
     }
 
-//    std::shared_ptr<std::vector<double>> coord(this->coordinate);
     this->optimal_value = this->ef->eval(this->coordinate);
 
 }
@@ -210,7 +207,6 @@ double Particle::change_coordinate(double w, double c1, double c2, std::vector<d
         output += std::abs(vel_mem);
     }
 
-//    std::shared_ptr<std::vector<double>> coord(this->coordinate);
     vel_mem = this->ef->eval(this->coordinate);
     this->current_val = vel_mem;
 
@@ -258,23 +254,6 @@ namespace lib4neuro {
 
     ParticleSwarm::~ParticleSwarm() {
 
-//        if (this->particle_swarm) {
-//            for (size_t i = 0; i < this->n_particles; ++i) {
-//                delete this->particle_swarm.at(i);
-//            }
-//
-//            delete this->particle_swarm;
-//            this->particle_swarm = nullptr;
-//        }
-//
-//        if( this->domain_bounds ){
-//            delete this->domain_bounds;
-//        }
-//
-//        if (this->optimal_parameters) {
-//            delete this->optimal_parameters;
-//            this->optimal_parameters = nullptr;
-//        }
 
     }
 
@@ -310,11 +289,7 @@ namespace lib4neuro {
         }
         this->radius_factor *= 1.25;
 
-//        if (!this->optimal_parameters) {
-//            this->optimal_parameters = new std::vector<double>(this->func_dim);
-//        } else {
             this->optimal_parameters.resize(this->func_dim);
-//        }
 
         size_t outer_it = 0;
         Particle *particle;
@@ -334,9 +309,6 @@ namespace lib4neuro {
         double current_err = -1;
 
         this->determine_optimal_coordinate_and_value(this->optimal_parameters, optimal_value);
-//    for(unsigned int i = 0; i < this->n_particles; ++i){
-//        this->particle_swarm[i]->print_coordinate();
-//    }
         COUT_INFO("Initial best value: " << optimal_value << std::endl);
 
         while (outer_it < this->iter_max) {
@@ -375,7 +347,6 @@ namespace lib4neuro {
                 particle = this->particle_swarm.at(pi);
                 tmp_velocity = particle->change_coordinate(this->w, this->c1, this->c2, this->optimal_parameters,
                                                            global_best_vec);
-//                particle->print_coordinate();
 
                 if (tmp_velocity > max_velocity) {
                     prev_max_velocity = max_velocity;
@@ -406,14 +377,7 @@ namespace lib4neuro {
                 //std::cout.flush();
             }
 
-//        for(unsigned int i=0; i < this->n_particles; i++) {
-//            printf("Particle %d (%f): \n", i, this->particle_swarm[i]->get_current_value());
-//            for(unsigned int j=0; j < this->func_dim; j++) {
-//                printf("\t%f\n", this->particle_swarm[i]->get_coordinate()[j]);
-//            }
-//        }
 
-//            std::shared_ptr<std::vector<double>> coord = std::make_shared<std::vector<double>>(this->optimal_parameters);
             current_err = ef.eval(&this->optimal_parameters);
 
             COUT_DEBUG(std::string("Iteration: ") << (unsigned int)(outer_it)
@@ -439,7 +403,6 @@ namespace lib4neuro {
             outer_it++;
 
             //TODO parameter for inertia weight decrease?
-//        this->w *= 0.99;
 
         }
         COUT_DEBUG(std::string("Iteration: ") << (unsigned int)(outer_it)
@@ -457,8 +420,6 @@ namespace lib4neuro {
             COUT_INFO( std::endl << "Max number of iterations reached ("  <<  outer_it << ")!  Objective function value: " << optimal_value <<std:: endl);
         }
 
-//        std::shared_ptr<std::vector<double>> coord;
-//        coord.reset(this->optimal_parameters);
         ef.get_network_instance()->copy_parameter_space(&this->optimal_parameters);
 
         delete centroid;
@@ -546,8 +507,6 @@ namespace lib4neuro {
         this->w = w;
         this->n_particles = n_particles;
         this->iter_max = iter_max;
-//        this->particle_swarm = new std::vector<Particle *>(this->n_particles);
-//        this->domain_bounds = new std::vector<double>(*domain_bounds);
         std::fill(this->particle_swarm.begin(), this->particle_swarm.end(), nullptr);
     }
 
diff --git a/src/LearningMethods/RandomSolution.cpp b/src/LearningMethods/RandomSolution.cpp
index 8372d058..ea0dc2d1 100644
--- a/src/LearningMethods/RandomSolution.cpp
+++ b/src/LearningMethods/RandomSolution.cpp
@@ -11,7 +11,6 @@
 namespace lib4neuro {
 
     RandomSolution::RandomSolution() {
-//        this->optimal_parameters =  new std::vector<double>();
     }
 
     RandomSolution::~RandomSolution() {}
diff --git a/src/NetConnection/ConnectionFunctionGeneralSerialization.h b/src/NetConnection/ConnectionFunctionGeneralSerialization.h
index f1eba16d..afc9efce 100644
--- a/src/NetConnection/ConnectionFunctionGeneralSerialization.h
+++ b/src/NetConnection/ConnectionFunctionGeneralSerialization.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 9/17/18.
-//
 
 #ifndef LIB4NEURO_CONNECTIONFUNCTIONGENERALSERIALIZATION_H
 #define LIB4NEURO_CONNECTIONFUNCTIONGENERALSERIALIZATION_H
diff --git a/src/NetConnection/ConnectionFunctionIdentity.cpp b/src/NetConnection/ConnectionFunctionIdentity.cpp
index 5446bd51..10abcaae 100644
--- a/src/NetConnection/ConnectionFunctionIdentity.cpp
+++ b/src/NetConnection/ConnectionFunctionIdentity.cpp
@@ -14,12 +14,10 @@
 BOOST_CLASS_EXPORT_IMPLEMENT(ConnectionFunctionIdentity);
 
 ConnectionFunctionIdentity::ConnectionFunctionIdentity( ) {
-//    this->type = CONNECTION_TYPE::IDENTITY;
     this->is_unitary = true;
 }
 
 ConnectionFunctionIdentity::ConnectionFunctionIdentity( size_t pidx ) {
-//    this->type = CONNECTION_TYPE::IDENTITY;
     this->param_idx = pidx;
     this->is_unitary = false;
 }
diff --git a/src/NetConnection/ConnectionFunctionIdentity.h b/src/NetConnection/ConnectionFunctionIdentity.h
index 4a7d79ad..52431b69 100644
--- a/src/NetConnection/ConnectionFunctionIdentity.h
+++ b/src/NetConnection/ConnectionFunctionIdentity.h
@@ -15,8 +15,6 @@
  *
  */
 class ConnectionFunctionIdentity : public ConnectionFunctionGeneral {
-//    friend class boost::serialization::access;
-//    friend class NeuralNetwork;
 
 private:
 
@@ -24,13 +22,6 @@ private:
 
     bool is_unitary = false;
 
-//protected:
-//    template<class Archive>
-//    void serialize(Archive & ar, const unsigned int version){
-//        ar & boost::serialization::base_object<ConnectionFunctionGeneral>(*this);
-//        ar & this->param_idx;
-//        ar & this->is_unitary;
-//    };
 
 public:
 
diff --git a/src/NetConnection/ConnectionFunctionIdentitySerialization.h b/src/NetConnection/ConnectionFunctionIdentitySerialization.h
index 6f0c39f5..c0f3c5d8 100644
--- a/src/NetConnection/ConnectionFunctionIdentitySerialization.h
+++ b/src/NetConnection/ConnectionFunctionIdentitySerialization.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 9/17/18.
-//
 
 #ifndef LIB4NEURO_CONNECTIONFUNCTIONIDENTITYSERIALIZATION_H
 #define LIB4NEURO_CONNECTIONFUNCTIONIDENTITYSERIALIZATION_H
diff --git a/src/Network/NeuralNetwork.cpp b/src/Network/NeuralNetwork.cpp
index ade621a6..49ed2879 100644
--- a/src/Network/NeuralNetwork.cpp
+++ b/src/Network/NeuralNetwork.cpp
@@ -17,20 +17,8 @@
 
 namespace lib4neuro {
     NeuralNetwork::NeuralNetwork() {
-//        this->neurons = new ::std::vector<Neuron *>(0);
-//        this->neuron_biases = new ::std::vector<double>(0);
-//        this->neuron_bias_indices = new ::std::vector<int>(0);
 
-//        this->connection_weights = new ::std::vector<double>(0);
-//        this->connection_list = ::std::vector<std::shared_ptr<ConnectionFunctionGeneral>>(0);
-//        this->inward_adjacency = new ::std::vector<std::vector<std::pair<size_t, size_t>> *>(0);
-//        this->outward_adjacency = new ::std::vector<std::vector<std::pair<size_t, size_t>> *>(0);
-//
-//        this->neuron_layers_feedforward = new ::std::vector<std::vector<size_t> *>(0);
-//        this->neuron_layers_feedbackward = new ::std::vector<std::vector<size_t> *>(0);
 
-//        this->input_neuron_indices = new ::std::vector<size_t>(0);
-//        this->output_neuron_indices = new ::std::vector<size_t>(0);
 
         this->delete_weights = true;
         this->delete_biases = true;
@@ -56,104 +44,11 @@ namespace lib4neuro {
 
     NeuralNetwork::~NeuralNetwork() {
 
-//        if (this->neurons) {
-//            for (auto n: *(this->neurons)) {
-//                delete n;
-//                n = nullptr;
-//            }
-//            delete this->neurons;
-//            this->neurons = nullptr;
-//        }
-
-//        if (this->neuron_potentials) {
-//            delete this->neuron_potentials;
-//            this->neuron_potentials = nullptr;
-//        }
-//
-//        if (this->neuron_bias_indices) {
-//            delete this->neuron_bias_indices;
-//            this->neuron_bias_indices = nullptr;
-//        }
-//
-//        if (this->output_neuron_indices) {
-//            delete this->output_neuron_indices;
-//            this->output_neuron_indices = nullptr;
-//        }
-//
-//        if (this->input_neuron_indices) {
-//            delete this->input_neuron_indices;
-//            this->input_neuron_indices = nullptr;
-//        }
-
-//        if (this->connection_weights && this->delete_weights) {
-//            delete this->connection_weights;
-//            this->connection_weights = nullptr;
-//        }
-
-//        if (this->neuron_biases && this->delete_biases) {
-//            delete this->neuron_biases;
-//            this->neuron_biases = nullptr;
-//        }
-
-//        if (this->connection_list) {
-//
-//            if (this->delete_weights) {
-//                for (auto& c: *this->connection_list) {
-//                    printf("%p\n", c);
-//                    if(c) {
-//                        printf("Deleting %p\n", c);
-////                        delete c;
-////                        c = nullptr;
-//                    }
-//                }
-//            }
-//        }
-//        this->connection_list.clear();
-//        delete this->connection_list;
-//        this->connection_list = nullptr;
-
-//        if (this->inward_adjacency) {
-//            for (auto e: *this->inward_adjacency) {
-//                if (e) {
-//                    delete e;
-//                    e = nullptr;
-//                }
-//            }
-//            delete this->inward_adjacency;
-//            this->inward_adjacency = nullptr;
-//        }
-//
-//        if (this->outward_adjacency) {
-//            for (
-//                auto e: *this->outward_adjacency) {
-//                if (e) {
-//                    delete e;
-//                    e = nullptr;
-//                }
-//            }
-//            delete this-> outward_adjacency;
-//            this->outward_adjacency = nullptr;
-//        }
-//
-//        if (this->neuron_layers_feedforward) {
-//            for (
-//                auto e: this->neuron_layers_feedforward) {
-//                delete e;
-//                e = nullptr;
-//            }
-//            delete this->neuron_layers_feedforward;
-//            this->neuron_layers_feedforward = nullptr;
-//        }
-//
-//        if (this->neuron_layers_feedbackward) {
-//            for (
-//                auto e: *this->neuron_layers_feedbackward) {
-//                delete e;
-//                e = nullptr;
-//            }
-//            delete this->neuron_layers_feedbackward;
-//            this->neuron_layers_feedbackward = nullptr;
-//        }
+
+
+
+
+
 
     }
 
@@ -164,221 +59,6 @@ namespace lib4neuro {
 
         NeuralNetwork *output_net = nullptr;
 // TODO rework due to the changed structure of the class
-//    Neuron * active_neuron, * target_neuron;
-//
-//    size_t n = this->neurons.size();
-//    bool *part_of_subnetwork = new bool[n];
-//    ::std::fill(part_of_subnetwork, part_of_subnetwork + n, false);
-//
-//    bool *is_reachable_from_source = new bool[n];
-//    bool *is_reachable_from_destination = new bool[n];
-//    ::std::fill(is_reachable_from_source, is_reachable_from_source + n, false);
-//    ::std::fill(is_reachable_from_destination, is_reachable_from_destination + n, false);
-//
-//    bool *visited_neurons = new bool[n];
-//    ::std::fill(visited_neurons, visited_neurons + n, false);
-//
-//    size_t active_set_size[2];
-//    active_set_size[0] = 0;
-//    active_set_size[1] = 0;
-//    size_t * active_neuron_set = new size_t[2 * n];
-//    size_t idx1 = 0, idx2 = 1;
-//
-//    /* MAPPING BETWEEN NEURONS AND THEIR INDICES */
-//    size_t idx = 0, idx_target;
-//    for(Neuron *v: *this->neurons){
-//        v->set_idx( idx );
-//        idx++;
-//    }
-//
-//    /* INITIAL STATE FOR THE FORWARD PASS */
-//    for(size_t i: input_neuron_indices ){
-//
-//        if( i < 0 || i >= n){
-//            //invalid index
-//            continue;
-//        }
-//        active_neuron_set[idx1 * n + active_set_size[idx1]] = i;
-//        active_set_size[idx1]++;
-//
-//        visited_neurons[i] = true;
-//    }
-//
-//    /* FORWARD PASS */
-//    while(active_set_size[idx1] > 0){
-//
-//        //we iterate through the active neurons and propagate the signal
-//        for(int i = 0; i < active_set_size[idx1]; ++i){
-//            idx = active_neuron_set[i];
-//
-//            is_reachable_from_source[ idx ] = true;
-//
-//            active_neuron = this->neurons.at( idx );
-//
-//            for(Connection* connection: *(active_neuron->get_connections_out())){
-//
-//                target_neuron = connection->get_neuron_out( );
-//                idx_target = target_neuron->get_idx( );
-//
-//                if( visited_neurons[idx_target] ){
-//                    //this neuron was already analyzed
-//                    continue;
-//                }
-//
-//                visited_neurons[idx_target] = true;
-//                active_neuron_set[active_set_size[idx2] + n * idx2] = idx_target;
-//                active_set_size[idx2]++;
-//            }
-//        }
-//        idx1 = idx2;
-//        idx2 = (idx1 + 1) % 2;
-//        active_set_size[idx2] = 0;
-//    }
-//
-//
-//    /* INITIAL STATE FOR THE FORWARD PASS */
-//    active_set_size[0] = active_set_size[1] = 0;
-//    ::std::fill(visited_neurons, visited_neurons + n, false);
-//
-//    for(size_t i: output_neuron_indices ){
-//
-//        if( i < 0 || i >= n){
-//            //invalid index
-//            continue;
-//        }
-//        active_neuron_set[idx1 * n + active_set_size[idx1]] = i;
-//        active_set_size[idx1]++;
-//
-//        visited_neurons[i] = true;
-//    }
-//
-//    /* BACKWARD PASS */
-//    size_t n_new_neurons = 0;
-//    while(active_set_size[idx1] > 0){
-//
-//        //we iterate through the active neurons and propagate the signal
-//        for(int i = 0; i < active_set_size[idx1]; ++i){
-//            idx = active_neuron_set[i];
-//
-//            is_reachable_from_destination[ idx ] = true;
-//
-//            active_neuron = this->neurons.at( idx );
-//
-//            if(is_reachable_from_source[ idx ]){
-//                n_new_neurons++;
-//            }
-//
-//            for(Connection* connection: *(active_neuron->get_connections_in())){
-//
-//                target_neuron = connection->get_neuron_in( );
-//                idx_target = target_neuron->get_idx( );
-//
-//                if( visited_neurons[idx_target] ){
-//                    //this neuron was already analyzed
-//                    continue;
-//                }
-//
-//                visited_neurons[idx_target] = true;
-//                active_neuron_set[active_set_size[idx2] + n * idx2] = idx_target;
-//                active_set_size[idx2]++;
-//            }
-//        }
-//        idx1 = idx2;
-//        idx2 = (idx1 + 1) % 2;
-//        active_set_size[idx2] = 0;
-//    }
-//
-//    /* FOR CONSISTENCY REASONS */
-//    for(size_t in: input_neuron_indices){
-//        if( !is_reachable_from_destination[in] ){
-//            n_new_neurons++;
-//        }
-//        is_reachable_from_destination[in] = true;
-//    }
-//    /* FOR CONSISTENCY REASONS */
-//    for(size_t in: output_neuron_indices){
-//        if( !is_reachable_from_source[in] ){
-//            n_new_neurons++;
-//        }
-//        is_reachable_from_source[in] = true;
-//    }
-//
-//    /* WE FORM THE SET OF NEURONS IN THE OUTPUT NETWORK  */
-//    if(n_new_neurons > 0){
-////        printf("Number of new neurons: %d\n", n_new_neurons);
-//        output_net = new NeuralNetwork();
-//        output_net->set_weight_array( this->connection_weights );
-//
-//        ::std::vector<size_t > local_inputs(0), local_outputs(0);
-//        local_inputs.reserve(input_neuron_indices.size());
-//        local_outputs.reserve(output_neuron_indices.size());
-//
-//        ::std::vector<Neuron*> local_n_arr(0);
-//        local_n_arr.reserve( n_new_neurons );
-//
-//        ::std::vector<Neuron*> local_local_n_arr(0);
-//        local_local_n_arr.reserve( n_new_neurons );
-//
-//        int * neuron_local_mapping = new int[ n ];
-//        ::std::fill(neuron_local_mapping, neuron_local_mapping + n, -1);
-//        idx = 0;
-//        for(size_t i = 0; i < n; ++i){
-//            if(is_reachable_from_source[i] && is_reachable_from_destination[i]){
-//                neuron_local_mapping[i] = (int)idx;
-//                idx++;
-//
-//                Neuron *new_neuron = this->neurons.at(i)->get_copy( );
-//
-//                output_net->add_neuron( new_neuron );
-//                local_local_n_arr.push_back( new_neuron );
-//                local_n_arr.push_back( this->neurons.at(i) );
-//            }
-//        }
-//        for(size_t in: input_neuron_indices){
-//            local_inputs.push_back(neuron_local_mapping[in]);
-//        }
-//        for(size_t in: output_neuron_indices){
-//            local_outputs.push_back(neuron_local_mapping[in]);
-//        }
-//
-////        printf("%d\n", local_n_arr.size());
-////        printf("inputs: %d, outputs: %d\n", local_inputs.size(), local_outputs.size());
-//        int local_idx_1, local_idx_2;
-//        for(Neuron* source_neuron: local_n_arr){
-//            //we also add the relevant edges
-//            local_idx_1 = neuron_local_mapping[source_neuron->get_idx()];
-//
-//            for(Connection* connection: *(source_neuron->get_connections_out( ))){
-//                target_neuron = connection->get_neuron_out();
-//
-//                local_idx_2 = neuron_local_mapping[target_neuron->get_idx()];
-//                if(local_idx_2 >= 0){
-//                    //this edge is part of the subnetwork
-//                    Connection* new_connection = connection->get_copy( local_local_n_arr[local_idx_1], local_local_n_arr[local_idx_2] );
-//
-//                    local_local_n_arr[local_idx_1]->add_connection_out(new_connection);
-//                    local_local_n_arr[local_idx_2]->add_connection_in(new_connection);
-//
-////                    printf("adding a connection between neurons %d, %d\n", local_idx_1, local_idx_2);
-//                }
-//
-//            }
-//
-//        }
-//        output_net->specify_input_neurons(local_inputs);
-//        output_net->specify_output_neurons(local_outputs);
-//
-//
-//        delete [] neuron_local_mapping;
-//    }
-//
-//    delete [] is_reachable_from_source;
-//    delete [] is_reachable_from_destination;
-//    delete [] part_of_subnetwork;
-//    delete [] visited_neurons;
-//    delete [] active_neuron_set;
-//
-//
         return output_net;
     }
 
@@ -422,8 +102,6 @@ namespace lib4neuro {
         double potential, bias;
         int bias_idx;
 
-//        std::shared_ptr<std::vector<double>> params;
-//        params.reset(custom_weights_and_biases);
         this->copy_parameter_space(custom_weights_and_biases);
 
         this->analyze_layer_structure();
@@ -485,10 +163,8 @@ namespace lib4neuro {
                                          size_t weight_idx) {
 
         std::shared_ptr<ConnectionFunctionIdentity> con_weight_u1u2;
-//        ConnectionFunctionIdentity* con_weight_u1u2;
         if (sct == SIMPLE_CONNECTION_TYPE::UNITARY_WEIGHT) {
             con_weight_u1u2 = std::make_shared<ConnectionFunctionIdentity>(ConnectionFunctionIdentity());
-//            con_weight_u1u2 = new ConnectionFunctionIdentity();
         } else {
             if (sct == SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT) {
                 weight_idx = this->connection_weights.size();
@@ -500,11 +176,9 @@ namespace lib4neuro {
             }
 
             con_weight_u1u2 = std::make_shared<ConnectionFunctionIdentity>(ConnectionFunctionIdentity(weight_idx));
-//            con_weight_u1u2 = new ConnectionFunctionIdentity(weight_idx);
         }
 
         size_t conn_idx = this->add_new_connection_to_list(con_weight_u1u2);
-//        size_t conn_idx = this->add_new_connection_to_list(con_weight_u1u2);
 
         this->add_outward_connection(n1_idx, n2_idx, conn_idx);
         this->add_inward_connection(n2_idx, n1_idx, conn_idx);
@@ -554,14 +228,10 @@ namespace lib4neuro {
     void NeuralNetwork::set_parameter_space_pointers(NeuralNetwork &parent_network) {
 
         if (!this->connection_weights.empty()) {
-//            delete connection_weights;
             this->connection_weights.clear();
         }
 
         this->neuron_biases.clear();
-//        if (this->neuron_biases) {
-//            delete this->neuron_biases;
-//        }
 
         this->connection_weights = parent_network.connection_weights;
         this->neuron_biases = parent_network.neuron_biases;
@@ -589,8 +259,6 @@ namespace lib4neuro {
         double potential, bias;
         int bias_idx;
 
-//        std::shared_ptr<std::vector<double>> params;
-//        params.reset(custom_weights_and_biases);
         this->copy_parameter_space(custom_weights_and_biases);
 
         this->analyze_layer_structure();
@@ -861,22 +529,10 @@ namespace lib4neuro {
     void NeuralNetwork::specify_input_neurons(std::vector<size_t> &input_neurons_indices) {
         this->input_neuron_indices = input_neurons_indices;
         
-//        if (!this->input_neuron_indices) {
-//            this->input_neuron_indices = new ::std::vector<size_t>(input_neurons_indices);
-//        } else {
-//            delete this->input_neuron_indices;
-//            this->input_neuron_indices = new ::std::vector<size_t>(input_neurons_indices);
-//        }
     }
 
     void NeuralNetwork::specify_output_neurons(std::vector<size_t> &output_neurons_indices) {
         this->output_neuron_indices = output_neurons_indices;
-//        if (!this->output_neuron_indices) {
-//            this->output_neuron_indices = new ::std::vector<size_t>(output_neurons_indices);
-//        } else {
-//            delete this->output_neuron_indices;
-//            this->output_neuron_indices = new ::std::vector<size_t>(output_neurons_indices);
-//        }
     }
 
     void NeuralNetwork::write_weights() {
@@ -1019,10 +675,6 @@ namespace lib4neuro {
         return &this->connection_weights;
     }
 
-//    size_t NeuralNetwork::add_new_connection_to_list(ConnectionFunctionGeneral *con) {
-//        this->connection_list.push_back(std::make_shared<ConnectionFunctionGeneral>(*con));
-//        return this->connection_list.size() - 1;
-//    }
 
     size_t NeuralNetwork::add_new_connection_to_list(std::shared_ptr<ConnectionFunctionGeneral> con) {
         this->connection_list.push_back(con);
@@ -1055,26 +707,8 @@ namespace lib4neuro {
 
         /* space allocation */
         this->neuron_layers_feedforward.clear();
-//        if (this->neuron_layers_feedforward) {
-//            for (auto e: this->neuron_layers_feedforward) {
-//                delete e;
-//                e = nullptr;
-//            }
-//            delete this->neuron_layers_feedforward;
-//            this->neuron_layers_feedforward = nullptr;
-//        }
-
-//    if(this->neuron_layers_feedbackward){
-//        for(auto e: *this->neuron_layers_feedbackward){
-//            delete e;
-//            e = nullptr;
-//        }
-//        delete this->neuron_layers_feedbackward;
-//        this->neuron_layers_feedbackward = nullptr;
-//    }
-
-//        this->neuron_layers_feedforward = new ::std::vector<std::vector<size_t> *>(0);
-//    this->neuron_layers_feedbackward = new ::std::vector<std::vector<size_t>*>(0);
+
+
 
 
         auto n = this->neurons.size();
@@ -1143,48 +777,6 @@ namespace lib4neuro {
         }
 
 
-//    /* feed backward analysis */
-//    active_set_size[0] = 0;
-//    active_set_size[1] = 0;
-//
-//    idx1 = 0;
-//    idx2 = 1;
-//
-//    active_set_size[0] = this->get_n_outputs();
-//    for(i = 0; i < this->get_n_outputs(); ++i){
-//        active_eval_set[i] = this->output_neuron_indices.at(i);
-//    }
-//
-//    while(active_set_size[idx1] > 0){
-//
-//        /* we add the current active set as the new outward layer */
-//        ::std::vector<size_t> *new_feedbackward_layer = new ::std::vector<size_t>(active_set_size[idx1]);
-//        this->neuron_layers_feedbackward->push_back( new_feedbackward_layer );
-//
-//        //we iterate through the active neurons and propagate the signal backward
-//        for(i = 0; i < active_set_size[idx1]; ++i){
-//            active_ni = active_eval_set[i + n * idx1];
-//            new_feedbackward_layer->at( i ) = active_ni;
-//
-//            if(!this->inward_adjacency.at(active_ni)){
-//                continue;
-//            }
-//
-//            for(auto ni: *(this->inward_adjacency.at(active_ni))){
-//                outward_saturation[ni.first]--;
-//
-//                if(outward_saturation[ni.first] == 0){
-//                    active_eval_set[active_set_size[idx2] + n * idx2] = ni.first;
-//                    active_set_size[idx2]++;
-//                }
-//            }
-//        }
-//
-//        idx1 = idx2;
-//        idx2 = (idx1 + 1) % 2;
-//
-//        active_set_size[idx2] = 0;
-//    }
 
         this->layers_analyzed = true;
     }
@@ -1235,21 +827,8 @@ namespace lib4neuro {
                                          "doesn't specify input and output layers, which are compulsory!");
         }
 
-//        this->neurons = new ::std::vector<Neuron *>(0);
-//        this->neuron_biases = new ::std::vector<double>(0);
-//        this->neuron_potentials = new ::std::vector<double>(0);
-//        this->neuron_bias_indices = new ::std::vector<int>(0);
 
-//        this->connection_weights = new ::std::vector<double>(0);
-//        this->connection_list = ::std::vector<std::shared_ptr<ConnectionFunctionGeneral>>(0);
-//        this->inward_adjacency = new ::std::vector<std::vector<std::pair<size_t, size_t>> *>(0);
-//        this->outward_adjacency = new ::std::vector<std::vector<std::pair<size_t, size_t>> *>(0);
-//
-//        this->neuron_layers_feedforward = new ::std::vector<std::vector<size_t> *>(0);
-//        this->neuron_layers_feedbackward = new ::std::vector<std::vector<size_t> *>(0);
 
-//        this->input_neuron_indices = new ::std::vector<size_t>(0);
-//        this->output_neuron_indices = new ::std::vector<size_t>(0);
 
         this->delete_weights = true;
         this->delete_biases = true;
@@ -1357,8 +936,6 @@ namespace lib4neuro {
         }
 
         /* Init variables containing indices of INPUT nad OUTPUT neurons */
-//        this->input_neuron_indices = new ::std::vector<size_t>(inp_dim);
-//        this->output_neuron_indices = new ::std::vector<size_t>(out_dim);
 
         this->input_neuron_indices = input_layer_neuron_indices;
         this->output_neuron_indices = current_layer_neuron_indices;
diff --git a/src/Network/NeuralNetwork.h b/src/Network/NeuralNetwork.h
index 5205b598..a8531d7b 100644
--- a/src/Network/NeuralNetwork.h
+++ b/src/Network/NeuralNetwork.h
@@ -137,7 +137,6 @@ namespace lib4neuro {
          * @param con Connection object to be added
          * @return Returns the index of the added connection among all the connections
          */
-//        size_t add_new_connection_to_list(ConnectionFunctionGeneral *con);
 
         size_t add_new_connection_to_list(std::shared_ptr<ConnectionFunctionGeneral> con);
 
diff --git a/src/Network/NeuralNetworkSerialization.h b/src/Network/NeuralNetworkSerialization.h
index 130a25ba..02889967 100644
--- a/src/Network/NeuralNetworkSerialization.h
+++ b/src/Network/NeuralNetworkSerialization.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 9/17/18.
-//
 
 #ifndef LIB4NEURO_NEURALNETWORKSERIALIZATION_H
 #define LIB4NEURO_NEURALNETWORKSERIALIZATION_H
diff --git a/src/Network/NeuralNetworkSumSerialization.h b/src/Network/NeuralNetworkSumSerialization.h
index a70adad5..5867fc2a 100644
--- a/src/Network/NeuralNetworkSumSerialization.h
+++ b/src/Network/NeuralNetworkSumSerialization.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 9/17/18.
-//
 
 #ifndef LIB4NEURO_NEURALNETWORKSUMSERIALIZATION_H
 #define LIB4NEURO_NEURALNETWORKSUMSERIALIZATION_H
diff --git a/src/Neuron/Neuron.cpp b/src/Neuron/Neuron.cpp
index 0dd48b73..3c7f3511 100644
--- a/src/Neuron/Neuron.cpp
+++ b/src/Neuron/Neuron.cpp
@@ -1,7 +1,6 @@
 #include "NeuronSerialization.h"
 
 BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::Neuron);
-//BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::NeuronDifferentiable);
 
 
 namespace lib4neuro {
diff --git a/src/Neuron/NeuronBinary.cpp b/src/Neuron/NeuronBinary.cpp
index 9e7bbb80..b421e1f5 100644
--- a/src/Neuron/NeuronBinary.cpp
+++ b/src/Neuron/NeuronBinary.cpp
@@ -1,6 +1,3 @@
-//
-// Created by fluffymoo on 11.6.18.
-//
 
 #include <boost/serialization/export.hpp>
 
diff --git a/src/Neuron/NeuronBinarySerialization.h b/src/Neuron/NeuronBinarySerialization.h
index a09f0b23..4e3191e1 100644
--- a/src/Neuron/NeuronBinarySerialization.h
+++ b/src/Neuron/NeuronBinarySerialization.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 9/17/18.
-//
 
 #ifndef LIB4NEURO_NEURON_BINARY_SERIALIZATION_H
 #define LIB4NEURO_NEURON_BINARY_SERIALIZATION_H
diff --git a/src/Neuron/NeuronConstant.cpp b/src/Neuron/NeuronConstant.cpp
index 9210b95c..b2965c45 100644
--- a/src/Neuron/NeuronConstant.cpp
+++ b/src/Neuron/NeuronConstant.cpp
@@ -7,8 +7,6 @@
 
 #include <boost/serialization/export.hpp>
 
-//#include "NeuronConstant.h"
-//#include "NeuronSerialization.h"
 #include "NeuronConstantSerialization.h"
 
 
diff --git a/src/Neuron/NeuronConstantSerialization.h b/src/Neuron/NeuronConstantSerialization.h
index 2880cd4f..6b658f47 100644
--- a/src/Neuron/NeuronConstantSerialization.h
+++ b/src/Neuron/NeuronConstantSerialization.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 9/17/18.
-//
 
 #ifndef LIB4NEURO_NEURON_CONSTANT_SERIALIZATION_H
 #define LIB4NEURO_NEURON_CONSTANT_SERIALIZATION_H
diff --git a/src/Neuron/NeuronLinear.cpp b/src/Neuron/NeuronLinear.cpp
index 0cd3aa8d..fbcb64f3 100644
--- a/src/Neuron/NeuronLinear.cpp
+++ b/src/Neuron/NeuronLinear.cpp
@@ -1,6 +1,3 @@
-//
-// Created by fluffymoo on 11.6.18.
-//
 
 #include <boost/serialization/export.hpp>
 
diff --git a/src/Neuron/NeuronLinearSerialization.h b/src/Neuron/NeuronLinearSerialization.h
index 38cab83b..8f3fc12a 100644
--- a/src/Neuron/NeuronLinearSerialization.h
+++ b/src/Neuron/NeuronLinearSerialization.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 9/17/18.
-//
 
 #ifndef LIB4NEURO_NEURONLINEARSERIALIZATION_H
 #define LIB4NEURO_NEURONLINEARSERIALIZATION_H
diff --git a/src/Neuron/NeuronLogistic.cpp b/src/Neuron/NeuronLogistic.cpp
index dd1469d7..22443e2a 100644
--- a/src/Neuron/NeuronLogistic.cpp
+++ b/src/Neuron/NeuronLogistic.cpp
@@ -1,6 +1,3 @@
-//
-// Created by fluffymoo on 11.6.18.
-//
 
 #include <boost/serialization/export.hpp>
 
diff --git a/src/Neuron/NeuronLogisticSerialization.h b/src/Neuron/NeuronLogisticSerialization.h
index 1d65fcba..ea9ffb85 100644
--- a/src/Neuron/NeuronLogisticSerialization.h
+++ b/src/Neuron/NeuronLogisticSerialization.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 9/17/18.
-//
 
 #ifndef LIB4NEURO_NEURONLOGISTICSERIALIZATION_H
 #define LIB4NEURO_NEURONLOGISTICSERIALIZATION_H
diff --git a/src/Neuron/NeuronSerialization.h b/src/Neuron/NeuronSerialization.h
index f2aca6f7..51260f3f 100644
--- a/src/Neuron/NeuronSerialization.h
+++ b/src/Neuron/NeuronSerialization.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 9/17/18.
-//
 
 #ifndef LIB4NEURO_NEURON_SERIALIZATION_H
 #define LIB4NEURO_NEURON_SERIALIZATION_H
diff --git a/src/NormalizationStrategy/NormalizationStrategy.cpp b/src/NormalizationStrategy/NormalizationStrategy.cpp
index 8cb7ebc6..66c13b78 100644
--- a/src/NormalizationStrategy/NormalizationStrategy.cpp
+++ b/src/NormalizationStrategy/NormalizationStrategy.cpp
@@ -1,6 +1,3 @@
-//
-// Created by martin on 21.11.18.
-//
 
 #include <cmath>
 #include <stdexcept>
@@ -40,7 +37,6 @@ double DoubleUnitStrategy::de_normalize(double n) {
         THROW_RUNTIME_ERROR("Data were not normalized, so de-normalization cannot proceed!");
     }
 
-//    return 0.5 * (1 + (this->get_max_value() - this->get_min_value()) * n) + this->get_min_value();
     return 0.5 * ((1 + n) * (this->get_max_value() - this->get_min_value())) + this->get_min_value();
 }
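The retained formula inverts the forward mapping n = 2 * (x - min) / (max - min) - 1 onto [-1, 1]; that mapping is inferred from the name DoubleUnitStrategy rather than stated anywhere in the patch. Under this assumption, a minimal round-trip sketch shows why the deleted variant was wrong:

    #include <cassert>
    #include <cmath>

    int main() {
        const double lo = 2.0, hi = 10.0, x = 6.0;
        const double n = 2.0 * (x - lo) / (hi - lo) - 1.0;         // assumed normalization: n = 0
        const double kept    = 0.5 * ((1 + n) * (hi - lo)) + lo;   // retained formula -> 6.0, recovers x
        const double removed = 0.5 * (1 + (hi - lo) * n) + lo;     // deleted formula  -> 2.5, does not
        assert(std::abs(kept - x) < 1e-12);
        assert(std::abs(removed - x) > 1.0);
        return 0;
    }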
 
diff --git a/src/NormalizationStrategy/NormalizationStrategy.h b/src/NormalizationStrategy/NormalizationStrategy.h
index 1b181087..f1ab46e8 100644
--- a/src/NormalizationStrategy/NormalizationStrategy.h
+++ b/src/NormalizationStrategy/NormalizationStrategy.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 21.11.18.
-//
 
 #ifndef LIB4NEURO_NORMALIZATIONSTRATEGY_H
 #define LIB4NEURO_NORMALIZATIONSTRATEGY_H
@@ -13,16 +10,6 @@
  */
 class NormalizationStrategy {
 protected:
-//    /**
-//     *
-//     */
-//    double max_value = std::numeric_limits<double>::quiet_NaN();
-//
-//    /**
-//     *
-//     */
-//    double min_value = std::numeric_limits<double>::quiet_NaN();
-//
 
     /**
      * Maximum (index 0) and minimum (index 1) input value
diff --git a/src/NormalizationStrategy/NormalizationStrategySerialization.h b/src/NormalizationStrategy/NormalizationStrategySerialization.h
index ffc4fbec..f42c09fd 100644
--- a/src/NormalizationStrategy/NormalizationStrategySerialization.h
+++ b/src/NormalizationStrategy/NormalizationStrategySerialization.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 22.11.18.
-//
 
 #ifndef LIB4NEURO_NORMALIZATIONSTRATEGYSERIALIZATION_H
 #define LIB4NEURO_NORMALIZATIONSTRATEGYSERIALIZATION_H
diff --git a/src/Simulator/Simulator.cpp b/src/Simulator/Simulator.cpp
index 24a66295..ba7f94a9 100644
--- a/src/Simulator/Simulator.cpp
+++ b/src/Simulator/Simulator.cpp
@@ -24,21 +24,8 @@ namespace lib4neuro {
             std::vector<std::vector<double>> &xi
             ) {
 
-//        this->neurons = new ::std::vector<Neuron *>(0);
-//        this->neuron_biases = new ::std::vector<double>(0);
-//        this->neuron_potentials = new ::std::vector<double>(0);
-//        this->neuron_bias_indices = new ::std::vector<int>(0);
-
-//        this->connection_weights = new ::std::vector<double>(0);
-//        this->connection_list = ::std::vector<std::shared_ptr<ConnectionFunctionGeneral>>(0);
-//        this->inward_adjacency = new ::std::vector<std::vector<std::pair<size_t, size_t>> *>(0);
-//        this->outward_adjacency = new ::std::vector<std::vector<std::pair<size_t, size_t>> *>(0);
-//
-//        this->neuron_layers_feedforward = new ::std::vector<std::vector<size_t> *>(0);
-//        this->neuron_layers_feedbackward = new ::std::vector<std::vector<size_t> *>(0);
-
-//        this->input_neuron_indices = nullptr; //new ::std::vector<size_t>(0);
-//        this->output_neuron_indices = nullptr; // ::std::vector<size_t>(0);
+
+
 
         this->delete_weights = true;
         this->delete_biases = true;
@@ -87,7 +74,6 @@ namespace lib4neuro {
 
             //connections towards the bias neurons
             bias_layer_neuron_indices.resize(0);
-//            for( size_t nn = 0; nn < xi[hi].size(); ++nn ){
                 for(auto e : xi[hi]) {
                     std::shared_ptr<Neuron> new_neuron;
                     new_neuron.reset(new NeuronBiased(e));
@@ -219,8 +205,6 @@ namespace lib4neuro {
         }
 
         /* Init variables containing indices of INPUT and OUTPUT neurons */
-//        this->input_neuron_indices = new ::std::vector<size_t>(inp_dim);
-//        this->output_neuron_indices = new ::std::vector<size_t>(out_dim);
 
         this->input_neuron_indices = input_layer_neuron_indices;
         this->output_neuron_indices = output_layer_neuron_indices;
diff --git a/src/Solvers/DESolver.cpp b/src/Solvers/DESolver.cpp
index 4b9df8c6..d8d9fa03 100644
--- a/src/Solvers/DESolver.cpp
+++ b/src/Solvers/DESolver.cpp
@@ -82,16 +82,12 @@ namespace lib4neuro {
         this->dim_inn = m;
         this->n_equations = n_equations;
 
-//        this->solution = new NeuralNetwork();
 
-//        this->solution_inner_neurons = new std::vector<NeuronLogistic *>(0);
-//        this->solution_inner_neurons.reserve(m);
 
         /* input neurons */
         std::vector<size_t> input_set(this->dim_i);
         size_t idx;
         for (size_t i = 0; i < this->dim_i; ++i) {
-//            NeuronLinear *input_i = new NeuronLinear();  //f(x) = x
             std::shared_ptr<Neuron> new_neuron;
             new_neuron.reset(new NeuronLinear());
             idx = this->solution->add_neuron(new_neuron, BIAS_TYPE::NO_BIAS);
@@ -112,7 +108,6 @@ namespace lib4neuro {
         /* inner neurons */
         size_t first_inner_neuron = 0;
         for (size_t i = 0; i < this->dim_inn; ++i) {
-//            NeuronLogistic *inner_i = new NeuronLogistic(); //f(x) = 1.0 / (1.0 + e^(-x))
             std::shared_ptr<NeuronLogistic> new_neuron2;
             new_neuron2.reset(new NeuronLogistic());
             this->solution_inner_neurons.push_back(new_neuron2);
@@ -147,18 +142,13 @@ namespace lib4neuro {
 
         this->map_multiindices2nn[initial_mi] = this->solution;
 
-//        this->differential_equations = new std::vector<NeuralNetworkSum *>(0);
         this->differential_equations.reserve(this->n_equations);
 
         for (unsigned int i = 0; i < this->n_equations; ++i) {
-//            NeuralNetworkSum *new_sum = new NeuralNetworkSum();
             std::shared_ptr<NeuralNetworkSum> new_sum;
             new_sum.reset(new NeuralNetworkSum());
             this->differential_equations.push_back(new_sum);
         }
-//
-//        this->errors_functions_types = new std::vector<ErrorFunctionType>(this->n_equations);
-//        this->errors_functions_data_sets = new std::vector<DataSet *>(this->n_equations);
 
         printf("done\n");
 
@@ -166,34 +156,7 @@ namespace lib4neuro {
 
     DESolver::~DESolver() {
 
-//        if (this->solution_inner_neurons) {
-//            delete this->solution_inner_neurons;
-//            this->solution_inner_neurons = nullptr;
-//        }
-//
-//        if (this->errors_functions_types) {
-//            delete this->errors_functions_types;
-//            this->errors_functions_types = nullptr;
-//        }
-//
-//        if (this->errors_functions_data_sets) {
-//            delete this->errors_functions_data_sets;
-//            this->errors_functions_data_sets = nullptr;
-//        }
-//
-//        if (this->differential_equations) {
-//            for (auto nns: *this->differential_equations) {
-//                delete nns;
-//            }
-//            delete this->differential_equations;
-//            this->differential_equations = nullptr;
-//        }
-
-//
-//        for (auto nn: this->map_multiindices2nn) {
-//            NeuralNetwork *n_to_delete = nn.second;
-//            delete n_to_delete;
-//        }
+
 
     }
 
@@ -224,7 +187,6 @@ namespace lib4neuro {
             }
         }
 
-//        NeuralNetwork *new_net = nullptr;
         std::shared_ptr<NeuralNetwork> new_net;
         /* we check whether the new multi-index is already present */
         if (map_multiindices2nn.find(alpha) != map_multiindices2nn.end()) {
@@ -245,7 +207,6 @@ namespace lib4neuro {
         std::vector<size_t> input_set(this->dim_i);
         size_t idx;
         for (size_t i = 0; i < this->dim_i; ++i) {
-//            NeuronLinear *input_i = new NeuronLinear();  //f(x) = x
             std::shared_ptr<Neuron> new_neuron;
             new_neuron.reset(new NeuronLinear());
             idx = new_net->add_neuron(new_neuron, BIAS_TYPE::NO_BIAS);
@@ -266,7 +227,6 @@ namespace lib4neuro {
 
         /* the new partial derivative has degree of at least one */
         size_t first_inner_neuron = 0;
-//        NeuronLogistic *n_ptr = nullptr, *n_ptr2 = nullptr;
         std::shared_ptr<NeuronLogistic> n_ptr;
         std::shared_ptr<NeuronLogistic> n_ptr2;
         for (size_t i = 0; i < this->dim_inn; ++i) {
@@ -276,11 +236,6 @@ namespace lib4neuro {
                 n_ptr2 = n_ptr;
 
                 n_ptr = std::shared_ptr<NeuronLogistic>(n_ptr->get_derivative());
-//
-//                if (j > 0) {
-//                    delete n_ptr2;
-//                    n_ptr2 = nullptr;
-//                }
 
             }
             idx = new_net->add_neuron(n_ptr, BIAS_TYPE::EXISTING_BIAS,
@@ -382,8 +337,6 @@ namespace lib4neuro {
 //TODO: use a general method with an Optimizer as its argument instead (create a hierarchy of optimizers)
     void DESolver::solve(LearningMethod &learning_method) {
 
-//        NeuralNetwork *nn;
-//        DataSet *ds;
 
         std::shared_ptr<NeuralNetwork> nn;
         std::shared_ptr<DataSet> ds;
@@ -408,8 +361,6 @@ namespace lib4neuro {
         printf("error before optimization: %f\n", total_error.eval(nullptr));
 
         learning_method.optimize(total_error);
-//        std::shared_ptr<std::vector<double>> params;
-//        params.reset(learning_method.get_parameters());
         std::vector<double> params = *learning_method.get_parameters();
         this->solution->copy_parameter_space(&params);
 
@@ -433,23 +384,12 @@ namespace lib4neuro {
 
         this->differential_equations.at(equation_idx)->eval_single(input, output, weight_and_biases.get());
 
-//    printf("Input: ");
-//    for( auto e: input ){
-//        printf("%f, ", e);
-//    }
-//    printf("\nOutput: ");
-//    for( auto e: output ){
-//        printf("%f, ", e);
-//    }
-//    printf("\n");
 
         return output[0];
     }
 
     double DESolver::eval_total_error(std::vector<double> &weights_and_biases) {
 
-//        NeuralNetwork *nn;
-//        DataSet *ds;
 
         std::shared_ptr<NeuralNetwork> nn;
         std::shared_ptr<DataSet> ds;
diff --git a/src/boost_test_lib_dummy.cpp b/src/boost_test_lib_dummy.cpp
index 38992727..f8ea847f 100644
--- a/src/boost_test_lib_dummy.cpp
+++ b/src/boost_test_lib_dummy.cpp
@@ -1,6 +1,3 @@
-//
-// Created by David on 11.07.2018.
-//
 
 
 #ifndef BOOST_TEST_MODULE
diff --git a/src/constants.h b/src/constants.h
index e31a0a3a..84120a54 100644
--- a/src/constants.h
+++ b/src/constants.h
@@ -1,6 +1,3 @@
-//
-// Created by fluffymoo on 11.6.18.
-//
 
 #ifndef INC_4NEURO_CONSTANTS_H
 #define INC_4NEURO_CONSTANTS_H
diff --git a/src/examples/net_test_1.cpp b/src/examples/net_test_1.cpp
index 2514bf08..73c7ff52 100644
--- a/src/examples/net_test_1.cpp
+++ b/src/examples/net_test_1.cpp
@@ -2,9 +2,6 @@
  * Basic example using the particle swarm method to train the network
  */
 
-//
-// Created by martin on 7/16/18.
-//
 
 #include <vector>
 #include <iostream>
@@ -49,8 +46,6 @@ void optimize_via_particle_swarm( l4n::NeuralNetwork &net, l4n::ErrorFunction &e
     );
     swarm_01.optimize( ef );
 
-//    std::shared_ptr<std::vector<double>> parameters;
-//    parameters.reset(swarm_01.get_parameters());
     net.copy_parameter_space(swarm_01.get_parameters());
 
     /* ERROR CALCULATION */
@@ -65,8 +60,6 @@ void optimize_via_gradient_descent( l4n::NeuralNetwork &net, l4n::ErrorFunction
 
     gd.optimize( ef );
 
-//    std::shared_ptr<std::vector<double>> parameters;
-//    parameters.reset(gd.get_parameters());
     net.copy_parameter_space(gd.get_parameters());
 
     /* ERROR CALCULATION */
@@ -100,20 +93,16 @@ int main() {
     l4n::NeuralNetwork net;
 
     /* Input neurons */
-//    l4n::NeuronLinear *i1 = new l4n::NeuronLinear( );  //f(x) = x
-//    l4n::NeuronLinear *i2 = new l4n::NeuronLinear( );  //f(x) = x
     std::shared_ptr<l4n::NeuronLinear> i1 = std::make_shared<l4n::NeuronLinear>();
     std::shared_ptr<l4n::NeuronLinear> i2 = std::make_shared<l4n::NeuronLinear>();
 
     /* Output neuron */
-//    l4n::NeuronLinear *o1 = new l4n::NeuronLinear( );  //f(x) = x
     std::shared_ptr<l4n::NeuronLinear> o1 = std::make_shared<l4n::NeuronLinear>();
 
     /* Adding neurons to the net */
     size_t idx1 = net.add_neuron(i1, l4n::BIAS_TYPE::NO_BIAS);
     size_t idx2 = net.add_neuron(i2, l4n::BIAS_TYPE::NO_BIAS);
     size_t idx3 = net.add_neuron(o1, l4n::BIAS_TYPE::NO_BIAS);
-//
 
     /* Adding connections */
     net.add_connection_simple(idx1, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
diff --git a/src/examples/net_test_2.cpp b/src/examples/net_test_2.cpp
index 3d193053..c8fe35e3 100644
--- a/src/examples/net_test_2.cpp
+++ b/src/examples/net_test_2.cpp
@@ -2,9 +2,6 @@
  * Example of a neural network with reused edge weights
  */
 
-//
-// Created by Michal on 7/17/18.
-//
 
 #include <vector>
 
@@ -48,8 +45,6 @@ void optimize_via_particle_swarm( l4n::NeuralNetwork &net, l4n::ErrorFunction &e
     );
     swarm_01.optimize( ef );
 
-//    std::shared_ptr<std::vector<double>> parameters;
-//    parameters.reset(swarm_01.get_parameters());
     net.copy_parameter_space(swarm_01.get_parameters());
 
     std::cout << "Run finished! Error of the network[Particle swarm]: " << ef.eval( nullptr ) << std::endl;
@@ -62,8 +57,6 @@ void optimize_via_gradient_descent( l4n::NeuralNetwork &net, l4n::ErrorFunction
 
     gd.optimize( ef );
 
-//    std::shared_ptr<std::vector<double>> parameters;
-//    parameters.reset(gd.get_parameters());
     net.copy_parameter_space(gd.get_parameters());
 
     /* ERROR CALCULATION */
@@ -101,17 +94,12 @@ int main() {
     l4n::NeuralNetwork net;
 
     /* Input neurons */
-//    l4n::NeuronLinear *i1 = new l4n::NeuronLinear( );  //f(x) = x
-//    l4n::NeuronLinear *i2 = new l4n::NeuronLinear( );  //f(x) = x
     std::shared_ptr<l4n::NeuronLinear> i1 = std::make_shared<l4n::NeuronLinear>();
     std::shared_ptr<l4n::NeuronLinear> i2 = std::make_shared<l4n::NeuronLinear>();
 
-//    l4n::NeuronLinear *i3 = new l4n::NeuronLinear( );  //f(x) = x
     std::shared_ptr<l4n::NeuronLinear> i3 = std::make_shared<l4n::NeuronLinear>();
 
     /* Output neurons */
-//    l4n::NeuronLinear *o1 = new l4n::NeuronLinear( );  //f(x) = x
-//    l4n::NeuronLinear *o2 = new l4n::NeuronLinear( );  //f(x) = x
     std::shared_ptr<l4n::NeuronLinear> o1 = std::make_shared<l4n::NeuronLinear>();
     std::shared_ptr<l4n::NeuronLinear> o2 = std::make_shared<l4n::NeuronLinear>();
 
@@ -147,7 +135,6 @@ int main() {
     net.randomize_weights();
     optimize_via_particle_swarm( net, mse );
 
-//    printf("evaluation of error at point (%f, %f) => %f\n", weights[0], weights[1], mse.eval(weights));
 
     /* GRADIENT DESCENT LEARNING */
     net.randomize_weights();
diff --git a/src/examples/net_test_3.cpp b/src/examples/net_test_3.cpp
index 942fedbe..c278101f 100644
--- a/src/examples/net_test_3.cpp
+++ b/src/examples/net_test_3.cpp
@@ -24,7 +24,6 @@ double get_difference(std::vector<double> &a, std::vector<double> &b){
 
     for( size_t i = 0; i < a.size(); ++i ){
 
-//        std::cout << a[i] << " - " << b[i] << std::endl;
 
         m = a[i]-b[i];
         out += m * m;
@@ -78,8 +77,6 @@ int main(int argc, char** argv) {
 
         size_t n_good = 0, n_bad = 0;
 
-//        nn1.write_weights();
-//        nn1.write_biases();
         for(int i = 0; i < n_tests; ++i){
 
             std::vector<double> input(1);
diff --git a/src/examples/net_test_ode_1.cpp b/src/examples/net_test_ode_1.cpp
index 6dfa6e97..8d0cff53 100644
--- a/src/examples/net_test_ode_1.cpp
+++ b/src/examples/net_test_ode_1.cpp
@@ -163,15 +163,6 @@ void test_ode(double accuracy, size_t n_inner_neurons, size_t train_size, double
     }
 
     /* CHEBYSCHEV TRAIN SET */
-//    alpha = PI / (train_size - 1);
-//    frac = 0.5 * (d1_e - d1_s);
-//    for(unsigned int i = 0; i < train_size; ++i){
-//        inp = {(std::cos(alpha * i) + 1.0) * frac + d1_s};
-//        out = {0.0};
-//        data_vec_g.emplace_back(std::make_pair(inp, out));
-//
-//        test_points[i] = inp[0];
-//    }
     l4n::DataSet ds_00(&data_vec_g);
 
     /* TRAIN DATA FOR DIRICHLET BC */
diff --git a/src/examples/network_serialization.cpp b/src/examples/network_serialization.cpp
index 63b6b10d..0a733500 100644
--- a/src/examples/network_serialization.cpp
+++ b/src/examples/network_serialization.cpp
@@ -39,14 +39,11 @@ int main() {
     l4n::NeuralNetwork net;
 
     /* Input neurons */
-//    l4n::NeuronLinear *i1 = new l4n::NeuronLinear( );  //f(x) = x
-//    l4n::NeuronLinear *i2 = new l4n::NeuronLinear( );  //f(x) = x
 
     std::shared_ptr<l4n::NeuronLinear> i1 = std::make_shared<l4n::NeuronLinear>();
     std::shared_ptr<l4n::NeuronLinear> i2 = std::make_shared<l4n::NeuronLinear>();
 
     /* Output neuron */
-//    l4n::NeuronLinear *o1 = new l4n::NeuronLinear( );  //f(x) = x
     std::shared_ptr<l4n::NeuronLinear> o1 = std::make_shared<l4n::NeuronLinear>();
 
 
diff --git a/src/examples/seminar.cpp b/src/examples/seminar.cpp
index dea822d9..3dd4e75a 100644
--- a/src/examples/seminar.cpp
+++ b/src/examples/seminar.cpp
@@ -116,8 +116,6 @@ int main() {
     );
     swarm_01.optimize( mse );
 
-//    std::shared_ptr<std::vector<double>> parameters;
-//    parameters.reset(swarm_01.get_parameters( ));
     XOR.copy_parameter_space(swarm_01.get_parameters( ));
 
     /* ERROR CALCULATION */
diff --git a/src/examples/simulator.cpp b/src/examples/simulator.cpp
index 8691a7a5..90f9d074 100644
--- a/src/examples/simulator.cpp
+++ b/src/examples/simulator.cpp
@@ -1,6 +1,3 @@
-//
-// Created by martin on 25.11.18.
-//
 
 
 #include <iostream>
@@ -94,14 +91,9 @@ int main(int argc, char** argv) {
         std::shared_ptr<l4n::LearningMethod> new_learning_method;
         new_learning_method.reset(&rnd);
         learning_sequence.add_learning_method( new_learning_method );
-//        learning_sequence.add_learning_method( &ps );
-//        learning_sequence.add_learning_method( &gs );
         std::shared_ptr<l4n::LearningMethod> new_learning_method2;
         new_learning_method2.reset(&leven);
         learning_sequence.add_learning_method( new_learning_method2 );
-//        learning_sequence.add_learning_method( &gs_ );
-//        learning_sequence.add_learning_method( &gs_si );
-//        learning_sequence.add_learning_method( &gs );
 
         /* Weight and bias randomization in the network accordingly to the uniform distribution */
         nn1.randomize_parameters();
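Note that the retained reset(&rnd) and reset(&leven) calls hand what appear to be stack-allocated objects to shared_ptrs whose default deleter will call delete on them. A sketch of a non-owning handle, assuming rnd outlives learning_sequence:

    // No-op deleter: the shared_ptr refers to the stack object without ever deleting it.
    std::shared_ptr<l4n::LearningMethod> rnd_handle(&rnd, [](l4n::LearningMethod*) {});
    learning_sequence.add_learning_method(rnd_handle);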
@@ -114,14 +106,11 @@ int main(int argc, char** argv) {
 
         /* PHASE 4 - TESTING DATA */
 
-//        /* Output file specification */
         std::string filename = "simulator_output.txt";
         std::ofstream output_file(filename);
         if (!output_file.is_open()) {
             throw std::runtime_error("File '" + filename + "' can't be opened!");
         }
-//
-//        /* Neural network loading */
         l4n::NeuralNetwork nn3("test_net_Gradient_Descent.4n");
 
         /* Check of the saved network - write to the file */
@@ -129,25 +118,13 @@ int main(int argc, char** argv) {
         nn3.write_stats(&output_file);
         nn3.write_weights(&output_file);
         nn3.write_biases(&output_file);
-//
-//        /* Evaluate network on an arbitrary data-set and save results into the file */
         l4n::CSVReader reader3("/home/fluffymoo/Dropbox/data_BACK_RH_1.csv", ";", true);  // File, separator, skip 1st line
         reader3.read();  // Read from the file
-//
-//        /* Create data set for both the testing of the neural network */
-//        /* Specify which columns are inputs or outputs */
-//
         std::shared_ptr<l4n::DataSet> ds3 = reader3.get_data_set(&inputs, &outputs);  // Creation of data-set for NN
         if(normalize_data){
             ds3.get()->normalize();  // Normalization of data to prevent numerical problems
         }
-//
-//        output_file << std::endl << "Evaluating network on the dataset: " << std::endl;
-//        ds3.store_data_text(&output_file);
-//
         output_file << "Output and the error:" << std::endl;
-//
-//        /* Error function */
         l4n::MSE mse3(&nn3, ds3.get());  // First parameter - neural network, second parameter - data-set
 
         mse3.eval_on_data_set(ds3.get(), &output_file, nullptr, normalize_data, true);
diff --git a/src/examples/simulator_1_2.cpp b/src/examples/simulator_1_2.cpp
index b692ee2d..360083e5 100644
--- a/src/examples/simulator_1_2.cpp
+++ b/src/examples/simulator_1_2.cpp
@@ -56,7 +56,6 @@ int main(int argc, char** argv) {
 
         /* for each valve (1 in this example) set up the magnitudes of change */
         std::vector<std::vector<double>> xi;
-//        xi.push_back({ds1.get_data()->at(0).second});
         xi.push_back({1.0});
 
         /* The simulator2 object */
@@ -86,9 +85,7 @@ int main(int argc, char** argv) {
                                                                                                          0.7,
                                                                                                          n_particles_swarm,
                                                                                                          max_n_iters_swarm) );
-//        learning_sequence.add_learning_method( new_learning_method2 );
 
-//        std::shared_ptr<l4n::LearningMethod> new_learning_method3 = std::make_shared<l4n::LevenbergMarquardt>(l4n::LevenbergMarquardt(max_n_iters_gradient_lm, batch_size, prec_lm ) );
         auto new_learning_method3 = std::make_shared<l4n::LevenbergMarquardt>(l4n::LevenbergMarquardt(max_n_iters_gradient_lm, batch_size, prec_lm ) );
         learning_sequence.add_learning_method( new_learning_method3 );
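The retained call constructs a LevenbergMarquardt temporary and copies it into make_shared. Forwarding the constructor arguments builds the object in place (a sketch, assuming the three-argument constructor is directly accessible):

    auto new_learning_method3 = std::make_shared<l4n::LevenbergMarquardt>(
        max_n_iters_gradient_lm, batch_size, prec_lm);  // no temporary, single allocation
    learning_sequence.add_learning_method(new_learning_method3);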
 
diff --git a/src/examples/x2_fitting.cpp b/src/examples/x2_fitting.cpp
index 2d9bb56d..6e00de09 100644
--- a/src/examples/x2_fitting.cpp
+++ b/src/examples/x2_fitting.cpp
@@ -1,6 +1,3 @@
-//
-// Created by martin on 17.1.19.
-//
 
 #include <iostream>
 
diff --git a/src/exceptions.h b/src/exceptions.h
index e105282f..3bb3b0ba 100644
--- a/src/exceptions.h
+++ b/src/exceptions.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 9/17/18.
-//
 
 #ifndef LIB4NEURO_EXCEPTIONS_H
 #define LIB4NEURO_EXCEPTIONS_H
diff --git a/src/exprtk.cpp b/src/exprtk.cpp
index cb6a8ebb..d70f36ff 100644
--- a/src/exprtk.cpp
+++ b/src/exprtk.cpp
@@ -1,5 +1,2 @@
-//
-// Created by martin on 8/8/18.
-//
 
 #include <exprtk.hpp>
\ No newline at end of file
diff --git a/src/message.h b/src/message.h
index 7fcb06f5..ea134969 100644
--- a/src/message.h
+++ b/src/message.h
@@ -1,6 +1,3 @@
-//
-// Created by martin on 9/8/18.
-//
 
 #ifndef PROJECT_MESSAGE_H
 #define PROJECT_MESSAGE_H
@@ -24,6 +21,4 @@
 #define WRITE_TO_OFS_DEBUG(ofs, msg)
 #endif // L4N_DEBUG
 
-
-
 #endif //PROJECT_MESSAGE_H
diff --git a/src/settings.h b/src/settings.h
index 4ed3e186..c7c72710 100644
--- a/src/settings.h
+++ b/src/settings.h
@@ -11,7 +11,6 @@
 /**
  * If defined, the NN feed-forward will print out what's happening
  */
-//#define VERBOSE_NN_EVAL
 
 #ifdef _WINDOWS
 #define LIB4NEURO_API __declspec(dllexport)
diff --git a/src/tests/DataSet_test.cpp b/src/tests/DataSet_test.cpp
index ac175166..b085b99e 100644
--- a/src/tests/DataSet_test.cpp
+++ b/src/tests/DataSet_test.cpp
@@ -66,160 +66,5 @@ BOOST_AUTO_TEST_SUITE(DataSet_test)
         BOOST_CHECK_NO_THROW(new lib4neuro::DataSet(&data_vec));
     }
 
-///**
-// * Test of get_data method
-// */
-//    BOOST_AUTO_TEST_CASE(DataSet_get_data_test) {
-//        std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-//        std::vector<double> inp, out;
-//
-//        for (int i = 0; i < 1; i++) {
-//            inp.push_back(i);
-//            out.push_back(i + 4);
-//        }
-//
-//        data_vec.emplace_back(std::make_pair(inp, out));
-//        lib4neuro::DataSet DataSet(&data_vec);
-//
-//        //test of equal data
-//       BOOST_CHECK_EQUAL(0, DataSet.get_data()->at(0).first.at(0));
-//       BOOST_CHECK_EQUAL(4, DataSet.get_data()->at(0).second.at(0));
-//
-//    }
-//
-///**
-// * Test of add_data_pair method
-// */
-//    BOOST_AUTO_TEST_CASE(DataSet_add_data_pair_test) {
-//        std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-//        std::vector<double> inp, out;
-//
-//        for (int i = 0; i < 3; i++) {
-//            inp.push_back(i);
-//            out.push_back(i + 4);
-//        }
-//
-//        data_vec.emplace_back(std::make_pair(inp, out));
-//
-//        lib4neuro::DataSet DataSet(&data_vec);
-//
-//        inp.clear();
-//        out.clear();
-//        for (int i = 8; i < 11; i++) {
-//            inp.push_back(i);
-//            out.push_back(i + 4);
-//        }
-//
-//        DataSet.add_data_pair(inp, out);
-//
-//        // Test of correct add of input
-//        BOOST_CHECK_EQUAL(8, DataSet.get_data()->at(1).first.at(0));
-//        // Test of correct add of output
-//        BOOST_CHECK_EQUAL(12, DataSet.get_data()->at(1).second.at(0));
-//
-//    }
-//
-//    /**
-//     * Test of get_input_dim and get_output_dim methods
-//     */
-//    BOOST_AUTO_TEST_CASE(DataSet_dimension_test) {
-//        std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-//        std::vector<double> inp, out;
-//
-//        for (int i = 0; i < 3; i++) {
-//            inp.push_back(i);
-//            out.push_back(i + 4);
-//        }
-//
-//        data_vec.emplace_back(std::make_pair(inp, out));
-//
-//        lib4neuro::DataSet DataSet(&data_vec);
-//
-//        //Test of correct input dimension
-//        BOOST_CHECK_EQUAL(3, DataSet.get_input_dim());
-//        //Test of correct output dimension
-//        BOOST_CHECK_EQUAL(3, DataSet.get_output_dim());
-//    }
-//
-///**
-// * Test of get_n_elements method
-// */
-//    BOOST_AUTO_TEST_CASE(DataSet_get_number_of_elements_test) {
-//        std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-//        std::vector<double> inp, out;
-//
-//        for (int i = 0; i < 3; i++) {
-//            inp.push_back(i);
-//            out.push_back(i + 4);
-//        }
-//        data_vec.emplace_back(std::make_pair(inp, out));
-//        inp.clear();
-//        out.clear();
-//        for (int i = 8; i < 11; i++) {
-//            inp.push_back(i);
-//            out.push_back(i + 4);
-//        }
-//        data_vec.emplace_back(std::make_pair(inp, out));
-//
-//        lib4neuro::DataSet DataSet(&data_vec);
-//
-//        //Test of correct number of elements
-//        BOOST_CHECK_EQUAL(2, DataSet.get_n_elements());
-//    }
-//
-///**
-// * Test of print_data method
-// */
-//    BOOST_AUTO_TEST_CASE(DataSet_print_data_test) {
-//        std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-//        std::vector<double> inp, out;
-//
-//        for (int i = 0; i < 1; i++) {
-//            inp.push_back(i);
-//            out.push_back(i + 4);
-//        }
-//
-//        data_vec.emplace_back(std::make_pair(inp, out));
-//        lib4neuro::DataSet DataSet(&data_vec);
-//		std::stringstream buffer;
-//		std::streambuf * old = std::cout.rdbuf(buffer.rdbuf());
-//		DataSet.print_data();
-//
-//
-//        //Test of correct print of DataSet
-//		std::string text = buffer.str();
-//        BOOST_CHECK(text.compare("0 -> 4 \n"));
-//		std::cout.rdbuf(old);
-//
-//    }
-//
-///**
-// * Test of store_text method
-// */
-//    BOOST_AUTO_TEST_CASE(DataSet_store_text_test) {
-//        std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
-//        std::vector<double> inp, out;
-//
-//        for (int i = 0; i < 3; i++) {
-//            inp.push_back(i);
-//            out.push_back(i + 4);
-//        }
-//
-//        data_vec.emplace_back(std::make_pair(inp, out));
-//
-//        lib4neuro::DataSet DataSet(&data_vec);
-//        int elements = DataSet.get_n_elements();
-//        std::string filename = "testDataSet";
-//        DataSet.store_text(filename);
-//
-//        //Test of correct file creations
-//        lib4neuro::DataSet newDataSet(filename);
-//
-//        //Test of correct number of element from lib4neuro::DataSet from file
-//        BOOST_CHECK_EQUAL(elements, newDataSet.get_n_elements());
-//
-//        // removing of created file
-////        remove(filename);
-//    }
 
 BOOST_AUTO_TEST_SUITE_END()
-- 
GitLab