diff --git a/src/Network/NeuralNetwork.cpp b/src/Network/NeuralNetwork.cpp
index a492ddeb05c70f0eb209e2fbf2bba39eabe7f151..9a94bb54c0e91eefff4586d9f5e5199d1b649246 100644
--- a/src/Network/NeuralNetwork.cpp
+++ b/src/Network/NeuralNetwork.cpp
@@ -2,12 +2,11 @@
  * DESCRIPTION OF THE FILE
  *
  * @author Michal Kravčenko
- * @date 13.6.18 - 
+ * @date 13.6.18 -
  */
 
 #include <iostream>
-#include <4neuro.h>
-#include <NetConnection/ConnectionFunctionConstant.h>
+#include "../NetConnection/ConnectionFunctionConstant.h"
 
 #include "message.h"
 #include "NeuralNetwork.h"
@@ -271,9 +270,9 @@ namespace lib4neuro {
         double potential, bias;
         int    bias_idx;
 
-        this->copy_parameter_space(custom_weights_and_biases);
+        this->copy_parameter_space(custom_weights_and_biases);  // TODO: rewrite so that the original parameters are not modified!
 
         this->analyze_layer_structure();
 
         /* reset of the output and the neuron potentials */
         ::std::fill(output.begin(),
@@ -292,6 +291,7 @@ namespace lib4neuro {
         for (auto layer: this->neuron_layers_feedforward) {
             /* we iterate through all neurons in this layer and propagate the signal to the neighboring neurons */
 
+//#pragma omp parallel for collapse(3)
             for (auto si: *layer) {
                 bias      = 0.0;
                 bias_idx  = this->neuron_bias_indices.at(si);
@@ -985,7 +985,7 @@ namespace lib4neuro {
                 for (auto ind : previous_layer_neuron_indices) {
                     this->add_connection_simple(ind,
                                                 neuron_id,
-                                                l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
+                                                lib4neuro::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
                 }
             }
         }
@@ -1007,7 +1007,7 @@ namespace lib4neuro {
             for (auto ind : previous_layer_neuron_indices) {
                 this->add_connection_simple(ind,
                                             neuron_id,
-                                            l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
+                                            lib4neuro::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
             }
         }
 
@@ -1057,5 +1057,13 @@ namespace lib4neuro {
         return std::make_pair(*std::min_element(this->connection_weights.begin(), this->connection_weights.end()),
                               *std::max_element(this->connection_weights.begin(), this->connection_weights.end()));
     }
+
+    size_t NeuralNetwork::get_input_neurons_number() {
+        return this->input_neuron_indices.size();
+    }
+
+    size_t NeuralNetwork::get_output_neurons_number() {
+        return this->output_neuron_indices.size();
+    }
 }
 
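The two accessors added at the end of NeuralNetwork.cpp imply matching declarations in NeuralNetwork.h, which is not part of this diff. A minimal sketch of the fragment they require, assuming (from the definitions above) that input_neuron_indices and output_neuron_indices are container members of the class:

namespace lib4neuro {
    class NeuralNetwork {
        // ... existing members and methods ...
    public:
        /* Number of input neurons, i.e. the expected input dimension. */
        size_t get_input_neurons_number();

        /* Number of output neurons, i.e. the produced output dimension. */
        size_t get_output_neurons_number();
    };
}
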
diff --git a/src/examples/acsf2.cpp b/src/examples/acsf2.cpp
index 0efb2cf294b63ca19e40074e7fd36fbade5db666..d4fca3e75e43a983636bbd5c30f449caf5db36aa 100644
--- a/src/examples/acsf2.cpp
+++ b/src/examples/acsf2.cpp
@@ -3,7 +3,7 @@
 //
 #include <exception>
 
-#include <4neuro.h>
+#include <4neuro_public.h>
 
 void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
                                  l4n::ErrorFunction& ef) {
@@ -82,7 +82,7 @@ double optimize_via_LBMQ(l4n::NeuralNetwork& net,
 	double tolerance = 1e-6;
 	double tolerance_gradient = tolerance;
 	double tolerance_parameters = tolerance;
-	
+
     std::cout
         << "***********************************************************************************************************************"
         << std::endl;
@@ -189,7 +189,7 @@ int main() {
         // optimize_via_particle_swarm(net, mse);
         double err1 = optimize_via_LBMQ(net, mse);
         double err2 = optimize_via_gradient_descent(net, mse);
-		
+
 		if(err2 > 0.00001) {
 			throw std::runtime_error("Training was incorrect!");
 		}
@@ -213,4 +213,4 @@ int main() {
     }
 
     return 0;
-}
\ No newline at end of file
+}
diff --git a/src/examples/dev_sandbox.cpp b/src/examples/dev_sandbox.cpp
index 8b4cacca1b93ac1585a225dde996f469313e630b..7cd5277aa4833e42712ea9c130600143e87d1679 100644
--- a/src/examples/dev_sandbox.cpp
+++ b/src/examples/dev_sandbox.cpp
@@ -1,9 +1,11 @@
 //
 // Created by martin on 20.08.19.
 //
-#include <exception>
 
-#include <4neuro.h>
+#define ARMA_ALLOW_FAKE_GCC
+#include <4neuro_public.h>
+#include "../mpi_wrapper.h"
+
 
 void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
                                  l4n::ErrorFunction& ef) {
@@ -54,18 +56,23 @@ void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
 }
 
 double optimize_via_gradient_descent(l4n::NeuralNetwork& net,
-                                   l4n::ErrorFunction& ef) {
+                                     l4n::ErrorFunction& ef) {
 
     std::cout
         << "***********************************************************************************************************************"
         << std::endl;
-    l4n::GradientDescentBB gd(1e-6,
-                              1000,
-                              60000);
 
-    gd.optimize(ef);
+    for (double tol = 1e-1; tol > 1e-6; tol *= 1e-1) {
+        l4n::GradientDescentBB gd(tol,
+                                  500,
+                                  500);
+
+        l4n::LazyLearning lazy_wrapper(gd, tol);
+        lazy_wrapper.optimize(ef);
+    }
+//    gd.optimize(ef);
 
-    net.copy_parameter_space(gd.get_parameters());
+//    net.copy_parameter_space(gd.get_parameters());
 
     /* ERROR CALCULATION */
     double err = ef.eval(nullptr);
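The loop introduced above in optimize_via_gradient_descent replaces one fixed-tolerance GradientDescentBB run with a continuation scheme: each pass is wrapped in l4n::LazyLearning and the target tolerance is tightened tenfold per pass. Below is a self-contained sketch of the same pattern on a toy 1-D objective; the "lazy" skip is an assumption about what LazyLearning does, inferred only from its name and its use in this diff.

#include <cmath>
#include <iostream>

int main() {
    double x = 5.0;                                  // parameter to optimize, objective f(x) = x^2
    auto grad = [](double v) { return 2.0 * v; };    // gradient of f

    for (double tol = 1e-1; tol > 1e-6; tol *= 1e-1) {
        if (x * x < tol) continue;                   // lazy skip: this tolerance level is already met
        while (std::fabs(grad(x)) > tol) {           // one descent pass at the current tolerance
            x -= 0.1 * grad(x);
        }
        std::cout << "tol " << tol << " -> x = " << x << '\n';
    }
    return 0;
}
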
@@ -76,65 +83,93 @@ double optimize_via_gradient_descent(l4n::NeuralNetwork& net,
 }
 
 double optimize_via_LBMQ(l4n::NeuralNetwork& net,
-                                   l4n::ErrorFunction& ef) {
-
-	size_t max_iterations = 10000;
-	size_t batch_size = 0;
-	double tolerance = 1e-6;
-	double tolerance_gradient = tolerance;
-	double tolerance_parameters = tolerance;
-	
+                         l4n::ErrorFunction& ef) {
+
+    size_t max_iterations = 200;
+    size_t batch_size = 0;
+    double tolerance = 1e-4;
+    double tolerance_gradient = tolerance;
+    double tolerance_parameters = tolerance;
+
     std::cout
         << "***********************************************************************************************************************"
         << std::endl;
-    l4n::LevenbergMarquardt lm(
-						   max_iterations,
-                           batch_size,
-                           tolerance,
-                           tolerance_gradient,
-                           tolerance_parameters
-						   );
+//    for( double tol = 1; tol > tolerance; tol *= 0.5 ){
+        l4n::LevenbergMarquardt lm(
+            max_iterations,
+            batch_size,
+            tolerance,
+            tolerance,
+            tolerance
+        );
 
-    lm.optimize( ef );
+        l4n::LazyLearning lazy_wrapper(lm, tolerance);
+        lazy_wrapper.optimize(ef);
+//        lm.optimize(ef);
+//        break;
+//    }
 
-    net.copy_parameter_space(lm.get_parameters());
+//    lm.optimize( ef );
+
+//    net.copy_parameter_space(lm.get_parameters());
 
     /* ERROR CALCULATION */
     double err = ef.eval(nullptr);
     // std::cout << "Run finished! Error of the network[Levenberg-Marquardt]: " << err << std::endl;
 
     /* Just for validation test purposes - NOT necessary for the example to work! */
-	return err;
+    return err;
 }
 
-int main() {
+double optimize_via_NelderMead(l4n::NeuralNetwork& net, l4n::ErrorFunction& ef) {
+    l4n::NelderMead nm(500, 200);
 
-    try{
+    nm.optimize(ef);
+    net.copy_parameter_space(nm.get_parameters());
 
-        /* Specify cutoff functions */
-//        l4n::CutoffFunction1 cutoff1(10.1);
-        l4n::CutoffFunction2 cutoff1(8);
-        //l4n::CutoffFunction2 cutoff2(25);
-//        l4n::CutoffFunction2 cutoff2(15.2);
-//        l4n::CutoffFunction2 cutoff4(10.3);
-//        l4n::CutoffFunction2 cutoff5(12.9);
-//        l4n::CutoffFunction2 cutoff6(11);
+    /* ERROR CALCULATION */
+    double err = ef.eval(nullptr);
+    std::cout << "Run finished! Error of the network[Nelder-Mead]: " << err << std::endl;
 
-        /* Specify symmetry functions */
-//        l4n::G1 sym_f1(&cutoff1);
-        l4n::G2 sym_f2(&cutoff1, 2.09, 0.8);
-        l4n::G2 sym_f3(&cutoff1, 0.01, 0.04);
-//        l4n::G2 sym_f4(&cutoff2, 0.02, 0.04);
-//        l4n::G2 sym_f5(&cutoff2, 2.09, 0.04);
+    /* Just for validation test purposes - NOT necessary for the example to work! */
+    return err;
 
+}
+
+
+int main() {
+    MPI_INIT
+    try{
 
-//        l4n::G3 sym_f4(&cutoff4, 0.3);
-//        l4n::G4 sym_f5(&cutoff5, 0.05, true, 0.05);
-//        l4n::G4 sym_f6(&cutoff5, 0.05, false, 0.05);
-//        l4n::G4 sym_f7(&cutoff6, 0.5, true, 0.05);
-//        l4n::G4 sym_f8(&cutoff6, 0.5, false, 0.05);
+        /* Specify cutoff functions */
+        l4n::CutoffFunction2 cutoff2(8);
 
-        std::vector<l4n::SymmetryFunction*> helium_sym_funcs = {&sym_f2, &sym_f3}; //, &sym_f4, &sym_f5}; //, &sym_f6, &sym_f7, &sym_f8};
+        /* Specify symmetry functions */
+        l4n::G2 sym_f1(&cutoff2, 0.00, 0);
+        l4n::G2 sym_f2(&cutoff2, 0.02, 1);
+        l4n::G2 sym_f3(&cutoff2, 0.04, 2);
+        l4n::G2 sym_f4(&cutoff2, 0.06, 3);
+        l4n::G2 sym_f5(&cutoff2, 0.08, 4);
+        l4n::G2 sym_f6(&cutoff2, 0.10, 5);
+        l4n::G2 sym_f7(&cutoff2, 0.12, 6);
+        l4n::G2 sym_f8(&cutoff2, 0.14, 7);
+        l4n::G2 sym_f9(&cutoff2, 0.16, 8);
+
+        l4n::G5 sym_f10(&cutoff2, 0, -1, 0);
+        l4n::G5 sym_f11(&cutoff2, 0, -1, 3);
+        l4n::G5 sym_f12(&cutoff2, 0, -1, 6);
+        l4n::G5 sym_f13(&cutoff2, 0, -1, 9);
+        l4n::G5 sym_f14(&cutoff2, 0, -1, 12);
+        l4n::G5 sym_f15(&cutoff2, 0, -1, 15);
+
+
+        std::vector<l4n::SymmetryFunction*> helium_sym_funcs = {&sym_f1,
+                                                                &sym_f2,
+                                                                &sym_f3,
+                                                                &sym_f4,
+                                                                &sym_f5,
+                                                                &sym_f6
+                                                                };
 
         l4n::Element helium = l4n::Element("He",
                                            helium_sym_funcs);
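The l4n::G2 instances above sweep the radial parameters on a grid. For reference, the textbook Behler-Parrinello radial symmetry function that G2 models is G2_i = sum over neighbours j of exp(-eta * (R_ij - R_s)^2) * fc(R_ij). The sketch below uses a cosine cutoff and standard parameter names; it makes no claim about l4n::G2's internals or the exact meaning of its constructor arguments.

#include <cmath>
#include <vector>

// Cosine cutoff: decays smoothly to zero at the cutoff radius r_c.
double cosine_cutoff(double r, double r_c) {
    return (r <= r_c) ? 0.5 * (std::cos(M_PI * r / r_c) + 1.0) : 0.0;
}

// Radial symmetry function G2 for one atom: eta controls the Gaussian width,
// r_s shifts its centre; distances holds R_ij for every neighbour j.
double g2(const std::vector<double>& distances, double eta, double r_s, double r_c) {
    double sum = 0.0;
    for (double r : distances) {
        sum += std::exp(-eta * (r - r_s) * (r - r_s)) * cosine_cutoff(r, r_c);
    }
    return sum;
}
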
@@ -142,46 +177,81 @@ int main() {
         elements[l4n::ELEMENT_SYMBOL::He] = &helium;
 
         /* Read data */
-        l4n::XYZReader reader("../../data/HE4+T0.xyz", true);
+        l4n::XYZReader reader("../../data/HE4+T0_000200.xyz", true);
         reader.read();
 
         std::cout << "Finished reading data" << std::endl;
 
         std::shared_ptr<l4n::DataSet> ds = reader.get_acsf_data_set(elements);
+        // ds->print_data();
 
         /* Create a neural network */
         std::unordered_map<l4n::ELEMENT_SYMBOL, std::vector<unsigned int>> n_hidden_neurons;
-        n_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {5, 3, 1};
+        n_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {25, 15, 1};
 
         std::unordered_map<l4n::ELEMENT_SYMBOL, std::vector<l4n::NEURON_TYPE>> type_hidden_neurons;
-		for(int i = 0; i < n_hidden_neurons[l4n::ELEMENT_SYMBOL::He].size() - 1; ++i){
-			type_hidden_neurons[l4n::ELEMENT_SYMBOL::He].push_back(l4n::NEURON_TYPE::LOGISTIC);
-		}
-		type_hidden_neurons[l4n::ELEMENT_SYMBOL::He].push_back(l4n::NEURON_TYPE::LINEAR);
+        type_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LINEAR};
 
         l4n::ACSFNeuralNetwork net(elements, *reader.get_element_list(), reader.contains_charge(), n_hidden_neurons, type_hidden_neurons);
 
-        l4n::MSE mse(&net, ds.get());
-
+        l4n::MSE mse(&net, ds.get(), false);
 
         net.randomize_parameters();
-         // optimize_via_particle_swarm(net, mse);
-       double err1 = optimize_via_LBMQ(net, mse);
-        double err2 = optimize_via_gradient_descent(net, mse);
-		
-        std::cout << "Weights: " << net.get_min_max_weight().first << " " << net.get_min_max_weight().second << std::endl;
+
+        for(size_t i = 0; i < ds->get_data()->at(0).first.size(); i++) {
+            std::cout << ds->get_data()->at(0).first.at(i) << " ";
+            if(i % 2 == 1) {
+                std::cout << std::endl;
+            }
+        }
+        std::cout << "----" << std::endl;
+
+        l4n::ACSFParametersOptimizer param_optim(&mse, &reader);
+        std::vector<l4n::SYMMETRY_FUNCTION_PARAMETER> fitted_params = {l4n::SYMMETRY_FUNCTION_PARAMETER::EXTENSION,
+                                                                       l4n::SYMMETRY_FUNCTION_PARAMETER::SHIFT_MAX,
+                                                                       l4n::SYMMETRY_FUNCTION_PARAMETER::SHIFT,
+                                                                       l4n::SYMMETRY_FUNCTION_PARAMETER::ANGULAR_RESOLUTION};
+//                                                                       l4n::SYMMETRY_FUNCTION_PARAMETER::PERIOD_LENGTH};
+
+//        param_optim.fit_ACSF_parameters(fitted_params,
+//                                        false,
+//                                        50,
+//                                        10,
+//                                        1e-5,
+//                                        0.98,
+//                                        0.085,
+//                                        1e-6);
+
+        for(size_t i = 0; i < mse.get_dataset()->get_data()->at(0).first.size(); i++) {
+            std::cout << mse.get_dataset()->get_data()->at(0).first.at(i) << " ";
+            if(i % 2 == 1) {
+                std::cout << std::endl;
+            }
+        }
+        std::cout << "----" << std::endl;
+
+
+//        optimize_via_particle_swarm(net, mse);
+//
+//        optimize_via_NelderMead(net, mse);
+//
+//
+        double err1 = optimize_via_LBMQ(net, mse);
+//        double err2 = optimize_via_gradient_descent(net, mse);
+
+//        std::cout << "Weights: " << net.get_min_max_weight().first << " " << net.get_min_max_weight().second << std::endl;
 
         /* Print fit comparison with real data */
         std::vector<double> output;
         output.resize(1);
 
-        for(auto e : *ds->get_data()) {
-            for(unsigned int i = 0; i < e.first.size(); i++) {
-                std::cout << e.first.at(i) << " ";
-                if(i % 2 == 1) {
-                    std::cout << std::endl;
-                }
-            }
+        for(auto e : *mse.get_dataset()->get_data()) {
+//            for(size_t i = 0; i < e.first.size(); i++) {
+//                std::cout << e.first.at(i) << " ";
+//                if(i % 2 == 1) {
+//                    std::cout << std::endl;
+//                }
+//            }
             std::cout << "OUTS (DS, predict): " << e.second.at(0) << " ";
             net.eval_single(e.first, output);
             std::cout << output.at(0) << std::endl;
@@ -192,5 +262,10 @@ int main() {
         exit(EXIT_FAILURE);
     }
 
+//    arma::Mat<double> m = {{1,2,3}, {4,5,6}, {7,8,9}};
+//    std::vector<double> v = arma::conv_to<std::vector<double>>::from(arma::vectorise(m));
+
+//    std::cout << arma::stddev(m) << std::endl;
+    MPI_FINISH
     return 0;
 }
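MPI_INIT and MPI_FINISH come from the newly included mpi_wrapper.h, whose contents are not shown in this diff. A plausible shape for such macros, with a hypothetical build flag, is sketched here; the real header may well differ:

// Hypothetical sketch of mpi_wrapper.h; names and guard are assumptions.
#ifdef LIB4NEURO_WITH_MPI
#include <mpi.h>
#define MPI_INIT   MPI_Init(nullptr, nullptr);
#define MPI_FINISH MPI_Finalize();
#else
#define MPI_INIT
#define MPI_FINISH
#endif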