diff --git a/Makefile b/Makefile
index dedecaaabdcd73463ac3a45fdfc75308e193b297..dccdaa5b2ed8c4d6f9c09a830d61e1641770bffa 100644
--- a/Makefile
+++ b/Makefile
@@ -111,108 +111,121 @@ depend:
 .PHONY : depend
 
 #=============================================================================
-# Target rules for targets named NeuralNetworkSum_test
+# Target rules for targets named net_test_2
 
 # Build rule for target.
-NeuralNetworkSum_test: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 NeuralNetworkSum_test
-.PHONY : NeuralNetworkSum_test
+net_test_2: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 net_test_2
+.PHONY : net_test_2
 
 # fast build rule for target.
-NeuralNetworkSum_test/fast:
-	$(MAKE) -f build/CMakeFiles/NeuralNetworkSum_test.dir/build.make build/CMakeFiles/NeuralNetworkSum_test.dir/build
-.PHONY : NeuralNetworkSum_test/fast
+net_test_2/fast:
+	$(MAKE) -f build/CMakeFiles/net_test_2.dir/build.make build/CMakeFiles/net_test_2.dir/build
+.PHONY : net_test_2/fast
 
 #=============================================================================
-# Target rules for targets named particle_test
+# Target rules for targets named neuron_test
 
 # Build rule for target.
-particle_test: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 particle_test
-.PHONY : particle_test
+neuron_test: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 neuron_test
+.PHONY : neuron_test
 
 # fast build rule for target.
-particle_test/fast:
-	$(MAKE) -f build/CMakeFiles/particle_test.dir/build.make build/CMakeFiles/particle_test.dir/build
-.PHONY : particle_test/fast
+neuron_test/fast:
+	$(MAKE) -f build/CMakeFiles/neuron_test.dir/build.make build/CMakeFiles/neuron_test.dir/build
+.PHONY : neuron_test/fast
 
 #=============================================================================
-# Target rules for targets named errorfunction_test
+# Target rules for targets named net_test_1
 
 # Build rule for target.
-errorfunction_test: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 errorfunction_test
-.PHONY : errorfunction_test
+net_test_1: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 net_test_1
+.PHONY : net_test_1
 
 # fast build rule for target.
-errorfunction_test/fast:
-	$(MAKE) -f build/CMakeFiles/errorfunction_test.dir/build.make build/CMakeFiles/errorfunction_test.dir/build
-.PHONY : errorfunction_test/fast
+net_test_1/fast:
+	$(MAKE) -f build/CMakeFiles/net_test_1.dir/build.make build/CMakeFiles/net_test_1.dir/build
+.PHONY : net_test_1/fast
 
 #=============================================================================
-# Target rules for targets named neural_network_test
+# Target rules for targets named net_test_pde_1
 
 # Build rule for target.
-neural_network_test: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 neural_network_test
-.PHONY : neural_network_test
+net_test_pde_1: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 net_test_pde_1
+.PHONY : net_test_pde_1
 
 # fast build rule for target.
-neural_network_test/fast:
-	$(MAKE) -f build/CMakeFiles/neural_network_test.dir/build.make build/CMakeFiles/neural_network_test.dir/build
-.PHONY : neural_network_test/fast
+net_test_pde_1/fast:
+	$(MAKE) -f build/CMakeFiles/net_test_pde_1.dir/build.make build/CMakeFiles/net_test_pde_1.dir/build
+.PHONY : net_test_pde_1/fast
 
 #=============================================================================
-# Target rules for targets named logistic_neuron_test
+# Target rules for targets named 4neuro
 
 # Build rule for target.
-logistic_neuron_test: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 logistic_neuron_test
-.PHONY : logistic_neuron_test
+4neuro: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 4neuro
+.PHONY : 4neuro
 
 # fast build rule for target.
-logistic_neuron_test/fast:
-	$(MAKE) -f build/CMakeFiles/logistic_neuron_test.dir/build.make build/CMakeFiles/logistic_neuron_test.dir/build
-.PHONY : logistic_neuron_test/fast
+4neuro/fast:
+	$(MAKE) -f build/CMakeFiles/4neuro.dir/build.make build/CMakeFiles/4neuro.dir/build
+.PHONY : 4neuro/fast
 
 #=============================================================================
-# Target rules for targets named particle_swarm_test
+# Target rules for targets named net_test_3
 
 # Build rule for target.
-particle_swarm_test: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 particle_swarm_test
-.PHONY : particle_swarm_test
+net_test_3: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 net_test_3
+.PHONY : net_test_3
 
 # fast build rule for target.
-particle_swarm_test/fast:
-	$(MAKE) -f build/CMakeFiles/particle_swarm_test.dir/build.make build/CMakeFiles/particle_swarm_test.dir/build
-.PHONY : particle_swarm_test/fast
+net_test_3/fast:
+	$(MAKE) -f build/CMakeFiles/net_test_3.dir/build.make build/CMakeFiles/net_test_3.dir/build
+.PHONY : net_test_3/fast
 
 #=============================================================================
-# Target rules for targets named dataset_test
+# Target rules for targets named test_cases
 
 # Build rule for target.
-dataset_test: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 dataset_test
-.PHONY : dataset_test
+test_cases: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 test_cases
+.PHONY : test_cases
 
 # fast build rule for target.
-dataset_test/fast:
-	$(MAKE) -f build/CMakeFiles/dataset_test.dir/build.make build/CMakeFiles/dataset_test.dir/build
-.PHONY : dataset_test/fast
+test_cases/fast:
+	$(MAKE) -f build/CMakeFiles/test_cases.dir/build.make build/CMakeFiles/test_cases.dir/build
+.PHONY : test_cases/fast
 
 #=============================================================================
-# Target rules for targets named neuron_serialization_example
+# Target rules for targets named binary_neuron_test
 
 # Build rule for target.
-neuron_serialization_example: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 neuron_serialization_example
-.PHONY : neuron_serialization_example
+binary_neuron_test: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 binary_neuron_test
+.PHONY : binary_neuron_test
 
 # fast build rule for target.
-neuron_serialization_example/fast:
-	$(MAKE) -f build/CMakeFiles/neuron_serialization_example.dir/build.make build/CMakeFiles/neuron_serialization_example.dir/build
-.PHONY : neuron_serialization_example/fast
+binary_neuron_test/fast:
+	$(MAKE) -f build/CMakeFiles/binary_neuron_test.dir/build.make build/CMakeFiles/binary_neuron_test.dir/build
+.PHONY : binary_neuron_test/fast
+
+#=============================================================================
+# Target rules for targets named connection_weight_identity_test
+
+# Build rule for target.
+connection_weight_identity_test: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 connection_weight_identity_test
+.PHONY : connection_weight_identity_test
+
+# fast build rule for target.
+connection_weight_identity_test/fast:
+	$(MAKE) -f build/CMakeFiles/connection_weight_identity_test.dir/build.make build/CMakeFiles/connection_weight_identity_test.dir/build
+.PHONY : connection_weight_identity_test/fast
 
 #=============================================================================
 # Target rules for targets named boost_unit_test
@@ -228,147 +241,147 @@ boost_unit_test/fast:
 .PHONY : boost_unit_test/fast
 
 #=============================================================================
-# Target rules for targets named connection_weight_identity_test
+# Target rules for targets named dataset_test
 
 # Build rule for target.
-connection_weight_identity_test: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 connection_weight_identity_test
-.PHONY : connection_weight_identity_test
+dataset_test: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 dataset_test
+.PHONY : dataset_test
 
 # fast build rule for target.
-connection_weight_identity_test/fast:
-	$(MAKE) -f build/CMakeFiles/connection_weight_identity_test.dir/build.make build/CMakeFiles/connection_weight_identity_test.dir/build
-.PHONY : connection_weight_identity_test/fast
+dataset_test/fast:
+	$(MAKE) -f build/CMakeFiles/dataset_test.dir/build.make build/CMakeFiles/dataset_test.dir/build
+.PHONY : dataset_test/fast
 
 #=============================================================================
-# Target rules for targets named net_test_ode_1
+# Target rules for targets named neuron_serialization_example
 
 # Build rule for target.
-net_test_ode_1: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 net_test_ode_1
-.PHONY : net_test_ode_1
+neuron_serialization_example: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 neuron_serialization_example
+.PHONY : neuron_serialization_example
 
 # fast build rule for target.
-net_test_ode_1/fast:
-	$(MAKE) -f build/CMakeFiles/net_test_ode_1.dir/build.make build/CMakeFiles/net_test_ode_1.dir/build
-.PHONY : net_test_ode_1/fast
+neuron_serialization_example/fast:
+	$(MAKE) -f build/CMakeFiles/neuron_serialization_example.dir/build.make build/CMakeFiles/neuron_serialization_example.dir/build
+.PHONY : neuron_serialization_example/fast
 
 #=============================================================================
-# Target rules for targets named binary_neuron_test
+# Target rules for targets named particle_swarm_test
 
 # Build rule for target.
-binary_neuron_test: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 binary_neuron_test
-.PHONY : binary_neuron_test
+particle_swarm_test: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 particle_swarm_test
+.PHONY : particle_swarm_test
 
 # fast build rule for target.
-binary_neuron_test/fast:
-	$(MAKE) -f build/CMakeFiles/binary_neuron_test.dir/build.make build/CMakeFiles/binary_neuron_test.dir/build
-.PHONY : binary_neuron_test/fast
+particle_swarm_test/fast:
+	$(MAKE) -f build/CMakeFiles/particle_swarm_test.dir/build.make build/CMakeFiles/particle_swarm_test.dir/build
+.PHONY : particle_swarm_test/fast
 
 #=============================================================================
-# Target rules for targets named test_cases
+# Target rules for targets named particle_test
 
 # Build rule for target.
-test_cases: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 test_cases
-.PHONY : test_cases
+particle_test: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 particle_test
+.PHONY : particle_test
 
 # fast build rule for target.
-test_cases/fast:
-	$(MAKE) -f build/CMakeFiles/test_cases.dir/build.make build/CMakeFiles/test_cases.dir/build
-.PHONY : test_cases/fast
+particle_test/fast:
+	$(MAKE) -f build/CMakeFiles/particle_test.dir/build.make build/CMakeFiles/particle_test.dir/build
+.PHONY : particle_test/fast
 
 #=============================================================================
-# Target rules for targets named net_test_3
+# Target rules for targets named linear_neuron_test
 
 # Build rule for target.
-net_test_3: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 net_test_3
-.PHONY : net_test_3
+linear_neuron_test: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 linear_neuron_test
+.PHONY : linear_neuron_test
 
 # fast build rule for target.
-net_test_3/fast:
-	$(MAKE) -f build/CMakeFiles/net_test_3.dir/build.make build/CMakeFiles/net_test_3.dir/build
-.PHONY : net_test_3/fast
+linear_neuron_test/fast:
+	$(MAKE) -f build/CMakeFiles/linear_neuron_test.dir/build.make build/CMakeFiles/linear_neuron_test.dir/build
+.PHONY : linear_neuron_test/fast
 
 #=============================================================================
-# Target rules for targets named 4neuro
+# Target rules for targets named connection_weight_test
 
 # Build rule for target.
-4neuro: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 4neuro
-.PHONY : 4neuro
+connection_weight_test: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 connection_weight_test
+.PHONY : connection_weight_test
 
 # fast build rule for target.
-4neuro/fast:
-	$(MAKE) -f build/CMakeFiles/4neuro.dir/build.make build/CMakeFiles/4neuro.dir/build
-.PHONY : 4neuro/fast
+connection_weight_test/fast:
+	$(MAKE) -f build/CMakeFiles/connection_weight_test.dir/build.make build/CMakeFiles/connection_weight_test.dir/build
+.PHONY : connection_weight_test/fast
 
 #=============================================================================
-# Target rules for targets named net_test_1
+# Target rules for targets named net_test_ode_1
 
 # Build rule for target.
-net_test_1: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 net_test_1
-.PHONY : net_test_1
+net_test_ode_1: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 net_test_ode_1
+.PHONY : net_test_ode_1
 
 # fast build rule for target.
-net_test_1/fast:
-	$(MAKE) -f build/CMakeFiles/net_test_1.dir/build.make build/CMakeFiles/net_test_1.dir/build
-.PHONY : net_test_1/fast
+net_test_ode_1/fast:
+	$(MAKE) -f build/CMakeFiles/net_test_ode_1.dir/build.make build/CMakeFiles/net_test_ode_1.dir/build
+.PHONY : net_test_ode_1/fast
 
 #=============================================================================
-# Target rules for targets named neuron_test
+# Target rules for targets named logistic_neuron_test
 
 # Build rule for target.
-neuron_test: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 neuron_test
-.PHONY : neuron_test
+logistic_neuron_test: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 logistic_neuron_test
+.PHONY : logistic_neuron_test
 
 # fast build rule for target.
-neuron_test/fast:
-	$(MAKE) -f build/CMakeFiles/neuron_test.dir/build.make build/CMakeFiles/neuron_test.dir/build
-.PHONY : neuron_test/fast
+logistic_neuron_test/fast:
+	$(MAKE) -f build/CMakeFiles/logistic_neuron_test.dir/build.make build/CMakeFiles/logistic_neuron_test.dir/build
+.PHONY : logistic_neuron_test/fast
 
 #=============================================================================
-# Target rules for targets named net_test_2
+# Target rules for targets named neural_network_test
 
 # Build rule for target.
-net_test_2: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 net_test_2
-.PHONY : net_test_2
+neural_network_test: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 neural_network_test
+.PHONY : neural_network_test
 
 # fast build rule for target.
-net_test_2/fast:
-	$(MAKE) -f build/CMakeFiles/net_test_2.dir/build.make build/CMakeFiles/net_test_2.dir/build
-.PHONY : net_test_2/fast
+neural_network_test/fast:
+	$(MAKE) -f build/CMakeFiles/neural_network_test.dir/build.make build/CMakeFiles/neural_network_test.dir/build
+.PHONY : neural_network_test/fast
 
 #=============================================================================
-# Target rules for targets named connection_weight_test
+# Target rules for targets named errorfunction_test
 
 # Build rule for target.
-connection_weight_test: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 connection_weight_test
-.PHONY : connection_weight_test
+errorfunction_test: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 errorfunction_test
+.PHONY : errorfunction_test
 
 # fast build rule for target.
-connection_weight_test/fast:
-	$(MAKE) -f build/CMakeFiles/connection_weight_test.dir/build.make build/CMakeFiles/connection_weight_test.dir/build
-.PHONY : connection_weight_test/fast
+errorfunction_test/fast:
+	$(MAKE) -f build/CMakeFiles/errorfunction_test.dir/build.make build/CMakeFiles/errorfunction_test.dir/build
+.PHONY : errorfunction_test/fast
 
 #=============================================================================
-# Target rules for targets named linear_neuron_test
+# Target rules for targets named NeuralNetworkSum_test
 
 # Build rule for target.
-linear_neuron_test: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 linear_neuron_test
-.PHONY : linear_neuron_test
+NeuralNetworkSum_test: cmake_check_build_system
+	$(MAKE) -f CMakeFiles/Makefile2 NeuralNetworkSum_test
+.PHONY : NeuralNetworkSum_test
 
 # fast build rule for target.
-linear_neuron_test/fast:
-	$(MAKE) -f build/CMakeFiles/linear_neuron_test.dir/build.make build/CMakeFiles/linear_neuron_test.dir/build
-.PHONY : linear_neuron_test/fast
+NeuralNetworkSum_test/fast:
+	$(MAKE) -f build/CMakeFiles/NeuralNetworkSum_test.dir/build.make build/CMakeFiles/NeuralNetworkSum_test.dir/build
+.PHONY : NeuralNetworkSum_test/fast
 
 # Help Target
 help:
@@ -378,26 +391,27 @@ help:
 	@echo "... depend"
 	@echo "... rebuild_cache"
 	@echo "... edit_cache"
-	@echo "... NeuralNetworkSum_test"
-	@echo "... particle_test"
-	@echo "... errorfunction_test"
-	@echo "... neural_network_test"
-	@echo "... logistic_neuron_test"
-	@echo "... particle_swarm_test"
+	@echo "... net_test_2"
+	@echo "... neuron_test"
+	@echo "... net_test_1"
+	@echo "... net_test_pde_1"
+	@echo "... 4neuro"
+	@echo "... net_test_3"
+	@echo "... test_cases"
+	@echo "... binary_neuron_test"
+	@echo "... connection_weight_identity_test"
+	@echo "... boost_unit_test"
 	@echo "... dataset_test"
 	@echo "... neuron_serialization_example"
-	@echo "... boost_unit_test"
-	@echo "... connection_weight_identity_test"
-	@echo "... net_test_ode_1"
-	@echo "... binary_neuron_test"
-	@echo "... test_cases"
-	@echo "... net_test_3"
-	@echo "... 4neuro"
-	@echo "... net_test_1"
-	@echo "... neuron_test"
-	@echo "... net_test_2"
-	@echo "... connection_weight_test"
+	@echo "... particle_swarm_test"
+	@echo "... particle_test"
 	@echo "... linear_neuron_test"
+	@echo "... connection_weight_test"
+	@echo "... net_test_ode_1"
+	@echo "... logistic_neuron_test"
+	@echo "... neural_network_test"
+	@echo "... errorfunction_test"
+	@echo "... NeuralNetworkSum_test"
 .PHONY : help
 
 
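For orientation, each renamed target keeps the two-rule pattern used throughout this generated Makefile: the plain rule re-runs the build-system check, while the /fast variant dispatches straight to the target's build.make and skips that check. A minimal usage sketch (assuming the Makefile is invoked from its own directory):

    make net_test_pde_1        # full CMake check, then build the new PDE test driver
    make net_test_pde_1/fast   # no re-check; rebuild only this target
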
diff --git a/include/4neuro.h b/include/4neuro.h
index 5776cdd38be57fa333ddc27e5cb5bdfbd5c87e2c..e0d1efca8bf22b61b097a6437235c2ec19a3edd1 100644
--- a/include/4neuro.h
+++ b/include/4neuro.h
@@ -10,16 +10,13 @@
 #include "../src/DataSet/DataSet.h"
 #include "../src/ErrorFunction/ErrorFunctions.h"
 #include "../src/LearningMethods/ParticleSwarm.h"
-#include "../src/NetConnection/Connection.h"
-#include "../src/NetConnection/ConnectionWeight.h"
-#include "../src/NetConnection/ConnectionWeightIdentity.h"
+#include "../src/NetConnection/ConnectionFunctionGeneral.h"
+#include "../src/NetConnection/ConnectionFunctionIdentity.h"
 #include "../src/Network/NeuralNetwork.h"
 #include "../src/Network/NeuralNetworkSum.h"
 #include "../src/Neuron/Neuron.h"
 #include "../src/Neuron/NeuronBinary.h"
 #include "../src/Neuron/NeuronLinear.h"
 #include "../src/Neuron/NeuronLogistic.h"
-#include "../src/Neuron/NeuronNeuralNet.h"
-#include "../src/Neuron/NeuronTanh.h"
 
 #endif //INC_4NEURO_4NEURO_H
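With the umbrella header trimmed to the renamed connection classes, one include is enough for a consumer. A hedged sketch, not a verbatim example from the project (the two-argument constructor comes from the NeuralNetwork.cpp hunk further below; the sizes are illustrative):

    #include "4neuro.h"

    int main() {
        NeuralNetwork net(4, 2);  // 4 connection weights, 2 neuron biases
        return 0;
    }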
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 83b069287ac4371f57a579a43cfeb83d24ad6662..17c8048b172d3a05041eb4728b432c64ce9c9113 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -5,15 +5,12 @@ add_library(4neuro SHARED
         Neuron/NeuronBinary.cpp
         Neuron/NeuronLinear.cpp
         Neuron/NeuronLogistic.cpp
-        Neuron/NeuronTanh.cpp
-        NetConnection/Connection.cpp
         Network/NeuralNetwork.cpp
-        Neuron/NeuronNeuralNet.cpp
-        NetConnection/ConnectionWeight.cpp
-        NetConnection/ConnectionWeightIdentity.cpp
+        NetConnection/ConnectionFunctionGeneral.cpp
+        NetConnection/ConnectionFunctionIdentity.cpp
         LearningMethods/ParticleSwarm.cpp
         DataSet/DataSet.cpp
-        ErrorFunction/ErrorFunctions.cpp Network/NeuralNetworkSum.cpp Network/NeuralNetworkSum.h Solvers/DESolver.cpp Solvers/DESolver.h)
+        ErrorFunction/ErrorFunctions.cpp
+        Network/NeuralNetworkSum.cpp
+        Network/NeuralNetworkSum.h
+        Solvers/DESolver.cpp
+        Solvers/DESolver.h
+        Neuron/NeuronConstant.cpp
+        Neuron/NeuronConstant.h)
 
 target_link_libraries(4neuro boost_serialization)
 
@@ -40,6 +37,9 @@ target_link_libraries(net_test_3 4neuro)
 add_executable(net_test_ode_1 net_test_ode_1.cpp)
 target_link_libraries(net_test_ode_1 4neuro)
 
+add_executable(net_test_pde_1 net_test_pde_1.cpp)
+target_link_libraries(net_test_pde_1 4neuro)
+
 ##############
 # UNIT TESTS #
 ##############
@@ -57,15 +57,10 @@ target_link_libraries(binary_neuron_test boost_unit_test 4neuro)
 add_executable(logistic_neuron_test tests/NeuronLogistic_test.cpp)
 target_link_libraries(logistic_neuron_test boost_unit_test 4neuro)
 
-add_executable(tanh_neuron_test tests/NeuronTanh.cpp)
-target_link_libraries(tanh_neuron_test boost_unit_test 4neuro)
 
 add_executable(connection_weight_test tests/ConnectionWeight_test.cpp)
 target_link_libraries(connection_weight_test boost_unit_test 4neuro)
 
-add_executable(connection_test tests/Connection_test.cpp)
-target_link_libraries(connection_test boost_unit_test 4neuro)
-
 add_executable(neural_network_test tests/NeuralNetwork_test.cpp)
 target_link_libraries(neural_network_test boost_unit_test 4neuro)
 
diff --git a/src/ErrorFunction/ErrorFunctions.cpp b/src/ErrorFunction/ErrorFunctions.cpp
index e7cac11dc4f19c0f6bce70a76487fca53e768146..db09859032b596ecaaa9a7260d67908b690bc794 100644
--- a/src/ErrorFunction/ErrorFunctions.cpp
+++ b/src/ErrorFunction/ErrorFunctions.cpp
@@ -3,7 +3,6 @@
 //
 
 #include <vector>
-#include <utility>
 
 #include "ErrorFunctions.h"
 
@@ -14,10 +13,10 @@ size_t ErrorFunction::get_dimension() {
 MSE::MSE(NeuralNetwork *net, DataSet *ds) {
     this->net = net;
     this->ds = ds;
-    this->dimension = net->get_n_weights();
+    this->dimension = net->get_n_weights() + net->get_n_biases();
 }
 
-double MSE::eval(double *weights) {
+double MSE::eval(std::vector<double> *weights) {
     unsigned int dim_out = this->ds->get_output_dim();
 //    unsigned int dim_in = this->ds->get_input_dim();
     size_t n_elements = this->ds->get_n_elements();
@@ -66,7 +65,7 @@ ErrorSum::~ErrorSum(){
     }
 }
 
-double ErrorSum::eval(double *weights) {
+double ErrorSum::eval(std::vector<double> *weights) {
     double output = 0.0;
 
     for( unsigned int i = 0; i < this->summand->size(); ++i ){
diff --git a/src/ErrorFunction/ErrorFunctions.h b/src/ErrorFunction/ErrorFunctions.h
index 03ffbd334adcbe470953c0962e64edc019965b4a..0f447583d43b141670f891363a53e8d2adc37b3d 100644
--- a/src/ErrorFunction/ErrorFunctions.h
+++ b/src/ErrorFunction/ErrorFunctions.h
@@ -20,7 +20,7 @@ public:
      * @param weights
      * @return
      */
-    virtual double eval(double* weights) = 0;
+    virtual double eval(std::vector<double>* weights = nullptr) = 0;
     
     /**
      * 
@@ -51,7 +51,7 @@ public:
      * @param weights
      * @return
      */
-    virtual double eval(double* weights = nullptr);
+    virtual double eval(std::vector<double>* weights = nullptr);
 
 private:
 
@@ -76,7 +76,7 @@ public:
      * @param weights
      * @return
      */
-    virtual double eval(double* weights = nullptr);
+    virtual double eval(std::vector<double>* weights = nullptr);
 
     /**
      *
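Both overrides now share the vector-based signature with a nullptr default. A hedged sketch of a caller under the new interface (network and data-set construction elided; per the MSE constructor above, the dimension counts weights plus biases, and what the nullptr default evaluates is not shown in this excerpt):

    MSE mse(&net, &ds);
    std::vector<double> candidate(mse.get_dimension(), 0.5);  // one entry per weight and bias
    double error = mse.eval(&candidate);                      // candidates now travel as std::vector<double>*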
diff --git a/src/LearningMethods/ParticleSwarm.cpp b/src/LearningMethods/ParticleSwarm.cpp
index 2a6b2f8bdbcf96f380f6e4783cbbffdc999decae..62cedd36f640e835697056775091a80c5cb65637 100644
--- a/src/LearningMethods/ParticleSwarm.cpp
+++ b/src/LearningMethods/ParticleSwarm.cpp
@@ -6,7 +6,6 @@
  */
 
 #include "ParticleSwarm.h"
-#include "../ErrorFunction/ErrorFunctions.h"
 
 /**
  * TODO
@@ -19,30 +18,32 @@ Particle::Particle(ErrorFunction* ef, double *domain_bounds) {
     //TODO better generating of random numbers
     this->coordinate_dim = ef->get_dimension();
 
-    this->coordinate = new double[this->coordinate_dim];
-
-    this->velocity = new double[this->coordinate_dim];
-
+    this->coordinate = new std::vector<double>(this->coordinate_dim);
+    this->velocity = new std::vector<double>(this->coordinate_dim);
+    this->optimal_coordinate = new std::vector<double>(this->coordinate_dim);
 
 
+    std::random_device seeder;
+    std::mt19937 gen(seeder());
+    std::uniform_real_distribution<double> dist_vel(0.5, 1.0);
     for(unsigned int i = 0; i < this->coordinate_dim; ++i){
-        this->velocity[i] = (rand() % 100001 - 50000) / (double) 50000;
+        (*this->velocity)[i] = dist_vel(gen);
     }
-//    this->r1 = (rand() % 100001) / (double) 100000;
-//    this->r2 = (rand() % 100001) / (double) 100000;
-    this->r1 = 1.0;
-    this->r2 = 1.0;
-    this->r3 = 1.0;
 
-    this->optimal_coordinate = new double[this->coordinate_dim];
+    this->r1 = dist_vel(gen);
+    this->r2 = dist_vel(gen);
+    this->r3 = dist_vel(gen);
+
 
-    this->ef = ef;
 
+    this->ef = ef;
+    std::uniform_real_distribution<double> dist_coord(-1.0, 1.0);
     this->domain_bounds = domain_bounds;
     for(unsigned int i = 0; i < this->coordinate_dim; ++i){
-        this->coordinate[i] = (rand() % 100001) / (double)100000 + domain_bounds[2 * i] / (domain_bounds[2 * i + 1] - domain_bounds[2 * i]);
-        this->optimal_coordinate[i] = this->coordinate[i];
+        (*this->coordinate)[i] = dist_coord(gen);
+        (*this->optimal_coordinate)[i] = (*this->coordinate)[i];
     }
+//    (*this->coordinate) = {-6.35706416, -1.55399893, -1.05305639,  3.07464411};
 
 //    printf("coordinate_dim: %d\n", this->coordinate_dim);
     this->optimal_value = this->ef->eval(this->coordinate);
@@ -53,20 +54,20 @@ Particle::Particle(ErrorFunction* ef, double *domain_bounds) {
 Particle::~Particle() {
 
     if( this->optimal_coordinate ){
-        delete [] this->optimal_coordinate;
+        delete this->optimal_coordinate;
     }
 
     if( this->coordinate ){
-        delete [] this->coordinate;
+        delete this->coordinate;
     }
 
     if( this->velocity ){
-        delete [] this->velocity;
+        delete this->velocity;
     }
 
 }
 
-double* Particle::get_coordinate() {
+std::vector<double>* Particle::get_coordinate() {
     return this->coordinate;
 }
 
@@ -80,11 +81,11 @@ double Particle::get_optimal_value() {
 
 void Particle::get_optimal_coordinate(std::vector<double> &ref_coordinate) {
     for( unsigned int i = 0; i < this->coordinate_dim; ++i ){
-        ref_coordinate[i] = this->optimal_coordinate[i];
+        ref_coordinate[i] = (*this->optimal_coordinate)[i];
     }
 }
 
-double Particle::change_coordinate(double w, double c1, double c2, std::vector<double> glob_min_coord, std::vector<std::vector<double>> global_min_vec, double penalty_coef) {
+double Particle::change_coordinate(double w, double c1, double c2, std::vector<double> &glob_min_coord, std::vector<std::vector<double>> &global_min_vec, double penalty_coef) {
 
     /**
      * v = w * v + c1r1(p_min_loc - x) + c2r2(p_min_glob - x) + c3r3(random_global_min - x)
@@ -92,32 +93,33 @@ double Particle::change_coordinate(double w, double c1, double c2, std::vector<d
      */
 
     double vel_mem;
-    double output;
+    double output = 0.0;
     bool in_domain;
     double compensation_coef = 1;
 
     /* Choose random global minima */
-    std::vector<double> random_global_best(this->coordinate_dim);
+    std::vector<double> *random_global_best;
     std::random_device rand_dev;
     std::mt19937 engine{rand_dev()};
     std::uniform_int_distribution<int> dist(0, global_min_vec.size() - 1);
-    random_global_best = global_min_vec[dist(engine)];
+    random_global_best = &global_min_vec[dist(engine)];
 
     // TODO use std::sample to choose random vector
     //std::sample(global_min_vec.begin(), global_min_vec.end(), std::back_inserter(random_global_best), 1, std::mt19937{std::random_device{}()});
 
     for(unsigned int i = 0; i < this->coordinate_dim; ++i){
-        vel_mem = w * this->velocity[i]
-                + c1 * this->r1 * (this->optimal_coordinate[i] - this->coordinate[i])
-                + c2 * this->r2 * (glob_min_coord[i] - this->coordinate[i])
-                + (c1+c2)/2 * this->r3 * (random_global_best[i] - this->coordinate[i]);
+        vel_mem = w * (*this->velocity)[i]
+                + c1 * this->r1 * ((*this->optimal_coordinate)[i] - (*this->coordinate)[i])
+                + c2 * this->r2 * (glob_min_coord[i] - (*this->coordinate)[i])
+                + (c1+c2)/2 * this->r3 * ((*random_global_best)[i] - (*this->coordinate)[i]);
 
         do{
-            if (this->coordinate[i] + vel_mem > this->domain_bounds[2 * i + 1]) {
+//            printf("%f + %f ?? %f, %f\n", (*this->coordinate)[i], vel_mem, this->domain_bounds[2 * i + 1], this->domain_bounds[2 * i]);
+            if ((*this->coordinate)[i] + vel_mem > this->domain_bounds[2 * i + 1]) {
                 in_domain = false;
                 vel_mem = -penalty_coef * compensation_coef * w * vel_mem;
                 compensation_coef /= 2;
-            } else if (this->coordinate[i] + vel_mem < this->domain_bounds[2 * i]) {
+            } else if ((*this->coordinate)[i] + vel_mem < this->domain_bounds[2 * i]) {
                 in_domain = false;
                 vel_mem = penalty_coef * compensation_coef * w * vel_mem;
                 compensation_coef /= 2;
@@ -127,8 +129,8 @@ double Particle::change_coordinate(double w, double c1, double c2, std::vector<d
             }
         }while(!in_domain);
 
-        this->velocity[i] = vel_mem;
-        this->coordinate[i] += vel_mem;
+        (*this->velocity)[i] = vel_mem;
+        (*this->coordinate)[i] += vel_mem;
 
         output += std::abs(vel_mem);
     }
@@ -139,7 +141,7 @@ double Particle::change_coordinate(double w, double c1, double c2, std::vector<d
     if(vel_mem < this->optimal_value){
         this->optimal_value = vel_mem;
         for(unsigned int i = 0; i < this->coordinate_dim; ++i){
-            this->optimal_coordinate[i] = this->coordinate[i];
+            (*this->optimal_coordinate)[i] = (*this->coordinate)[i];
         }
     }
 
@@ -148,17 +150,17 @@ double Particle::change_coordinate(double w, double c1, double c2, std::vector<d
 
 void Particle::print_coordinate() {
     for(unsigned int i = 0; i < this->coordinate_dim - 1; ++i){
-        printf("%10.8f, ", this->coordinate[i]);
+        printf("%10.8f, ", (*this->coordinate)[i]);
     }
-    printf("%10.8f\n", this->coordinate[this->coordinate_dim - 1]);
+    printf("%10.8f\n", (*this->coordinate)[this->coordinate_dim - 1]);
 }
 
 ParticleSwarm::ParticleSwarm(ErrorFunction* ef, double *domain_bounds,
-                             double c1, double c2, double w, unsigned int n_particles, unsigned int iter_max) {
+                             double c1, double c2, double w, size_t n_particles, size_t iter_max) {
 
     srand(time(NULL));
 
-    //this->func = F;
+    this->f = ef;
 
     this->func_dim = ef->get_dimension();
 
@@ -176,7 +178,7 @@ ParticleSwarm::ParticleSwarm(ErrorFunction* ef, double *domain_bounds,
 
     this->particle_swarm = new Particle*[this->n_particles];
 
-    for( unsigned int pi = 0; pi < this->n_particles; ++pi ){
+    for( size_t pi = 0; pi < this->n_particles; ++pi ){
         this->particle_swarm[pi] = new Particle(ef, domain_bounds);
     }
 
@@ -188,13 +190,18 @@ ParticleSwarm::ParticleSwarm(ErrorFunction* ef, double *domain_bounds,
 ParticleSwarm::~ParticleSwarm() {
 
     if( this->particle_swarm ){
-        for( unsigned int i = 0; i < this->n_particles; ++i ){
+        for( size_t i = 0; i < this->n_particles; ++i ){
             delete this->particle_swarm[i];
         }
 
         delete [] this->particle_swarm;
     }
 
+    if(this->p_min_glob){
+        delete this->p_min_glob;
+        this->p_min_glob = nullptr;
+    }
+
 }
 
 /**
@@ -207,16 +214,18 @@ void ParticleSwarm::optimize( double gamma, double epsilon, double delta) {
     if(epsilon < 0 || gamma < 0 || delta < 0) {
         throw std::invalid_argument("Parameters 'gamma', 'epsilon' and 'delta' must be greater than or equal to zero!");
     }
+    if(!this->p_min_glob){
+        this->p_min_glob = new std::vector<double>(this->func_dim);
+    }
 
-    unsigned int outer_it = 0;
+    size_t outer_it = 0;
     Particle *particle;
-    std::vector<double> p_min_glob(this->func_dim);
+
     std::vector<std::vector<double>> global_best_vec;
     double optimal_value = 0.0;
 
     std::set<Particle*> cluster; //!< Particles in a cluster
-    double* coords;
-    coords = new double[this->func_dim]; //<! Centroid coordinates
+    std::vector<double>* centroid = new std::vector<double>(this->func_dim); //!< Centroid coordinates
 
     double tmp_velocity;
     double prev_max_velocity = 0;
@@ -225,6 +234,12 @@ void ParticleSwarm::optimize( double gamma, double epsilon, double delta) {
     double prev_max_vel_step;
     double euclidean_dist;
 
+    this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value);
+//    for(unsigned int i = 0; i < this->n_particles; ++i){
+//        this->particle_swarm[i]->print_coordinate();
+//    }
+    printf("Initial best value: %10.8f\n", optimal_value);
+
     while( outer_it < this->iter_max ) {
         max_velocity = 0;
         euclidean_dist = 0;
@@ -232,33 +247,34 @@ void ParticleSwarm::optimize( double gamma, double epsilon, double delta) {
         //////////////////////////////////////////////////
         // Clustering algorithm - termination condition //
         //////////////////////////////////////////////////
-        particle = this->determine_optimal_coordinate_and_value(p_min_glob, optimal_value);
+        particle = this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value);
 
-        if(std::find(global_best_vec.begin(), global_best_vec.end(), p_min_glob) == global_best_vec.end()) {
-            global_best_vec.emplace_back(p_min_glob); // TODO rewrite as std::set
+        if(std::find(global_best_vec.begin(), global_best_vec.end(), *this->p_min_glob) == global_best_vec.end()) {
+            global_best_vec.emplace_back(*this->p_min_glob); // TODO rewrite as std::set
         }
 
         cluster.insert(particle);
 
         //for(unsigned int i=0; i < 5; i++) {
             /* Zero AVG coordinates */
-            std::fill(coords, coords+this->func_dim, 0);
+            std::fill(centroid->begin(), centroid->end(), 0);
+            std::vector<double> *c_ptr;
 
             /* Looking for a centroid */
             for (auto it : cluster) {
-
-                for (unsigned int di = 0; di < this->func_dim; di++) {
-                    coords[di] += it->get_coordinate()[di];
+                c_ptr = it->get_coordinate();
+                for (size_t di = 0; di < this->func_dim; di++) {
+                    (*centroid)[di] += (*c_ptr)[di];
                 }
             }
 
-            for(unsigned int di = 0; di < this->func_dim; di++) {
-                coords[di] /= cluster.size();
+            for(size_t di = 0; di < this->func_dim; di++) {
+                (*centroid)[di] /= cluster.size();
             }
 
-            for(unsigned int pi=0; pi < this->n_particles; pi++) {
+            for(size_t pi=0; pi < this->n_particles; pi++) {
                 particle = this->particle_swarm[pi];
-                tmp_velocity = particle->change_coordinate( this->w, this->c1, this->c2, p_min_glob, global_best_vec);
+                tmp_velocity = particle->change_coordinate( this->w, this->c1, this->c2, *this->p_min_glob, global_best_vec);
 
                 if(tmp_velocity > max_velocity) {
                     prev_max_velocity = max_velocity;
@@ -270,9 +286,9 @@ void ParticleSwarm::optimize( double gamma, double epsilon, double delta) {
 
                 // TODO - only in verbose mode
                 // only for info purposes
-                euclidean_dist += this->get_euclidean_distance(particle->get_coordinate(), coords, this->func_dim);
+                euclidean_dist += this->get_euclidean_distance(particle->get_coordinate(), centroid);
 
-                if(this->get_euclidean_distance(particle->get_coordinate(), coords, this->func_dim) < epsilon) {
+                if(this->get_euclidean_distance(particle->get_coordinate(), centroid) < epsilon) {
                     cluster.insert(particle);
                 }
             }
@@ -283,8 +299,8 @@ void ParticleSwarm::optimize( double gamma, double epsilon, double delta) {
 
         //TODO only in verbose mode
         euclidean_dist /= this->n_particles;
-        //printf("Iteration %d, avg euclidean distance: %f, cluster percent: %f, f-value: %f\n", outer_it, euclidean_dist,
-//               double(cluster.size())/this->n_particles, optimal_value);
+        printf("Iteration %d, avg euclidean distance: %f, cluster percent: %f, f-value: %f\n", (int)outer_it, euclidean_dist,
+               double(cluster.size())/this->n_particles, optimal_value);
 
 //        for(unsigned int i=0; i < this->n_particles; i++) {
 //            printf("Particle %d (%f): \n", i, this->particle_swarm[i]->get_current_value());
@@ -294,7 +310,7 @@ void ParticleSwarm::optimize( double gamma, double epsilon, double delta) {
 //        }
 
         /* Check if the particles are near to each other AND the maximal velocity is less than 'gamma' */
-        if(double(cluster.size())/this->n_particles > delta && std::abs(prev_max_vel_step/max_vel_step) > gamma) {
+        if(cluster.size() > delta * this->n_particles && std::abs(prev_max_vel_step/max_vel_step) > gamma) {
             break;
         }
 
@@ -302,24 +318,25 @@ void ParticleSwarm::optimize( double gamma, double epsilon, double delta) {
 //        this->w *= 0.99;
     }
 
+    this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value);
     if(outer_it < this->iter_max) {
         /* Convergence reached */
 
-        printf("Found optimum in %d iterations: %10.8f at coordinates: \n", outer_it, optimal_value);
-        for (unsigned int i = 0; i <= this->func_dim - 1; ++i) {
-            printf("%10.8f \n", p_min_glob[i]);
+        printf("Found optimum in %d iterations: %10.8f at coordinates: \n", (int)outer_it, optimal_value);
+        for (size_t i = 0; i < this->func_dim; ++i) {
+            printf("%10.8f \n", (*this->p_min_glob)[i]);
         }
     } else {
         /* Maximal number of iterations reached */
 
-        printf("Max number of iterations reached (%d)! Found value %10.8f at coordinates: \n", outer_it, optimal_value);
-        for (unsigned int i = 0; i <= this->func_dim - 1; ++i) {
-            printf("\t%10.8f \n", p_min_glob[i]);
+        printf("Max number of iterations reached (%d)! Found value %10.8f at coordinates: \n", (int)outer_it, optimal_value);
+        for (size_t i = 0; i < this->func_dim; ++i) {
+            printf("\t%10.8f \n", (*this->p_min_glob)[i]);
         }
     }
 
-    //delete [] p_min_glob; // TODO
-    delete [] coords;
+    delete centroid;
 }
 
 Particle* ParticleSwarm::determine_optimal_coordinate_and_value(std::vector<double> &coord, double &val) {
@@ -330,7 +347,7 @@ Particle* ParticleSwarm::determine_optimal_coordinate_and_value(std::vector<doub
     this->particle_swarm[0]->get_optimal_coordinate(coord);
     p = this->particle_swarm[0];
 
-    for(unsigned int i = 1; i < this->n_particles; ++i){
+    for(size_t i = 1; i < this->n_particles; ++i){
 
         double val_m = this->particle_swarm[i]->get_optimal_value( );
 
@@ -344,31 +361,35 @@ Particle* ParticleSwarm::determine_optimal_coordinate_and_value(std::vector<doub
     return p;
 }
 
-double* ParticleSwarm::get_centroid_coordinates() {
-    double* coords = new double[this->func_dim];
-    double* tmp;
+std::vector<double>* ParticleSwarm::get_centroid_coordinates() {
+    std::vector<double>* coords = new std::vector<double>(this->func_dim);
+    std::vector<double>* tmp;
 
-    for (unsigned int pi = 0; pi < this->n_particles; pi++) {
+    for (size_t pi = 0; pi < this->n_particles; pi++) {
         tmp = this->particle_swarm[pi]->get_coordinate();
 
-        for (unsigned int di = 0; di < this->func_dim; di++) {
-            coords[di] += tmp[di];
+        for (size_t di = 0; di < this->func_dim; di++) {
+            (*coords)[di] += (*tmp)[di];
         }
     }
 
-    for(unsigned int di = 0; di < this->func_dim; di++) {
-        coords[di] /= this->n_particles;
+    for(size_t di = 0; di < this->func_dim; di++) {
+        (*coords)[di] /= this->n_particles;
     }
 
     return coords;
 }
 
-double ParticleSwarm::get_euclidean_distance(double* a, double* b, unsigned int n) {
-    double dist = 0;
-    for(unsigned int i = 0; i < n; i++) {
-        if((a[i]-b[i]) * (a[i]-b[i]) > 1000) {
-        }
-        dist += ((a[i]-b[i]) * (a[i]-b[i]));
+double ParticleSwarm::get_euclidean_distance(std::vector<double>* a, std::vector<double>* b) {
+    double dist = 0, m;
+    for(size_t i = 0; i < a->size(); i++) {
+        m = (*a)[i]-(*b)[i];
+        m *= m;
+        dist += m;
     }
     return std::sqrt(dist);
+}
+
+std::vector<double>* ParticleSwarm::get_solution() {
+    return this->p_min_glob;
 }
\ No newline at end of file
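For reference, the update implemented by change_coordinate (per the comment and loop above) reads, per coordinate i:

    v_i \leftarrow w\,v_i + c_1 r_1 (p^{loc}_i - x_i) + c_2 r_2 (p^{glob}_i - x_i) + \tfrac{c_1 + c_2}{2}\, r_3 (p^{rand}_i - x_i), \qquad x_i \leftarrow x_i + v_i

where p^{loc} is the particle's own best coordinate, p^{glob} the swarm's best, and p^{rand} a randomly chosen entry of global_min_vec; a step that would leave [domain_bounds[2i], domain_bounds[2i+1]] is reversed and damped by the penalty loop until it lands inside the box.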
diff --git a/src/LearningMethods/ParticleSwarm.h b/src/LearningMethods/ParticleSwarm.h
index f98258a00140632504834856eb71755fd293a2a9..65e058a54a13b2b176a63586ef8f8bca10a7d3b5 100644
--- a/src/LearningMethods/ParticleSwarm.h
+++ b/src/LearningMethods/ParticleSwarm.h
@@ -26,10 +26,10 @@ class Particle{
 private:
 
     size_t coordinate_dim;
-    double *coordinate = nullptr;
-    double *velocity;
+    std::vector<double> *coordinate = nullptr;
+    std::vector<double> *velocity = nullptr;
 
-    double *optimal_coordinate = nullptr;
+    std::vector<double> *optimal_coordinate = nullptr;
     double optimal_value;
 
     double r1;
@@ -42,10 +42,15 @@ private:
 
     double *domain_bounds;
 
-    void print_coordinate();
+
 
 public:
 
+    /**
+     * Prints the particle's current coordinate to the standard output.
+     */
+    void print_coordinate();
+
     /**
      *
-     * @param f_dim
+     * @param ef
+     * @param domain_bounds
@@ -57,7 +62,7 @@ public:
      *
      * @return
      */
-    double* get_coordinate();
+    std::vector<double>* get_coordinate();
 
     /**
      *
@@ -85,7 +90,7 @@ public:
      * @param glob_min_coord
      * @param penalty_coef
      */
-    double change_coordinate(double w, double c1, double c2, std::vector<double> glob_min_coord, std::vector<std::vector<double>> global_min_vec, double penalty_coef=0.25);
+    double change_coordinate(double w, double c1, double c2, std::vector<double> &glob_min_coord, std::vector<std::vector<double>> &global_min_vec, double penalty_coef=0.25);
 };
 
 
@@ -101,13 +106,13 @@ private:
     /**
      *
      */
-    double (*func)(NeuralNetwork, double*, DataSet);
+    ErrorFunction* f;
 
     size_t func_dim;
 
-    unsigned int n_particles;
+    size_t n_particles;
 
-    unsigned int iter_max;
+    size_t iter_max;
 
     double c1;
 
@@ -121,6 +126,8 @@ private:
 
     double *domain_bounds;
 
+    std::vector<double> *p_min_glob = nullptr;
+
 protected:
     /**
      *
@@ -134,7 +141,7 @@ protected:
      *
      * @return
      */
-    double* get_centroid_coordinates();
+    std::vector<double>* get_centroid_coordinates();
 
     /**
      *
@@ -143,7 +150,7 @@ protected:
-     * @param n
      * @return
      */
-    double get_euclidean_distance(double* a, double* b, unsigned int n);
+    double get_euclidean_distance(std::vector<double>* a, std::vector<double>* b);
 
 public:
 
@@ -158,7 +165,7 @@ public:
      * @param n_particles
      * @param iter_max
      */
-    ParticleSwarm( ErrorFunction* ef, double* domain_bounds, double c1, double c2, double w, unsigned int n_particles, unsigned int iter_max = 1 );
+    ParticleSwarm( ErrorFunction* ef, double* domain_bounds, double c1, double c2, double w, size_t n_particles, size_t iter_max = 1 );
 
     /**
      *
@@ -174,6 +181,12 @@ public:
      */
     void optimize( double gamma, double epsilon, double delta=0.7 );
 
+    /**
+     * Returns the best coordinate found by the swarm.
+     * @return pointer to the internally stored global optimum
+     */
+    std::vector<double>* get_solution();
+
 
 };
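
A hedged end-to-end sketch of the reworked interface (mse stands for any ErrorFunction, e.g. the MSE above; the swarm constants are illustrative, and the lower/upper bounds layout follows domain_bounds[2 * i] and domain_bounds[2 * i + 1] in the implementation):

    size_t dim = mse.get_dimension();
    std::vector<double> bounds(2 * dim);
    for (size_t i = 0; i < dim; ++i) {
        bounds[2 * i]     = -10.0;  // lower bound of coordinate i
        bounds[2 * i + 1] =  10.0;  // upper bound of coordinate i
    }
    ParticleSwarm swarm(&mse, bounds.data(), 1.7, 1.7, 0.7, 100, 1000);
    swarm.optimize(0.5, 0.02);                         // gamma, epsilon; delta keeps its 0.7 default
    std::vector<double>* best = swarm.get_solution();  // owned by the swarm, freed in its destructor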
 
diff --git a/src/NetConnection/ConnectionFunctionGeneral.cpp b/src/NetConnection/ConnectionFunctionGeneral.cpp
index 88f4fd579f6951b5fa8856902fc6a66e7db51f17..dcbb6e158bfea8de12af434ff84f38e1bcda6ffa 100644
--- a/src/NetConnection/ConnectionFunctionGeneral.cpp
+++ b/src/NetConnection/ConnectionFunctionGeneral.cpp
@@ -6,58 +6,31 @@
  */
 
 
-#include "ConnectionWeight.h"
+#include "ConnectionFunctionGeneral.h"
 
-ConnectionWeight::ConnectionWeight() {
+ConnectionFunctionGeneral::ConnectionFunctionGeneral() {
 
 
 }
 
-ConnectionWeight::ConnectionWeight(int param_count,  std::vector<double>* w_array) {
-    this->param_indices = new int[param_count];
-    this->n_params = param_count;
+ConnectionFunctionGeneral::ConnectionFunctionGeneral(std::vector<double>* w_array, std::vector<unsigned int> &param_indices, std::string &function_string) {
+    this->param_indices = new std::vector<unsigned int>(param_indices);
     this->weight_array = w_array;
 }
 
-ConnectionWeight::~ConnectionWeight() {
+ConnectionFunctionGeneral::~ConnectionFunctionGeneral() {
     if(this->param_indices){
-        delete [] this->param_indices;
+        delete this->param_indices;
         this->param_indices = nullptr;
     }
 }
 
-void ConnectionWeight::adjust_weights(double *values) {
-    for(int i = 0; i < this->n_params; ++i){
-        this->weight_array->at(this->param_indices[i]) += values[i];
-    }
-}
+double ConnectionFunctionGeneral::eval() {
+    //TODO
 
-std::vector<double> ConnectionWeight::get_weights(){
-    return *this->weight_array;
+    return 0.0;
 }
 
-void ConnectionWeight::set_weights(double *values) {
-    for(int i = 0; i < this->n_params; ++i){
-        this->weight_array->at(this->param_indices[i]) = values[i];
-    }
+void ConnectionFunctionGeneral::eval_partial_derivative(std::vector<double> &weight_gradient, double alpha) {
+    //TODO
 }
-
-void ConnectionWeight::SetParamIndices(int *param_indices) {
-    for(int i = 0; i < this->n_params; ++i){
-        this->SetParamIndex(param_indices[i], i);
-    }
-}
-
-void ConnectionWeight::SetParamIndex(int value, int idx) {
-    this->param_indices[idx] = value;
-}
-
-double ConnectionWeight::eval() {
-    double product = 1;
-    for(auto e : *this->weight_array) {
-        product *= e;
-    }
-
-    return product;
-}
-
diff --git a/src/NetConnection/ConnectionFunctionGeneral.h b/src/NetConnection/ConnectionFunctionGeneral.h
index 96079cf4222c24eaa59a68c3b8da30a6f7bd994c..26761cc600801ceedab056988bbef2e024a6ab55 100644
--- a/src/NetConnection/ConnectionFunctionGeneral.h
+++ b/src/NetConnection/ConnectionFunctionGeneral.h
@@ -11,7 +11,7 @@
 #include <functional>
 #include <vector>
 
-class ConnectionWeight {
+class ConnectionFunctionGeneral {
 protected:
     /**
      *
@@ -21,68 +21,40 @@ protected:
     /**
      *
      */
-    int* param_indices = nullptr;
-
-    /**
-     *
-     */
-    int n_params = 0;
+    std::vector<unsigned int> *param_indices = nullptr;
 
 public:
 
     /**
      *
      */
-    ConnectionWeight();
+    ConnectionFunctionGeneral();
 
     /**
      *
-     * @param param_count
-     * @param f
+     * @param w_array
+     * @param param_indices
+     * @param function_string
      */
-    ConnectionWeight(int param_count, std::vector<double>* w_array);
+    ConnectionFunctionGeneral(std::vector<double>* w_array, std::vector<unsigned int> &param_indices, std::string &function_string);
 
     /**
      *
-     * @param value
-     * @param idx
      */
-    void SetParamIndex(int value, int idx);
+    virtual ~ConnectionFunctionGeneral();
 
-    /**
-     *
-     * @param param_ptr
-     */
-    void SetParamIndices(int* param_ptr);
-
-    /**
-     *
-     */
-    virtual ~ConnectionWeight();
 
     /**
      *
      * @return
      */
-    virtual double eval();
+    virtual double eval( );
 
     /**
-     *
-     * @param values
+     * Performs partial derivative of this transfer function according to all parameters. Adds the values multiplied
+     * by alpha to the corresponding gradient vector
      */
+    virtual void eval_partial_derivative(std::vector<double> &weight_gradient, double alpha);
 
-    void set_weights(double *values);
-
-    /**
-     *
-     * @return vector of weights
-     */
-    std::vector<double> get_weights();
-    /**
-     *
-     * @param values
-     */
-    void adjust_weights(double *values);
 
 };
 
diff --git a/src/NetConnection/ConnectionFunctionIdentity.cpp b/src/NetConnection/ConnectionFunctionIdentity.cpp
index b69160f489296576ca71e5cb99a6ada39c8a7eaf..20a1d2de604e2f213e9ae7bf3effddc95d199964 100644
--- a/src/NetConnection/ConnectionFunctionIdentity.cpp
+++ b/src/NetConnection/ConnectionFunctionIdentity.cpp
@@ -5,12 +5,17 @@
  * @date 14.6.18 -
  */
 
-#include "ConnectionWeightIdentity.h"
+#include "ConnectionFunctionIdentity.h"
 
-ConnectionWeightIdentity::ConnectionWeightIdentity(double * val) {
+ConnectionFunctionIdentity::ConnectionFunctionIdentity(double * val, size_t pidx) {
     this->value = val;
+    this->param_idx = pidx;
 }
 
-double ConnectionWeightIdentity::eval() {
+double ConnectionFunctionIdentity::eval() {
     return (*this->value);
+}
+
+void ConnectionFunctionIdentity::eval_partial_derivative(std::vector<double> &weight_gradient, double alpha) {
+    weight_gradient[this->param_idx] += alpha;
 }
\ No newline at end of file
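A toy illustration of the identity connection's two methods (values hypothetical): eval returns the referenced weight, and since the derivative of the identity with respect to its parameter is 1, eval_partial_derivative simply adds alpha at the connection's parameter slot:

    double w = 0.5;                               // weight storage the connection points into
    ConnectionFunctionIdentity conn(&w, 3);       // identity over parameter index 3
    std::vector<double> gradient(8, 0.0);
    double out = conn.eval();                     // out == 0.5
    conn.eval_partial_derivative(gradient, 2.0);  // gradient[3] += 2.0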
diff --git a/src/NetConnection/ConnectionFunctionIdentity.h b/src/NetConnection/ConnectionFunctionIdentity.h
index d1d9c8d0f934c08812667f07addb884a1d4fad26..4887ed65d099d2d1a1d6271e700b765ccca70f26 100644
--- a/src/NetConnection/ConnectionFunctionIdentity.h
+++ b/src/NetConnection/ConnectionFunctionIdentity.h
@@ -15,21 +15,29 @@ class ConnectionFunctionGeneral;
 /**
  *
  */
-class ConnectionWeightIdentity:public ConnectionFunctionGeneral {
+class ConnectionFunctionIdentity:public ConnectionFunctionGeneral {
 private:
     double * value = nullptr;
+    size_t param_idx;
 public:
 
     /**
      *
      */
-    ConnectionWeightIdentity(double * value_ptr);
+    ConnectionFunctionIdentity(double * value_ptr, size_t pidx);
 
     /**
      *
      * @return
      */
     double eval() override;
+
+    /**
+     * Adds alpha to the gradient entry that belongs to this connection's parameter.
+     * @param weight_gradient gradient vector updated in place
+     * @param alpha scaling factor of the contribution
+     */
+    void eval_partial_derivative(std::vector<double> &weight_gradient, double alpha) override;
 };
 
 
diff --git a/src/Network/NeuralNetwork.cpp b/src/Network/NeuralNetwork.cpp
index 66246c83fd46a6e3d11e525f10c8f9b1fc5a86a6..5861b049a5c287ad63d62f291fabfa0132479d99 100644
--- a/src/Network/NeuralNetwork.cpp
+++ b/src/Network/NeuralNetwork.cpp
@@ -9,410 +9,489 @@
 #include <boost/random/mersenne_twister.hpp>
 #include <boost/random/uniform_real_distribution.hpp>
 #include "NeuralNetwork.h"
-#include "../NetConnection/ConnectionWeightIdentity.h"
 
 NeuralNetwork::NeuralNetwork() {
     this->neurons = new std::vector<Neuron*>(0);
-    this->connection_weights = new std::vector<double>(0);
+    this->neuron_biases = nullptr;
+    this->neuron_potentials = new std::vector<double>(0);
 
-    this->connection_weights->reserve(0);
+    this->connection_weights = nullptr;
+    this->connection_list = new std::vector<ConnectionFunctionGeneral*>(0);
+    this->inward_adjacency = new std::vector<std::vector<std::pair<size_t, size_t>>*>(0);
+    this->outward_adjacency = new std::vector<std::vector<std::pair<size_t, size_t>>*>(0);
 
-    this->delete_weights = true;
-}
-
-NeuralNetwork* NeuralNetwork::get_subnet(std::vector<size_t> &input_neuron_indices, std::vector<size_t> &output_neuron_indices){
-    NeuralNetwork *output_net = nullptr;
-
-    Neuron * active_neuron, * target_neuron;
 
-    size_t n = this->neurons->size();
-    bool *part_of_subnetwork = new bool[n];
-    std::fill(part_of_subnetwork, part_of_subnetwork + n, false);
+    this->neuron_layers_feedforward = new std::vector<std::vector<size_t>*>(0);
+    this->neuron_layers_feedbackward = new std::vector<std::vector<size_t>*>(0);
 
-    bool *is_reachable_from_source = new bool[n];
-    bool *is_reachable_from_destination = new bool[n];
-    std::fill(is_reachable_from_source, is_reachable_from_source + n, false);
-    std::fill(is_reachable_from_destination, is_reachable_from_destination + n, false);
-
-    bool *visited_neurons = new bool[n];
-    std::fill(visited_neurons, visited_neurons + n, false);
-
-    size_t active_set_size[2];
-    active_set_size[0] = 0;
-    active_set_size[1] = 0;
-    size_t * active_neuron_set = new size_t[2 * n];
-    size_t idx1 = 0, idx2 = 1;
-
-    /* MAPPING BETWEEN NEURONS AND THEIR INDICES */
-    size_t idx = 0, idx_target;
-    for(Neuron *v: *this->neurons){
-        v->set_idx( idx );
-        idx++;
-    }
-
-    /* INITIAL STATE FOR THE FORWARD PASS */
-    for(size_t i: input_neuron_indices ){
-
-        if( i < 0 || i >= n){
-            //invalid index
-            continue;
-        }
-        active_neuron_set[idx1 * n + active_set_size[idx1]] = i;
-        active_set_size[idx1]++;
+    this->delete_weights = false;
+    this->delete_biases = false;
+    this->layers_analyzed = false;
+    this->in_out_determined = false;
 
-        visited_neurons[i] = true;
-    }
+    this->last_used_bias_idx = 0;
+    this->last_used_weight_idx = 0;
+}
 
-    /* FORWARD PASS */
-    while(active_set_size[idx1] > 0){
+NeuralNetwork::NeuralNetwork(size_t n_connection_weights, size_t n_biases) {
 
-        //we iterate through the active neurons and propagate the signal
-        for(int i = 0; i < active_set_size[idx1]; ++i){
-            idx = active_neuron_set[i];
+    this->neurons = new std::vector<Neuron*>(0);
+    this->neuron_biases = new std::vector<double>(n_biases);
+    this->neuron_potentials = new std::vector<double>(0);
 
-            is_reachable_from_source[ idx ] = true;
+    this->connection_weights = new std::vector<double>(n_connection_weights);
+    this->connection_list = new std::vector<ConnectionFunctionGeneral*>(0);
+    this->inward_adjacency = new std::vector<std::vector<std::pair<size_t, size_t>>*>(0);
+    this->outward_adjacency = new std::vector<std::vector<std::pair<size_t, size_t>>*>(0);
 
-            active_neuron = this->neurons->at( idx );
+    this->neuron_layers_feedforward = new std::vector<std::vector<size_t>*>(0);
+    this->neuron_layers_feedbackward = new std::vector<std::vector<size_t>*>(0);
 
-            for(Connection* connection: *(active_neuron->get_connections_out())){
+    this->delete_weights = true;
+    this->delete_biases = true;
+    this->layers_analyzed = false;
+    this->in_out_determined = false;
 
-                target_neuron = connection->get_neuron_out( );
-                idx_target = target_neuron->get_idx( );
+    this->last_used_bias_idx = 0;
+    this->last_used_weight_idx = 0;
+}
 
-                if( visited_neurons[idx_target] ){
-                    //this neuron was already analyzed
-                    continue;
-                }
+NeuralNetwork::~NeuralNetwork() {
 
-                visited_neurons[idx_target] = true;
-                active_neuron_set[active_set_size[idx2] + n * idx2] = idx_target;
-                active_set_size[idx2]++;
-            }
+    if(this->neurons){
+        for( auto n: *this->neurons ){
+            delete n;
+            n = nullptr;
         }
-        idx1 = idx2;
-        idx2 = (idx1 + 1) % 2;
-        active_set_size[idx2] = 0;
+        delete this->neurons;
+        this->neurons = nullptr;
     }
 
+    if(this->neuron_potentials){
+        delete this->neuron_potentials;
+        this->neuron_potentials = nullptr;
+    }
 
-    /* INITIAL STATE FOR THE FORWARD PASS */
-    active_set_size[0] = active_set_size[1] = 0;
-    std::fill(visited_neurons, visited_neurons + n, false);
-
-    for(size_t i: output_neuron_indices ){
-
-        if( i < 0 || i >= n){
-            //invalid index
-            continue;
-        }
-        active_neuron_set[idx1 * n + active_set_size[idx1]] = i;
-        active_set_size[idx1]++;
-
-        visited_neurons[i] = true;
+    if(this->output_neuron_indices){
+        delete this->output_neuron_indices;
+        this->output_neuron_indices = nullptr;
     }
 
-    /* BACKWARD PASS */
-    size_t n_new_neurons = 0;
-    while(active_set_size[idx1] > 0){
+    if(this->input_neuron_indices){
+        delete this->input_neuron_indices;
+        this->input_neuron_indices = nullptr;
+    }
 
-        //we iterate through the active neurons and propagate the signal
-        for(int i = 0; i < active_set_size[idx1]; ++i){
-            idx = active_neuron_set[i];
+    if(this->connection_weights && this->delete_weights){
+        delete this->connection_weights;
+        this->connection_weights = nullptr;
+    }
 
-            is_reachable_from_destination[ idx ] = true;
+    if(this->neuron_biases && this->delete_biases){
+        delete this->neuron_biases;
+        this->neuron_biases = nullptr;
+    }
 
-            active_neuron = this->neurons->at( idx );
+    if(this->connection_list){
 
-            if(is_reachable_from_source[ idx ]){
-                n_new_neurons++;
+        if(this->delete_weights){
+            for(auto c: *this->connection_list){
+                delete c;
+                c = nullptr;
             }
+        }
+        delete this->connection_list;
+        this->connection_list = nullptr;
+    }
 
-            for(Connection* connection: *(active_neuron->get_connections_in())){
-
-                target_neuron = connection->get_neuron_in( );
-                idx_target = target_neuron->get_idx( );
-
-                if( visited_neurons[idx_target] ){
-                    //this neuron was already analyzed
-                    continue;
-                }
-
-                visited_neurons[idx_target] = true;
-                active_neuron_set[active_set_size[idx2] + n * idx2] = idx_target;
-                active_set_size[idx2]++;
+    if(this->inward_adjacency){
+        for(auto e: *this->inward_adjacency){
+            if(e){
+                delete e;
+                e = nullptr;
             }
         }
-        idx1 = idx2;
-        idx2 = (idx1 + 1) % 2;
-        active_set_size[idx2] = 0;
+        delete this->inward_adjacency;
+        this->inward_adjacency = nullptr;
     }
 
-    /* FOR CONSISTENCY REASONS */
-    for(size_t in: input_neuron_indices){
-        if( !is_reachable_from_destination[in] ){
-            n_new_neurons++;
+    if(this->outward_adjacency){
+        for(auto e: *this->outward_adjacency){
+            if(e){
+                delete e;
+                e = nullptr;
+            }
         }
-        is_reachable_from_destination[in] = true;
+        delete this->outward_adjacency;
+        this->outward_adjacency = nullptr;
     }
-    /* FOR CONSISTENCY REASONS */
-    for(size_t in: output_neuron_indices){
-        if( !is_reachable_from_source[in] ){
-            n_new_neurons++;
+
+    if(this->neuron_layers_feedforward){
+        for(auto e: *this->neuron_layers_feedforward){
+            delete e;
+            e = nullptr;
         }
-        is_reachable_from_source[in] = true;
+        delete this->neuron_layers_feedforward;
+        this->neuron_layers_feedforward = nullptr;
     }
 
-    /* WE FORM THE SET OF NEURONS IN THE OUTPUT NETWORK  */
-    if(n_new_neurons > 0){
-//        printf("Number of new neurons: %d\n", n_new_neurons);
-        output_net = new NeuralNetwork();
-        output_net->set_weight_array( this->connection_weights );
-
-        std::vector<size_t > local_inputs(0), local_outputs(0);
-        local_inputs.reserve(input_neuron_indices.size());
-        local_outputs.reserve(output_neuron_indices.size());
-
-        std::vector<Neuron*> local_n_arr(0);
-        local_n_arr.reserve( n_new_neurons );
-
-        std::vector<Neuron*> local_local_n_arr(0);
-        local_local_n_arr.reserve( n_new_neurons );
-
-        int * neuron_local_mapping = new int[ n ];
-        std::fill(neuron_local_mapping, neuron_local_mapping + n, -1);
-        idx = 0;
-        for(size_t i = 0; i < n; ++i){
-            if(is_reachable_from_source[i] && is_reachable_from_destination[i]){
-                neuron_local_mapping[i] = (int)idx;
-                idx++;
-
-                Neuron *new_neuron = this->neurons->at(i)->get_copy( );
-
-                output_net->add_neuron( new_neuron );
-                local_local_n_arr.push_back( new_neuron );
-                local_n_arr.push_back( this->neurons->at(i) );
-            }
-        }
-        for(size_t in: input_neuron_indices){
-            local_inputs.push_back(neuron_local_mapping[in]);
+    if(this->neuron_layers_feedbackward){
+        for(auto e: *this->neuron_layers_feedbackward){
+            delete e;
+            e = nullptr;
         }
-        for(size_t in: output_neuron_indices){
-            local_outputs.push_back(neuron_local_mapping[in]);
-        }
-
-//        printf("%d\n", local_n_arr.size());
-//        printf("inputs: %d, outputs: %d\n", local_inputs.size(), local_outputs.size());
-        int local_idx_1, local_idx_2;
-        for(Neuron* source_neuron: local_n_arr){
-            //we also add the relevant edges
-            local_idx_1 = neuron_local_mapping[source_neuron->get_idx()];
-
-            for(Connection* connection: *(source_neuron->get_connections_out( ))){
-                target_neuron = connection->get_neuron_out();
-
-                local_idx_2 = neuron_local_mapping[target_neuron->get_idx()];
-                if(local_idx_2 >= 0){
-                    //this edge is part of the subnetwork
-                    Connection* new_connection = connection->get_copy( local_local_n_arr[local_idx_1], local_local_n_arr[local_idx_2] );
+        delete this->neuron_layers_feedbackward;
+        this->neuron_layers_feedbackward = nullptr;
+    }
+}
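+
+/* Ownership note (sketch): the 'delete_weights' and 'delete_biases' flags
+ * guard the two parameter vectors, so that networks sharing their parameter
+ * space via set_parameter_space_pointers() do not free the parent's storage
+ * a second time in this destructor. */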
 
-                    local_local_n_arr[local_idx_1]->add_connection_out(new_connection);
-                    local_local_n_arr[local_idx_2]->add_connection_in(new_connection);
+NeuralNetwork* NeuralNetwork::get_subnet(std::vector<size_t> &input_neuron_indices, std::vector<size_t> &output_neuron_indices){
+    NeuralNetwork *output_net = nullptr;
+// TODO rework due to the changed structure of the class
+//    Neuron * active_neuron, * target_neuron;
+//
+//    size_t n = this->neurons->size();
+//    bool *part_of_subnetwork = new bool[n];
+//    std::fill(part_of_subnetwork, part_of_subnetwork + n, false);
+//
+//    bool *is_reachable_from_source = new bool[n];
+//    bool *is_reachable_from_destination = new bool[n];
+//    std::fill(is_reachable_from_source, is_reachable_from_source + n, false);
+//    std::fill(is_reachable_from_destination, is_reachable_from_destination + n, false);
+//
+//    bool *visited_neurons = new bool[n];
+//    std::fill(visited_neurons, visited_neurons + n, false);
+//
+//    size_t active_set_size[2];
+//    active_set_size[0] = 0;
+//    active_set_size[1] = 0;
+//    size_t * active_neuron_set = new size_t[2 * n];
+//    size_t idx1 = 0, idx2 = 1;
+//
+//    /* MAPPING BETWEEN NEURONS AND THEIR INDICES */
+//    size_t idx = 0, idx_target;
+//    for(Neuron *v: *this->neurons){
+//        v->set_idx( idx );
+//        idx++;
+//    }
+//
+//    /* INITIAL STATE FOR THE FORWARD PASS */
+//    for(size_t i: input_neuron_indices ){
+//
+//        if( i < 0 || i >= n){
+//            //invalid index
+//            continue;
+//        }
+//        active_neuron_set[idx1 * n + active_set_size[idx1]] = i;
+//        active_set_size[idx1]++;
+//
+//        visited_neurons[i] = true;
+//    }
+//
+//    /* FORWARD PASS */
+//    while(active_set_size[idx1] > 0){
+//
+//        //we iterate through the active neurons and propagate the signal
+//        for(int i = 0; i < active_set_size[idx1]; ++i){
+//            idx = active_neuron_set[i];
+//
+//            is_reachable_from_source[ idx ] = true;
+//
+//            active_neuron = this->neurons->at( idx );
+//
+//            for(Connection* connection: *(active_neuron->get_connections_out())){
+//
+//                target_neuron = connection->get_neuron_out( );
+//                idx_target = target_neuron->get_idx( );
+//
+//                if( visited_neurons[idx_target] ){
+//                    //this neuron was already analyzed
+//                    continue;
+//                }
+//
+//                visited_neurons[idx_target] = true;
+//                active_neuron_set[active_set_size[idx2] + n * idx2] = idx_target;
+//                active_set_size[idx2]++;
+//            }
+//        }
+//        idx1 = idx2;
+//        idx2 = (idx1 + 1) % 2;
+//        active_set_size[idx2] = 0;
+//    }
+//
+//
+//    /* INITIAL STATE FOR THE FORWARD PASS */
+//    active_set_size[0] = active_set_size[1] = 0;
+//    std::fill(visited_neurons, visited_neurons + n, false);
+//
+//    for(size_t i: output_neuron_indices ){
+//
+//        if( i < 0 || i >= n){
+//            //invalid index
+//            continue;
+//        }
+//        active_neuron_set[idx1 * n + active_set_size[idx1]] = i;
+//        active_set_size[idx1]++;
+//
+//        visited_neurons[i] = true;
+//    }
+//
+//    /* BACKWARD PASS */
+//    size_t n_new_neurons = 0;
+//    while(active_set_size[idx1] > 0){
+//
+//        //we iterate through the active neurons and propagate the signal
+//        for(int i = 0; i < active_set_size[idx1]; ++i){
+//            idx = active_neuron_set[i];
+//
+//            is_reachable_from_destination[ idx ] = true;
+//
+//            active_neuron = this->neurons->at( idx );
+//
+//            if(is_reachable_from_source[ idx ]){
+//                n_new_neurons++;
+//            }
+//
+//            for(Connection* connection: *(active_neuron->get_connections_in())){
+//
+//                target_neuron = connection->get_neuron_in( );
+//                idx_target = target_neuron->get_idx( );
+//
+//                if( visited_neurons[idx_target] ){
+//                    //this neuron was already analyzed
+//                    continue;
+//                }
+//
+//                visited_neurons[idx_target] = true;
+//                active_neuron_set[active_set_size[idx2] + n * idx2] = idx_target;
+//                active_set_size[idx2]++;
+//            }
+//        }
+//        idx1 = idx2;
+//        idx2 = (idx1 + 1) % 2;
+//        active_set_size[idx2] = 0;
+//    }
+//
+//    /* FOR CONSISTENCY REASONS */
+//    for(size_t in: input_neuron_indices){
+//        if( !is_reachable_from_destination[in] ){
+//            n_new_neurons++;
+//        }
+//        is_reachable_from_destination[in] = true;
+//    }
+//    /* FOR CONSISTENCY REASONS */
+//    for(size_t in: output_neuron_indices){
+//        if( !is_reachable_from_source[in] ){
+//            n_new_neurons++;
+//        }
+//        is_reachable_from_source[in] = true;
+//    }
+//
+//    /* WE FORM THE SET OF NEURONS IN THE OUTPUT NETWORK  */
+//    if(n_new_neurons > 0){
+////        printf("Number of new neurons: %d\n", n_new_neurons);
+//        output_net = new NeuralNetwork();
+//        output_net->set_weight_array( this->connection_weights );
+//
+//        std::vector<size_t > local_inputs(0), local_outputs(0);
+//        local_inputs.reserve(input_neuron_indices.size());
+//        local_outputs.reserve(output_neuron_indices.size());
+//
+//        std::vector<Neuron*> local_n_arr(0);
+//        local_n_arr.reserve( n_new_neurons );
+//
+//        std::vector<Neuron*> local_local_n_arr(0);
+//        local_local_n_arr.reserve( n_new_neurons );
+//
+//        int * neuron_local_mapping = new int[ n ];
+//        std::fill(neuron_local_mapping, neuron_local_mapping + n, -1);
+//        idx = 0;
+//        for(size_t i = 0; i < n; ++i){
+//            if(is_reachable_from_source[i] && is_reachable_from_destination[i]){
+//                neuron_local_mapping[i] = (int)idx;
+//                idx++;
+//
+//                Neuron *new_neuron = this->neurons->at(i)->get_copy( );
+//
+//                output_net->add_neuron( new_neuron );
+//                local_local_n_arr.push_back( new_neuron );
+//                local_n_arr.push_back( this->neurons->at(i) );
+//            }
+//        }
+//        for(size_t in: input_neuron_indices){
+//            local_inputs.push_back(neuron_local_mapping[in]);
+//        }
+//        for(size_t in: output_neuron_indices){
+//            local_outputs.push_back(neuron_local_mapping[in]);
+//        }
+//
+////        printf("%d\n", local_n_arr.size());
+////        printf("inputs: %d, outputs: %d\n", local_inputs.size(), local_outputs.size());
+//        int local_idx_1, local_idx_2;
+//        for(Neuron* source_neuron: local_n_arr){
+//            //we also add the relevant edges
+//            local_idx_1 = neuron_local_mapping[source_neuron->get_idx()];
+//
+//            for(Connection* connection: *(source_neuron->get_connections_out( ))){
+//                target_neuron = connection->get_neuron_out();
+//
+//                local_idx_2 = neuron_local_mapping[target_neuron->get_idx()];
+//                if(local_idx_2 >= 0){
+//                    //this edge is part of the subnetwork
+//                    Connection* new_connection = connection->get_copy( local_local_n_arr[local_idx_1], local_local_n_arr[local_idx_2] );
+//
+//                    local_local_n_arr[local_idx_1]->add_connection_out(new_connection);
+//                    local_local_n_arr[local_idx_2]->add_connection_in(new_connection);
+//
+////                    printf("adding a connection between neurons %d, %d\n", local_idx_1, local_idx_2);
+//                }
+//
+//            }
+//
+//        }
+//        output_net->specify_input_neurons(local_inputs);
+//        output_net->specify_output_neurons(local_outputs);
+//
+//
+//        delete [] neuron_local_mapping;
+//    }
+//
+//    delete [] is_reachable_from_source;
+//    delete [] is_reachable_from_destination;
+//    delete [] part_of_subnetwork;
+//    delete [] visited_neurons;
+//    delete [] active_neuron_set;
+//
+//
+    return output_net;
+}
 
-//                    printf("adding a connection between neurons %d, %d\n", local_idx_1, local_idx_2);
-                }
+size_t NeuralNetwork::add_neuron(Neuron *n, int bias_idx) {
 
-            }
+    size_t local_b_idx = 0;
+    if(bias_idx < 0){
+        local_b_idx = this->last_used_bias_idx;
+    }
+    else{
+        local_b_idx = (size_t)bias_idx;
+    }
 
-        }
-        output_net->specify_input_neurons(local_inputs);
-        output_net->specify_output_neurons(local_outputs);
+    if(this->neuron_biases->size() <= local_b_idx){
+        std::cerr << "Additional neuron cannot be added! The bias index is too large" << std::endl;
+        exit(-1);
+    }
 
+    this->outward_adjacency->push_back(new std::vector<std::pair<size_t, size_t>>(0));
+    this->inward_adjacency->push_back(new std::vector<std::pair<size_t, size_t>>(0));
 
-        delete [] neuron_local_mapping;
-    }
+    this->neurons->push_back(n);
+    n->set_bias( &(this->neuron_biases->at( local_b_idx )) );
 
-    delete [] is_reachable_from_source;
-    delete [] is_reachable_from_destination;
-    delete [] part_of_subnetwork;
-    delete [] visited_neurons;
-    delete [] active_neuron_set;
+    this->in_out_determined = false;
+    this->layers_analyzed = false;
 
+    this->n_neurons++;
+    this->last_used_bias_idx++;
+    this->neuron_potentials->resize(this->n_neurons);
 
-    return output_net;
+    return this->n_neurons - 1;
 }
 
+size_t NeuralNetwork::add_neuron_no_bias(Neuron *n) {
 
-NeuralNetwork::~NeuralNetwork() {
-    if(this->neurons){
-        delete this->neurons;
-        this->neurons = nullptr;
-    }
-    if(this->output_neurons){
-        delete this->output_neurons;
-        this->output_neurons = nullptr;
-    }
-    if(this->input_neurons){
-        delete this->input_neurons;
-        this->input_neurons = nullptr;
-    }
-    if(this->active_eval_set){
-        delete this->active_eval_set;
-        this->active_eval_set = nullptr;
-    }
-
-    if(this->connection_weights && this->delete_weights){
-        delete this->connection_weights;
-        this->connection_weights = nullptr;
-    }
-}
+    this->outward_adjacency->push_back(new std::vector<std::pair<size_t, size_t>>(0));
+    this->inward_adjacency->push_back(new std::vector<std::pair<size_t, size_t>>(0));
 
-int NeuralNetwork::add_neuron(Neuron *n) {
     this->neurons->push_back(n);
+
     this->in_out_determined = false;
-    n->set_idx(this->neurons->size() - 1);
+    this->layers_analyzed = false;
 
-    return n->get_idx();
+    this->n_neurons++;
+    this->neuron_potentials->resize(this->n_neurons);
+
+    return this->n_neurons - 1;
 }
 
-size_t NeuralNetwork::add_connection_simple(int n1_idx, int n2_idx) {
+size_t NeuralNetwork::add_connection_simple(size_t n1_idx, size_t n2_idx) {
     return add_connection_simple(n1_idx, n2_idx, -1);
 }
 
-size_t NeuralNetwork::add_connection_simple(int n1_idx, int n2_idx, size_t weight_idx) {
+size_t NeuralNetwork::add_connection_simple(size_t n1_idx, size_t n2_idx, int weight_idx) {
     return add_connection_simple(n1_idx, n2_idx, weight_idx, 1);
 }
 
-size_t NeuralNetwork::add_connection_simple(int n1_idx, int n2_idx, size_t weight_idx, double weight_value) {
-    // TODO generate weight_value automatically from normal distribution
+size_t NeuralNetwork::add_connection_simple(size_t n1_idx, size_t n2_idx, int weight_idx, double weight_value) {
 
-    if(weight_idx < 0 || weight_idx >= this->connection_weights->size()){
-        //this weight is a new one, we add it to the system of weights
-        this->connection_weights->push_back(weight_value);
-        weight_idx = (int)this->connection_weights->size() - 1;
-        this->n_weights++;
+    size_t local_w_idx = 0;
+    if(weight_idx < 0){
+        local_w_idx = this->last_used_weight_idx;
+    }
+    else{
+        local_w_idx = (size_t)weight_idx;
     }
-    Neuron *neuron_out = this->neurons->at(n1_idx);
-    Neuron *neuron_in = this->neurons->at(n2_idx);
-
-    ConnectionWeightIdentity *con_weight_u1u2 = new ConnectionWeightIdentity(this->connection_weights);
-    con_weight_u1u2->SetParamIndex(weight_idx, 0);
-
-    Connection *u1u2 = new Connection(neuron_out, neuron_in, con_weight_u1u2);
 
-    neuron_out->add_connection_out(u1u2);
-    neuron_in->add_connection_in(u1u2);
+    if(this->connection_weights->size() <= local_w_idx){
+        std::cerr << "Additional connection cannot be added! The weight index is out of range" << std::endl;
+        exit(-1);
+    }
 
-    return weight_idx;
-}
+    this->connection_weights->at(local_w_idx) = weight_value;
+    this->last_used_weight_idx++;
 
-void NeuralNetwork::add_connection_shared_power1(int n1_idx, int n2_idx, size_t weight_idx) {
-    std::function<double(double *, size_t*, size_t)> weight_function = [](double * weight_array, size_t * index_array, size_t n_params){
-        return weight_array[index_array[0]];
-    };
+    ConnectionFunctionIdentity *con_weight_u1u2 = new ConnectionFunctionIdentity( &(this->connection_weights->at(local_w_idx)), local_w_idx );
+    size_t conn_idx = this->add_new_connection_to_list(con_weight_u1u2);
 
-    size_t* weight_indices = new size_t[1];
-    double* weight_values = new double[1];
+    this->add_outward_connection(n1_idx, n2_idx, conn_idx);
+    this->add_inward_connection(n2_idx, n1_idx, conn_idx);
 
-    weight_indices[0] = weight_idx;
-    this->add_connection_general( n1_idx, n2_idx, &weight_function, weight_indices, weight_values, 1);
+    this->layers_analyzed = false;
 
-    delete [] weight_indices;
-    delete [] weight_values;
+    return this->connection_list->size() - 1;
 }
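+
+/* Usage sketch of the builder API above. 'SomeNeuron' stands for an arbitrary
+ * concrete Neuron subclass and is hypothetical; we also assume the
+ * two-argument constructor reserves one bias per neuron:
+ *
+ *   NeuralNetwork net(1, 2);                       // 1 weight, 2 neurons
+ *   size_t n1 = net.add_neuron(new SomeNeuron());  // takes bias 0
+ *   size_t n2 = net.add_neuron(new SomeNeuron());  // takes bias 1
+ *   net.add_connection_simple(n1, n2);             // takes the next free weight
+ */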
 
-void NeuralNetwork::add_connection_shared_power2(int n1_idx, int n2_idx, size_t weight_idx) {
-    std::function<double(double *, size_t*, size_t)> weight_function = [](double * weight_array, size_t * index_array, size_t n_params){
-        return weight_array[index_array[0]] * weight_array[index_array[0]];
-    };
+void NeuralNetwork::add_existing_connection(size_t n1_idx, size_t n2_idx, size_t connection_idx,
+                                            NeuralNetwork &parent_network) {
 
-    size_t* weight_indices = new size_t[1];
-    double* weight_values = new double[1];
+    size_t conn_idx = this->add_new_connection_to_list(parent_network.connection_list->at( connection_idx ));
 
-    weight_indices[0] = weight_idx;
-    this->add_connection_general( n1_idx, n2_idx, &weight_function, weight_indices, weight_values, 1);
+    this->add_outward_connection(n1_idx, n2_idx, conn_idx);
+    this->add_inward_connection(n2_idx, n1_idx, conn_idx);
 
-    delete [] weight_indices;
-    delete [] weight_values;
+    this->layers_analyzed = false;
 }
 
-void NeuralNetwork::add_connection_general(int n1_idx, int n2_idx, std::function<double(double *, size_t*, size_t)> *f,
-                                           size_t* weight_indices, double* weight_values, size_t n_weights) {
-
-    ConnectionWeight *con_weight_u1u2 = new ConnectionWeight(n_weights, this->connection_weights);
-    //we analyze weights
-    size_t weight_idx = 0;
-    double weight_value = 0.0;
-    for(size_t wi = 0; wi < n_weights; ++wi){
-        weight_idx = weight_indices[wi];
-        weight_value = weight_values[wi];
-
-        if(weight_idx < 0 || weight_idx >= this->connection_weights->size()){
-            //this weight is a new one, we add it to the system of weights
-            this->connection_weights->push_back(weight_value);
-            weight_indices[wi] = (int)this->connection_weights->size() - 1;
+void NeuralNetwork::copy_parameter_space(std::vector<double> *parameters) {
+    if(parameters != nullptr){
+        for(unsigned int i = 0; i < this->connection_weights->size(); ++i){
+            (*this->connection_weights)[i] = (*parameters)[i];
         }
 
-        con_weight_u1u2->SetParamIndex(weight_indices[wi], wi);
+        for(unsigned int i = 0; i < this->neuron_biases->size(); ++i){
+            (*this->neuron_biases)[i] = (*parameters)[i + this->connection_weights->size()];
+        }
     }
-
-    Neuron *neuron_out = this->neurons->at(n1_idx);
-    Neuron *neuron_in = this->neurons->at(n2_idx);
-    Connection *u1u2 = new Connection(neuron_out, neuron_in, con_weight_u1u2);
-
-    neuron_out->add_connection_out(u1u2);
-    neuron_in->add_connection_in(u1u2);
-
 }
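+
+/* Parameter layout sketch: for a network with W weights and B biases,
+ * copy_parameter_space() expects
+ *   parameters = { w_0, ..., w_{W-1}, b_0, ..., b_{B-1} },
+ * i.e. all connection weights first, followed by all neuron biases. */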
 
-void NeuralNetwork::determine_inputs_outputs() {
-    if(this->output_neurons){
-        delete this->output_neurons;
-    }
-
-    if(this->input_neurons){
-        delete this->input_neurons;
-    }
-
-    this->output_neurons = new std::vector<Neuron*>(0);
-    this->input_neurons = new std::vector<Neuron*>(0);
+void NeuralNetwork::set_parameter_space_pointers(NeuralNetwork &parent_network) {
 
-    if(this->active_eval_set == nullptr){
-        this->active_eval_set = new std::vector<Neuron*>(this->neurons->size() * 2);
-    }
-    else{
-        this->active_eval_set->resize(this->neurons->size() * 2);
+    if(this->connection_weights && this->delete_weights){
+        delete this->connection_weights;
     }
 
-    for(Neuron* neuron: *this->neurons){
-        if(neuron->get_connections_out()->empty()){
-            //this neuron has no outgoing connections, it is the output neuron
-            this->output_neurons->push_back(neuron);
-        }
-        else if(neuron->get_connections_in()->empty()){
-            //this neuron has no incoming connections, it is the input neuron
-            this->input_neurons->push_back(neuron);
-        }
+    if(this->neuron_biases && this->delete_biases){
+        delete this->neuron_biases;
     }
 
-    this->n_inputs = this->input_neurons->size();
-    this->n_outputs = this->output_neurons->size();
+    this->connection_weights = parent_network.connection_weights;
+    this->neuron_biases = parent_network.neuron_biases;
 
-    this->in_out_determined = true;
-}
-
-void NeuralNetwork::set_weight_array(std::vector<double> *weight_ptr) {
-    if( this->connection_weights && this->delete_weights ){
-        delete this->connection_weights;
-    }
-    this->connection_weights = weight_ptr;
+    this->delete_biases = false;
     this->delete_weights = false;
-
-    this->n_weights = this->connection_weights->size();
 }
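+
+/* After this call both networks read and write the very same weight and bias
+ * vectors; this is, e.g., how the summands of a NeuralNetworkSum can be made
+ * to share parameters (a sketch of the intended use, not enforced here). */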
 
-void NeuralNetwork::eval_single(std::vector<double> &input, std::vector<double> &output, double * custom_weights) {
+void NeuralNetwork::eval_single(std::vector<double> &input, std::vector<double> &output, std::vector<double> * custom_weights_and_biases) {
     if(!this->in_out_determined && this->n_inputs * this->n_outputs <= 0){
-//        this->determine_inputs_outputs();
         std::cerr << "Input and output neurons have not been specified\n" << std::endl;
         exit(-1);
     }
@@ -428,130 +507,75 @@ void NeuralNetwork::eval_single(std::vector<double> &input, std::vector<double>
         exit(-1);
     }
 
-//    std::vector<double> *weight_ptr = this->connection_weights;
-//    std::vector<double> *custom_weight_ptr = nullptr;
-//
-//
-    if(custom_weights != nullptr){
-//        custom_weight_ptr = new std::vector<double>(custom_weights, custom_weights + this->n_weights);
-//        this->connection_weights = custom_weight_ptr;
-        this->connection_weights->assign(custom_weights, custom_weights + this->n_weights);
-    }
+    this->copy_parameter_space( custom_weights_and_biases );
 
 
+    this->analyze_layer_structure();
 
+    /* reset of the output and the neuron potentials */
     std::fill(output.begin(), output.end(), 0.0);
+    std::fill(this->neuron_potentials->begin(), this->neuron_potentials->end(), 0.0);
 
-    //reset of the potentials
-    for(Neuron* neuron: *this->neurons){
-        neuron->set_potential(0.0);
-        neuron->set_saturation_in(0);
-        neuron->set_saturation_out(0);
+    /* set the potentials of the input neurons */
+    for(unsigned int i = 0; i < this->n_inputs; ++i){
+        this->neuron_potentials->at( this->input_neuron_indices->at(i) ) = input[ i ];
     }
 
-    if(this->active_eval_set == nullptr){
-        this->active_eval_set = new std::vector<Neuron*>(this->neurons->size() * 2);
-    }
-    else{
-        this->active_eval_set->resize(this->neurons->size() * 2);
-    }
-
-    int active_set_size[2];
-    active_set_size[0] = 0;
-    active_set_size[1] = 0;
-
-    int idx1 = 0, idx2 = 1;
+    /* we iterate through all the feedforward layers and transfer the signals */
+    double pot;
+    for( auto layer: *this->neuron_layers_feedforward){
+        /* we iterate through all neurons in this layer and propagate the signal to the neighboring neurons */
 
-    active_set_size[0] = this->n_inputs;
-    int i = 0;
-    auto n = this->neurons->size();
-
-    for(Neuron* neuron: *this->input_neurons){
+        for( auto si: *layer ){
+            pot = this->neurons->at(si)->activate(this->neuron_potentials->at( si ));
 
-        this->active_eval_set->at(i) = neuron;
+            for(auto c: *this->outward_adjacency->at( si )){
+                size_t ti = c.first;
+                size_t ci = c.second;
 
-        neuron->set_potential(input[i]);
+                this->neuron_potentials->at( ti ) += this->connection_list->at( ci )->eval() * pot;
+            }
+        }
+    }
 
-#ifdef VERBOSE_NN_EVAL
-        printf("INPUT NEURON %d, POTENTIAL: %f\n", neuron, input[i]);
-#endif
+    unsigned int i = 0;
+    for(auto oi: *this->output_neuron_indices){
+        output[i] = this->neurons->at( oi )->activate( this->neuron_potentials->at( oi ) );
         ++i;
     }
-    Neuron* active_neuron;
-    Neuron* target_neuron;
-
-    while(active_set_size[idx1] > 0){
+}
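+
+/* Evaluation sketch (assumes the input and output neurons were specified):
+ *
+ *   std::vector<double> in = { 0.5 }, out(net.get_n_outputs());
+ *   net.eval_single(in, out);            // uses the stored parameters
+ *
+ *   std::vector<double> params = ...;    // all weights first, then all biases
+ *   net.eval_single(in, out, &params);   // overwrites the stored parameters
+ */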
 
-        //we iterate through the active neurons and propagate the signal
-        for(i = 0; i < active_set_size[idx1]; ++i){
-            active_neuron = this->active_eval_set->at(i + n * idx1);
-            active_neuron->activate();//computes the activation function on its potential
-#ifdef VERBOSE_NN_EVAL
-            printf(" active neuron %d, potential: %f, state: %f\n", active_neuron, active_neuron->get_potential(), active_neuron->get_state());
-#endif
-            for(Connection* connection: *(active_neuron->get_connections_out())){
-                connection->pass_signal();
-
-                target_neuron = connection->get_neuron_out();
-                target_neuron->adjust_saturation_in(1);
-
-                if(target_neuron->is_saturated_in()){
-                    this->active_eval_set->at(active_set_size[idx2] + n * idx2) = target_neuron;
-                    active_set_size[idx2]++;
-                }
-            }
-        }
-#ifdef VERBOSE_NN_EVAL
-        printf("-----------\n");
-#endif
+void NeuralNetwork::randomize_weights( ) {
 
-        idx1 = idx2;
-        idx2 = (idx1 + 1) % 2;
+    if( this->last_used_weight_idx == 0 ){
 
-        active_set_size[idx2] = 0;
+        return;
     }
 
-    i = 0;
-    for(Neuron* neuron: *this->output_neurons){
-
-        output[i] = neuron->get_state();
-
-#ifdef VERBOSE_NN_EVAL
-        printf("OUTPUT NEURON %d, VALUE: %f\n", neuron, output[i]);
-#endif
+    boost::random::mt19937 gen;
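+    // NOTE: the generator is default-seeded, so repeated calls produce the
+    // same pseudo-random weights; seed it explicitly if that is undesired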
 
-        ++i;
-    }
+    // Init weight guess ("optimal" for logistic activation functions)
+    double r = 4 * sqrt(6./(this->last_used_weight_idx));
 
-//    this->connection_weights = weight_ptr;
-}
+    boost::random::uniform_real_distribution<> dist(-r, r);
 
-void NeuralNetwork::copy_weights(double *weights) {
-    for(unsigned int i = 0; i < this->connection_weights->size(); ++i){
-        (*this->connection_weights)[i] = weights[i];
+    for(size_t i = 0; i < this->last_used_weight_idx; i++) {
+        this->connection_weights->at(i) = dist(gen);
     }
 }
 
-std::vector<double>* NeuralNetwork::get_weights_ptr(){
-    return this->connection_weights;
-}
-
-void NeuralNetwork::randomize_weights() {
-
-    if( this->n_weights <= 0 ){
+void NeuralNetwork::randomize_biases( ) {
 
+    if( this->last_used_bias_idx == 0 ){
         return;
     }
 
     boost::random::mt19937 gen;
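+    // NOTE: default-seeded, see the remark in randomize_weights() above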
 
     // Init weight guess ("optimal" for logistic activation functions)
-    double r = 4 * sqrt(6./(this->n_weights));
-
-    boost::random::uniform_real_distribution<> dist(-r, r);
-
-    for(size_t i = 0; i < this->n_weights; i++) {
-        this->connection_weights->at(i) = dist(gen);
+    boost::random::uniform_real_distribution<> dist(-1, 1);
+    for(size_t i = 0; i < this->last_used_bias_idx; i++) {
+        this->neuron_biases->at(i) = dist(gen);
     }
 }
 
@@ -564,40 +588,43 @@ size_t  NeuralNetwork::get_n_outputs() {
 }
 
 size_t NeuralNetwork::get_n_weights() {
-    if(!this->n_weights) {
-        this->n_weights = this->connection_weights->size();
+    if(!this->connection_weights){
+        return 0;
     }
-    return this->n_weights;
+    return this->connection_weights->size();
 }
 
-void NeuralNetwork::specify_input_neurons(std::vector<size_t> &input_neurons_indices) {
-    if( !this->input_neurons ){
-        this->input_neurons = new std::vector<Neuron*>();
+size_t NeuralNetwork::get_n_biases() {
+    if(!this->neuron_biases){
+        return 0;
     }
-    this->input_neurons->reserve( input_neurons_indices.size() );
+    return this->neuron_biases->size();
+}
 
-    for( size_t e: input_neurons_indices ){
-        if( e < 0 || e >= this->neurons->size() ){
-            continue;
-        }
-        this->input_neurons->push_back( this->neurons->at(e) );
+size_t NeuralNetwork::get_n_neurons() {
+    return this->n_neurons;
+}
+
+void NeuralNetwork::specify_input_neurons(std::vector<size_t> &input_neurons_indices) {
+    if( !this->input_neuron_indices ){
+        this->input_neuron_indices = new std::vector<size_t>(input_neurons_indices);
+    }
+    else{
+        delete this->input_neuron_indices;
+        this->input_neuron_indices = new std::vector<size_t>(input_neurons_indices);
     }
-    this->n_inputs = this->input_neurons->size();
+    this->n_inputs = input_neurons_indices.size();
 }
 
 void NeuralNetwork::specify_output_neurons(std::vector<size_t> &output_neurons_indices) {
-    if( !this->output_neurons ){
-        this->output_neurons = new std::vector<Neuron*>();
+    if( !this->output_neuron_indices ){
+        this->output_neuron_indices = new std::vector<size_t>(output_neurons_indices);
     }
-    this->output_neurons->reserve( output_neurons_indices.size() );
-
-    for( size_t e: output_neurons_indices ){
-        if( e < 0 || e >= this->neurons->size() ){
-            continue;
-        }
-        this->output_neurons->push_back( this->neurons->at(e) );
+    else{
+        delete this->output_neuron_indices;
+        this->output_neuron_indices = new std::vector<size_t>(output_neurons_indices);
     }
-    this->n_outputs = this->output_neurons->size();
+    this->n_outputs = output_neurons_indices.size();
 }
 
 void NeuralNetwork::print_weights() {
@@ -613,5 +640,174 @@ void NeuralNetwork::print_weights() {
 }
 
 void NeuralNetwork::print_stats(){
-    printf("Number of neurons: %d, number of weights: %d\n", this->neurons->size(), this->n_weights);
+    printf("Number of neurons: %d, number of active weights: %d, number of active biases: %d\n", (int)this->neurons->size(), (int)this->last_used_weight_idx, (int)this->last_used_bias_idx);
+}
+
+std::vector<double>* NeuralNetwork::get_parameter_ptr_biases() {
+    return this->neuron_biases;
+}
+
+std::vector<double>* NeuralNetwork::get_parameter_ptr_weights() {
+    return this->connection_weights;
+}
+
+size_t NeuralNetwork::add_new_connection_to_list(ConnectionFunctionGeneral *con) {
+    this->connection_list->push_back(con);
+    return this->connection_list->size() - 1;
+}
+
+void NeuralNetwork::add_inward_connection(size_t s, size_t t, size_t con_idx) {
+    if(!this->inward_adjacency->at(s)){
+        this->inward_adjacency->at(s) = new std::vector<std::pair<size_t, size_t>>(0);
+    }
+    this->inward_adjacency->at(s)->push_back(std::make_pair(t, con_idx));
+}
+
+void NeuralNetwork::add_outward_connection(size_t s, size_t t, size_t con_idx) {
+    if(!this->outward_adjacency->at(s)){
+        this->outward_adjacency->at(s) = new std::vector<std::pair<size_t, size_t>>(0);
+    }
+    this->outward_adjacency->at(s)->push_back(std::make_pair(t, con_idx));
+}
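+
+/* Adjacency sketch: an edge s -> t realized by connection c is recorded twice,
+ *   outward_adjacency[s] holds the pair (t, c),
+ *   inward_adjacency[t]  holds the pair (s, c),
+ * so both passes of analyze_layer_structure() can walk the graph directly. */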
+
+void NeuralNetwork::analyze_layer_structure() {
+
+    if(this->layers_analyzed){
+        //nothing to do
+        return;
+    }
+
+    /* space allocation */
+    if(this->neuron_layers_feedforward){
+        for(auto e: *this->neuron_layers_feedforward){
+            delete e;
+            e = nullptr;
+        }
+        delete this->neuron_layers_feedforward;
+        this->neuron_layers_feedforward = nullptr;
+    }
+
+    if(this->neuron_layers_feedbackward){
+        for(auto e: *this->neuron_layers_feedbackward){
+            delete e;
+            e = nullptr;
+        }
+        delete this->neuron_layers_feedbackward;
+        this->neuron_layers_feedbackward = nullptr;
+    }
+
+    this->neuron_layers_feedforward = new std::vector<std::vector<size_t>*>(0);
+    this->neuron_layers_feedbackward = new std::vector<std::vector<size_t>*>(0);
+
+
+    auto n = this->neurons->size();
+
+    /* helpful counters */
+    std::vector<size_t> inward_saturation(n);
+    std::vector<size_t> outward_saturation(n);
+    std::fill(inward_saturation.begin(), inward_saturation.end(), 0);
+    std::fill(outward_saturation.begin(), outward_saturation.end(), 0);
+    for(unsigned int i = 0; i < n; ++i){
+        if(this->inward_adjacency->at(i)){
+            inward_saturation[i] = this->inward_adjacency->at(i)->size();
+        }
+
+        if(this->outward_adjacency->at(i)){
+            outward_saturation[i] = this->outward_adjacency->at(i)->size();
+        }
+    }
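+
+    /* What follows is in essence Kahn's topological sort: a neuron enters the
+     * next layer once its inward (resp. outward) saturation counter drops to
+     * zero; 'active_eval_set' keeps the current and the next active set in its
+     * two halves, switched via idx1/idx2. */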
+
+
+    std::vector<size_t> active_eval_set(2 * n);
+    size_t active_set_size[2];
+
+    /* feedforward analysis */
+    active_set_size[0] = 0;
+    active_set_size[1] = 0;
+
+    size_t idx1 = 0, idx2 = 1;
+
+    active_set_size[0] = this->n_inputs;
+    size_t i = 0;
+    for(i = 0; i < this->n_inputs; ++i){
+        active_eval_set[i] = this->input_neuron_indices->at(i);
+    }
+
+    size_t active_ni;
+    while(active_set_size[idx1] > 0){
+
+        /* we add the current active set as the new feedforward layer */
+        std::vector<size_t> *new_feedforward_layer = new std::vector<size_t>(active_set_size[idx1]);
+        this->neuron_layers_feedforward->push_back( new_feedforward_layer );
+
+        //we iterate through the active neurons and propagate the signal
+        for(i = 0; i < active_set_size[idx1]; ++i){
+            active_ni = active_eval_set[i + n * idx1];
+            new_feedforward_layer->at( i ) = active_ni;
+
+            if(!this->outward_adjacency->at(active_ni)){
+                continue;
+            }
+
+            for(auto ni: *(this->outward_adjacency->at(active_ni))){
+                inward_saturation[ni.first]--;
+
+                if(inward_saturation[ni.first] == 0){
+                    active_eval_set[active_set_size[idx2] + n * idx2] = ni.first;
+                    active_set_size[idx2]++;
+                }
+            }
+        }
+
+        idx1 = idx2;
+        idx2 = (idx1 + 1) % 2;
+
+        active_set_size[idx2] = 0;
+    }
+
+
+    /* feed backward analysis */
+    active_set_size[0] = 0;
+    active_set_size[1] = 0;
+
+    idx1 = 0;
+    idx2 = 1;
+
+    active_set_size[0] = this->n_outputs;
+    for(i = 0; i < this->n_outputs; ++i){
+        active_eval_set[i] = this->output_neuron_indices->at(i);
+    }
+
+    while(active_set_size[idx1] > 0){
+
+        /* we add the current active set as the new feedbackward layer */
+        std::vector<size_t> *new_feedbackward_layer = new std::vector<size_t>(active_set_size[idx1]);
+        this->neuron_layers_feedbackward->push_back( new_feedbackward_layer );
+
+        //we iterate through the active neurons and propagate the signal backward
+        for(i = 0; i < active_set_size[idx1]; ++i){
+            active_ni = active_eval_set[i + n * idx1];
+            new_feedbackward_layer->at( i ) = active_ni;
+
+            if(!this->inward_adjacency->at(active_ni)){
+                continue;
+            }
+
+            for(auto ni: *(this->inward_adjacency->at(active_ni))){
+                outward_saturation[ni.first]--;
+
+                if(outward_saturation[ni.first] == 0){
+                    active_eval_set[active_set_size[idx2] + n * idx2] = ni.first;
+                    active_set_size[idx2]++;
+                }
+            }
+        }
+
+        idx1 = idx2;
+        idx2 = (idx1 + 1) % 2;
+
+        active_set_size[idx2] = 0;
+    }
+
+    this->layers_analyzed = true;
 }
\ No newline at end of file
diff --git a/src/Network/NeuralNetwork.h b/src/Network/NeuralNetwork.h
index 68b558fd5751c8b0a8e1c20ae34b243492253327..b91640ce4b2695bcfbb0b0434fec0209c4b30a09 100644
--- a/src/Network/NeuralNetwork.h
+++ b/src/Network/NeuralNetwork.h
@@ -1,18 +1,22 @@
 /**
- * DESCRIPTION OF THE FILE
+ * This file contains the header for the NeuralNetwork class representing a function in the form of a directed graph,
+ * in which the vertices are called Neurons (with activation functions) and the edges Connections (with transfer functions)
  *
  * @author Michal KravĨenko
  * @date 13.6.18 -
  */
 
- //TODO pouvazovat o pridani indexu k neuronum, abychom meli urcitou kontrolu nad poradim vstupu a vystupu?
- //TODO rucni nastaveni vstupnich a vystu[nich neuronu
+//TODO preprocess the feed-forward and backward passes for more efficient parallelism
 
 #ifndef INC_4NEURO_NEURALNETWORK_H
 #define INC_4NEURO_NEURALNETWORK_H
 
 #include <vector>
+#include <algorithm>
+#include <utility>
 #include "../Neuron/Neuron.h"
+#include "../NetConnection/ConnectionFunctionGeneral.h"
+#include "../NetConnection/ConnectionFunctionIdentity.h"
 #include "../settings.h"
 
 enum NET_TYPE{GENERAL};
@@ -28,58 +32,134 @@ private:
      */
     NET_TYPE network_type = GENERAL;
 
-     /**
-      *
-      */
-     std::vector<Neuron*> *neurons = nullptr;
+    /**
+     * Pointers to all the neurons of this network
+     */
+    std::vector<Neuron*> *neurons = nullptr;
+
+    /**
+     * Indices (into 'neurons') of the input neurons
+     */
+    std::vector<size_t>* input_neuron_indices = nullptr;
+
+    /**
+     * Indices (into 'neurons') of the output neurons
+     */
+    std::vector<size_t>* output_neuron_indices = nullptr;
+
+    /**
+     * Values of all the connection weights of this network
+     */
+    std::vector<double>* connection_weights = nullptr;
+
+    /**
+     * Values of all the neuron biases of this network
+     */
+    std::vector<double>* neuron_biases = nullptr;
+
+    /**
+     * Scratch storage for the neuron potentials used during evaluation
+     */
+    std::vector<double>* neuron_potentials = nullptr;
+
+    /**
+     * The transfer functions realizing the connections of this network
+     */
+    std::vector<ConnectionFunctionGeneral*> * connection_list = nullptr;
+
+    /**
+     * For each neuron, the (source neuron index, connection index) pairs of its incoming edges
+     */
+    std::vector<std::vector<std::pair<size_t, size_t>>*> * inward_adjacency = nullptr;
+
+    /**
+     * For each neuron, the (target neuron index, connection index) pairs of its outgoing edges
+     */
+    std::vector<std::vector<std::pair<size_t, size_t>>*> * outward_adjacency = nullptr;
+
+    /**
+     * Neuron indices ordered into layers for the feedforward pass (built by analyze_layer_structure)
+     */
+    std::vector<std::vector<size_t>*> *neuron_layers_feedforward = nullptr;
+
+    /**
+     * Neuron indices ordered into layers for the feedbackward pass (built by analyze_layer_structure)
+     */
+    std::vector<std::vector<size_t>*> *neuron_layers_feedbackward = nullptr;
+
+    /**
+     * Number of input neurons
+     */
+    size_t n_inputs = 0;
 
-     /**
-      *
-      */
-     std::vector<Neuron*>* input_neurons = nullptr;
+    /**
+     * Number of output neurons
+     */
+    size_t n_outputs = 0;
 
-     /**
-      *
-      */
-     std::vector<Neuron*>* output_neurons = nullptr;
+    /**
+     * Index of the first not-yet-assigned connection weight (equals the number of weights in use)
+     */
+    size_t last_used_weight_idx = 0;
 
-     std::vector<double>* connection_weights = nullptr;
+    /**
+     * Index of the first not-yet-assigned neuron bias (equals the number of biases in use)
+     */
+    size_t last_used_bias_idx = 0;
+
+    /**
+     * Total number of neurons in this network
+     */
+    size_t n_neurons = 0;
 
-     /**
-      *
-      */
-     size_t n_inputs = 0;
+    /**
+     * True once the input and output neurons have been specified
+     */
+    bool in_out_determined = false;
 
-     /**
-      *
-      */
-     size_t n_outputs = 0;
+    /**
+     * True while the layer decomposition is up to date (invalidated by structural changes)
+     */
+    bool layers_analyzed = false;
 
-     /**
-      *
-      */
-     size_t n_weights = 0;
+    /**
+     * If true, this network owns 'connection_weights' and frees it in the destructor
+     */
+    bool delete_weights = true;
 
-     /**
-      *
-      */
-     bool in_out_determined = false;
+    /**
+     * If true, this network owns 'neuron_biases' and frees it in the destructor
+     */
+    bool delete_biases = true;
 
-     /**
-      *
-      */
-     bool delete_weights = true;
+    /**
+     * Adds a new connection to the local list of connections
+     * @param con Connection object to be added
+     * @return Returns the index of the added connection among all the connections
+     */
+    size_t add_new_connection_to_list(ConnectionFunctionGeneral* con);
 
-     /**
-      *
-      */
-     std::vector<Neuron*>* active_eval_set = nullptr;
+    /**
+     * Adds a new entry (oriented edge s -> t) to the adjacency list of this network
+     * @param s Index of the source neuron
+     * @param t Index of the target neuron
+     * @param con_idx Index of the connection representing the edge
+     */
+    void add_outward_connection(size_t s, size_t t, size_t con_idx);
 
-     /**
-      *
-      */
-     void determine_inputs_outputs();
+    /**
+     * Adds a new entry (oriented edge s <- t) to the adjacency list of this network
+     * @param s Index of the source neuron
+     * @param t Index of the target neuron
+     * @param con_idx Index of the connection representing the edge
+     */
+    void add_inward_connection(size_t s, size_t t, size_t con_idx);
 
+    /**
+     * Performs one feedforward pass and feedbackward pass during which determines the layers of this neural network
+     * for simpler use during evaluation and learning
+     */
+    void analyze_layer_structure( );
 
 
 public:
@@ -89,11 +169,16 @@ public:
      */
     NeuralNetwork();
 
+    /**
+     * Constructor which pre-allocates 'n_connection_weights' connection weights and the parameter storage for 'n_neurons' neurons
+     */
+    NeuralNetwork(size_t n_connection_weights, size_t n_neurons);
+
 
     /**
      *
      */
-    ~NeuralNetwork();
+    virtual ~NeuralNetwork();
 
     /**
      * If possible, returns a neural net with 'input_neuron_indices' neurons as inputs and 'output_neuron_indices' as
@@ -105,119 +190,120 @@ public:
      */
     NeuralNetwork* get_subnet(std::vector<size_t> &input_neuron_indices, std::vector<size_t> &output_neuron_indices);
 
+    /**
+     * Replaces the values in @{this->connection_weights} and @{this->neuron_biases} by the provided values
+     * @param parameters Vector holding all the connection weights followed by all the neuron biases
+     */
+    virtual void copy_parameter_space(std::vector<double> *parameters);
+
+    /**
+     * Copies the pointers @{this->connection_weights} and @{this->neuron_biases} from the parental network, sets
+     * flags to not delete the vectors in this object
+     * @param parent_network
+     */
+    virtual void set_parameter_space_pointers( NeuralNetwork &parent_network );
+
     /**
      *
-     * @param[in] input
-     * @param[in,out] output
+     * @param input Vector of input values
+     * @param output Vector to be filled with the network response
+     * @param custom_weights_and_biases Optional replacement parameters: all weights first, then all biases
      */
-    virtual void eval_single(std::vector<double> &input, std::vector<double> &output, double *custom_weights = nullptr);
+    virtual void eval_single(std::vector<double> &input, std::vector<double> &output, std::vector<double> *custom_weights_and_biases = nullptr);
 
 
 
     /**
-     *
+     * Adds a new neuron to the list of neurons. Also assigns a valid bias value to its activation function:
+     * bias_idx < 0 (the default) takes the next unused bias, a non-negative value reuses the bias with that index
      * @param[in] n
      * @return
      */
-    int add_neuron(Neuron* n);
+    size_t add_neuron(Neuron* n, int bias_idx = -1 );
 
     /**
-     *
-     * @param n1_idx
-     * @param n2_idx
+     * Adds a new neuron to this network, does not touch its bias.
+     * @param n
      * @return
      */
-    size_t add_connection_simple(int n1_idx, int n2_idx);
+    size_t add_neuron_no_bias(Neuron *n);
 
     /**
      *
      * @param n1_idx
      * @param n2_idx
-     * @param weight_idx
      * @return
      */
-    size_t add_connection_simple(int n1_idx, int n2_idx, size_t weight_idx);
+    size_t add_connection_simple(size_t n1_idx, size_t n2_idx);
 
     /**
      *
      * @param n1_idx
      * @param n2_idx
      * @param weight_idx
-     * @param weight_value
      * @return
      */
-    size_t add_connection_simple(int n1_idx, int n2_idx, size_t weight_idx, double weight_value);
+    size_t add_connection_simple(size_t n1_idx, size_t n2_idx, int weight_idx);
 
     /**
      *
      * @param n1_idx
      * @param n2_idx
      * @param weight_idx
-     * @param power_degree
+     * @param weight_value
+     * @return
      */
-    void add_connection_shared_power1(int n1_idx, int n2_idx, size_t weight_idx);
+    size_t add_connection_simple(size_t n1_idx, size_t n2_idx, int weight_idx, double weight_value);
 
     /**
-     *
+     * Take the existing connection with index 'connection_idx' in 'parent_network' and adds it to the structure of this
+     * object
      * @param n1_idx
      * @param n2_idx
-     * @param weight_idx
-     * @param power_degree
+     * @param connection_idx
+     * @param parent_network
      */
-    void add_connection_shared_power2(int n1_idx, int n2_idx, size_t weight_idx);
+    void add_existing_connection(size_t n1_idx, size_t n2_idx, size_t connection_idx, NeuralNetwork &parent_network );
 
-    /**
-     *
-     * @param n1_idx
-     * @param n2_idx
-     * @param f
-     * @param weight_indices
-     * @param weight_values
-     * @param n_weights
-     */
-    void add_connection_general(int n1_idx, int n2_idx, std::function<double(double *, size_t*, size_t)> *f,
-                                size_t* weight_indices, double* weight_values, size_t n_weights);
 
     /**
      *
-     * @param weight_ptr
      */
-    void set_weight_array( std::vector<double>* weight_ptr );
+    void randomize_weights();
 
     /**
      *
-     * @param weights
      */
-    void copy_weights(double *weights);
+    void randomize_biases();
 
     /**
      *
      * @return
      */
-    std::vector<double>* get_weights_ptr();
+    virtual size_t get_n_inputs();
 
     /**
      *
+     * @return
      */
-    void randomize_weights();
+    virtual size_t get_n_outputs();
 
     /**
      *
      * @return
      */
-    size_t get_n_inputs();
+    virtual size_t get_n_weights();
 
     /**
      *
      * @return
      */
-    size_t get_n_outputs();
+    virtual size_t get_n_biases();
 
     /**
      *
      * @return
      */
-    virtual size_t get_n_weights();
+    virtual size_t get_n_neurons();
 
     /**
      *
@@ -240,6 +326,18 @@ public:
      *
      */
     void print_stats();
+
+    /**
+     *
+     * @return Pointer to the internal vector of connection weights
+     */
+    std::vector<double>* get_parameter_ptr_weights();
+
+    /**
+     *
+     * @return Pointer to the internal vector of neuron biases
+     */
+    std::vector<double>* get_parameter_ptr_biases();
 };
 
 
diff --git a/src/Network/NeuralNetworkSum.cpp b/src/Network/NeuralNetworkSum.cpp
index 6bc4be452a1b81b238fc01326c45028626f7ccbd..fc73dec6c14d10bbbeabb544f9ad5eb11017b9d4 100644
--- a/src/Network/NeuralNetworkSum.cpp
+++ b/src/Network/NeuralNetworkSum.cpp
@@ -33,14 +33,14 @@ void NeuralNetworkSum::add_network(NeuralNetwork *net, double alpha) {
     this->summand_coefficient->push_back( alpha );
 }
 
-void NeuralNetworkSum::eval_single(std::vector<double> &input, std::vector<double> &output, double *custom_weights) {
+void NeuralNetworkSum::eval_single(std::vector<double> &input, std::vector<double> &output, std::vector<double> *custom_weights_and_biases) {
     std::vector<double> mem_output(output.size());
     std::fill(output.begin(), output.end(), 0.0);
 
-    for(int ni = 0; ni < this->summand->size(); ++ni){
-        this->summand->at(ni)->eval_single(input, mem_output, custom_weights);
+    for(size_t ni = 0; ni < this->summand->size(); ++ni){
+        this->summand->at(ni)->eval_single(input, mem_output, custom_weights_and_biases);
         double alpha = this->summand_coefficient->at(ni);
-        for(int j = 0; j < output.size(); ++j){
+        for(size_t j = 0; j < output.size(); ++j){
             output[j] += mem_output[j] * alpha;
         }
     }
@@ -48,10 +48,46 @@ void NeuralNetworkSum::eval_single(std::vector<double> &input, std::vector<doubl
 }
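+
+/* Usage sketch: the sum evaluates every summand on the same input and
+ * accumulates alpha_i * net_i(input); given suitable 'in'/'out' vectors:
+ *
+ *   NeuralNetworkSum sum;
+ *   sum.add_network(&net_a, 1.0);
+ *   sum.add_network(&net_b, -0.5);   // represents net_a(x) - 0.5 * net_b(x)
+ *   sum.eval_single(in, out);
+ */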
 
 size_t NeuralNetworkSum::get_n_weights(){
-    //TODO stupid solution, assumes the networks share weights
+    //TODO insufficient solution, assumes the networks share weights
     if(this->summand){
         return this->summand->at(0)->get_n_weights();
     }
 
+    return 0;
+}
+
+size_t NeuralNetworkSum::get_n_biases(){
+    //TODO insufficient solution, assumes the networks share biases
+    if(this->summand){
+        return this->summand->at(0)->get_n_biases();
+    }
+
+    return 0;
+}
+
+size_t NeuralNetworkSum::get_n_inputs() {
+    //TODO insufficient solution, assumes the networks share the same inputs
+    if(this->summand){
+        return this->summand->at(0)->get_n_inputs();
+    }
+
+    return 0;
+}
+
+size_t NeuralNetworkSum::get_n_neurons() {
+    //TODO insufficient solution, reports the neuron count of the first summand only
+    if(this->summand){
+        return this->summand->at(0)->get_n_neurons();
+    }
+
+    return 0;
+}
+
+size_t NeuralNetworkSum::get_n_outputs() {
+    //TODO insufficient solution, assumes the networks share the same outputs
+    if(this->summand){
+        return this->summand->at(0)->get_n_outputs();
+    }
+
     return 0;
 }
\ No newline at end of file
diff --git a/src/Network/NeuralNetworkSum.h b/src/Network/NeuralNetworkSum.h
index 7c0cba3651a1e87906c1e38948fb9f13c097107b..d31df8ece4965ce4cff9cc41c5cba114a1a4303c 100644
--- a/src/Network/NeuralNetworkSum.h
+++ b/src/Network/NeuralNetworkSum.h
@@ -16,14 +16,42 @@ private:
     std::vector<double> * summand_coefficient;
 
 public:
-    NeuralNetworkSum();
-    virtual ~NeuralNetworkSum();
+    NeuralNetworkSum( );
+    virtual ~NeuralNetworkSum( );
 
     void add_network( NeuralNetwork *net, double alpha = 1.0 );
 
-    virtual void eval_single(std::vector<double> &input, std::vector<double> &output, double *custom_weights = nullptr) override;
-
-    virtual size_t get_n_weights();
+    virtual void eval_single(std::vector<double> &input, std::vector<double> &output, std::vector<double> *custom_weights_and_biases = nullptr) override;
+
+    /**
+     *
+     * @return
+     */
+    virtual size_t get_n_inputs() override;
+
+    /**
+     *
+     * @return
+     */
+    virtual size_t get_n_outputs() override;
+
+    /**
+     *
+     * @return
+     */
+    virtual size_t get_n_weights() override;
+
+    /**
+     *
+     * @return
+     */
+    virtual size_t get_n_biases() override;
+
+    /**
+     *
+     * @return
+     */
+    virtual size_t get_n_neurons() override;
 };
 
 
diff --git a/src/Neuron/Neuron.cpp b/src/Neuron/Neuron.cpp
index a230bdeb0391cb6f6ac17d4f9b8a12b75382ebdb..c905e2907e50eb776c28c887cae9242791fd7b9f 100644
--- a/src/Neuron/Neuron.cpp
+++ b/src/Neuron/Neuron.cpp
@@ -1,134 +1,14 @@
 
 #include "Neuron.h"
 
-//TODO write INIT method to allocate edges_in and edges_out, so
-// it doesn't have to be written in every inherited class
 Neuron::~Neuron() {
-    if(this->activation_function_parameters){
-        delete [] this->activation_function_parameters;
-        this->activation_function_parameters = nullptr;
-    }
 
-    if(this->edges_out){
-        for(auto *edge: *this->edges_out){
-            delete edge;
-        }
-        delete this->edges_out;
-        this->edges_out = nullptr;
-    }
-
-    if(this->edges_in){
-        delete this->edges_in;
-        this->edges_in = nullptr;
-    }
-}
-
-void Neuron::set_saturation_in(int value) {
-    this->n_saturated_connections_in = value;
-}
-
-void Neuron::adjust_saturation_in(int value) {
-    this->n_saturated_connections_in += value;
-}
-
-bool Neuron::is_saturated_in() {
-    if(this->n_saturated_connections_in == this->edges_in->size()){
-        return true;
-    }
-
-    return false;
-}
-
-void Neuron::set_saturation_out(int value) {
-    this->n_saturated_connections_out = value;
-}
-
-void Neuron::adjust_saturation_out(int value) {
-    this->n_saturated_connections_out += value;
-}
-
-
-bool Neuron::is_saturated_out() {
-    return this->n_saturated_connections_out == this->edges_out->size();
-}
-
-
-void Neuron::adjust_potential(double input_signal) {
-    this->potential += input_signal;
-}
-
-double Neuron::get_potential() {
-    return this->potential;
-}
-
-void Neuron::set_potential(double value) {
-    this->potential = value;
-}
-
-double Neuron::get_state() {
-    return this->state;
 }
 
-void Neuron::set_state(double value) {
-    this->state = value;
+void Neuron::set_bias(double *b) {
+    this->bias = b;
 }
 
-int Neuron::activation_function_get_n_parameters(){
-    return this->n_activation_function_parameters;
-}
-
-void Neuron::activation_function_adjust_parameter(int param_idx, double value) {
-    this->activation_function_parameters[param_idx] += value;
-}
-
-void Neuron::activation_function_set_parameter(int param_idx, double value) {
-    this->activation_function_parameters[param_idx] = value;
-}
-
-
-double Neuron::activation_function_get_parameter(int param_idx) {
-    /**
-     * TODO
-     * Check out of range
-     */
-    return this->activation_function_parameters[param_idx];
-}
-
-void Neuron::add_connection_in(Connection *con) {
-    if(!this->edges_in){
-        this->edges_in = new std::vector<Connection*>(0);
-    }
-    this->edges_in->push_back(con);
-}
-
-void Neuron::add_connection_out(Connection *con) {
-    if(!this->edges_out){
-        this->edges_out = new std::vector<Connection*>(0);
-    }
-    this->edges_out->push_back(con);
-}
-
-std::vector<Connection*>* Neuron::get_connections_in() {
-    return this->edges_in;
-}
-
-std::vector<Connection*>* Neuron::get_connections_out() {
-    return this->edges_out;
-}
-
-size_t Neuron::get_idx() {
-    return this->neural_net_index;
-}
-
-void Neuron::set_idx(size_t value) {
-    this->neural_net_index = value;
-}
-
-Neuron* IDifferentiable::get_derivative() {
-    return nullptr;
-}
-
-
 //template<class Archive>
 //void Neuron::serialize(Archive & ar, const unsigned int version) {
 //    ar << this->potential;
diff --git a/src/Neuron/Neuron.h b/src/Neuron/Neuron.h
index ee8290c57a1075d7b10d549b98b7b3d80ab4ceab..91540ac39d482c0c00d936dd22fbfea5eac2b4e3 100644
--- a/src/Neuron/Neuron.h
+++ b/src/Neuron/Neuron.h
@@ -6,87 +6,40 @@
  * @author Michal KravĨenko
  * @date 2017 - 2018
  */
-
+//TODO  correct docs in this and all child classes
  #ifndef NEURON_H_
  #define NEURON_H_
 
 #include <boost/serialization/base_object.hpp>
 #include <vector>
-#include "../NetConnection/Connection.h"
 
-class Connection;
+class IDifferentiable;
 
 /**
   * Abstract class representing a general neuron
   */
-class Neuron{
+class Neuron {
     friend class boost::serialization::access;
 
 protected:
     /**
-     * Inner potential of the neuron (input of the activation function)
-     */
-    double potential = 0.0;
-
-    /**
-     * State of the neuron (output of the activation function)
-     */
-    double state = 0.0;
-
-    /**
-     * array of the activation function parameters
-     */
-    double *activation_function_parameters = nullptr;
-
-    /**
-     * Number of parameters of the activation function
-     */
-    unsigned int n_activation_function_parameters = 0;
-
-    /**
-     * Keeps track of the number of saturated INCOMING connections
-     */
-    unsigned int n_saturated_connections_in = 0;
-
-    /**
-     * Keeps track of the number of saturated OUTGOING connections
-     */
-    unsigned int n_saturated_connections_out = 0;
-
-    /**
-     * Index of this neuron among the neurons in the neural network
-     */
-    size_t neural_net_index;
-
-    /**
-     * A pointer to a vector containing pointers to incoming connections
-     */
-    std::vector<Connection*> *edges_in = nullptr;
-
-    /**
-     * A pointer to a vector containing pointers to outgoing connections
+     * Pointer to the bias value used by this neuron; the value itself is owned by the parent NeuralNetwork
      */
-    std::vector<Connection*> *edges_out = nullptr;
+    double * bias = nullptr;
 
     template<class Archive>
     void serialize(Archive & ar, const unsigned int version){
         //TODO separate implementation to Neuron.cpp!
-        ar & this->potential;
-        ar & this->state;
-
-        for(unsigned short int i = 0; i < this->n_activation_function_parameters; i++) {
-            ar & this->activation_function_parameters[i];
-        }
+//        ar & this->potential;
+//        ar & this->state;
+//
+//        for(unsigned short int i = 0; i < this->n_activation_function_parameters; i++) {
+//            ar & this->activation_function_parameters[i];
+//        }
     };
 
 public:
 
-    /**
-     * Instantiates a copy of this object and returns a pointer to it
-     * @return
-     */
-    virtual Neuron* get_copy( ) = 0;
-
     /**
      * Destructor of the Neuron object
-     * this level deallocates the array 'activation_function_parameters'
+     * declared virtual so that child classes are destructed correctly
@@ -95,139 +48,17 @@ public:
     virtual ~Neuron();
 
     /**
-     * Performs the activation function and stores the result into the 'state' property
-     */
-    virtual void activate( ) = 0;
-
-    /**
-     * Sets the number of saturated INPUT connections to the prescribed value
-     * @param[in] value The number of saturated INCOMING connections
-     */
-    virtual void set_saturation_in(int value) final;
-
-    /**
-     * Increases the saturation counter of the INCOMING connections by the specified amount
-     * @param value Amount to be added to the counter
-     */
-    virtual void adjust_saturation_in(int value) final;
-
-    /**
-     * Returns true if the neuron is saturated via the INCOMING connections
-     * @return true/false
-     */
-    virtual bool is_saturated_in() final;
-
-    /**
-     * Returns true if the neuron is saturated via the OUTGOING connections
-     * @return true/false
-     */
-    virtual bool is_saturated_out() final;
-
-    /**
-     * Sets the number of saturated OUTPUT connections to the prescribed value
-     * @param[in] value The number of saturated OUTGOING connections
-     */
-    virtual void set_saturation_out(int value) final;
-
-    /**
-     * Increases the saturation counter of the OUTGOING connections by the specified amount
-     * @param value Amount to be added to the counter
-     */
-    virtual void adjust_saturation_out(int value) final;
-
-    /**
-     * Adds the input signal value to the current potential
-     * @param[in] input_signal Input value
-     */
-    virtual void adjust_potential(double input_signal);
-
-    /**
-     * Getter to the 'potential' property
-     * @return Value of the neuron's potential (real number)
-     */
-    virtual double get_potential();
-
-    /**
-     * Setter to the 'potential' property
-     * @param[in] value Value of the neuron's potential (real number)
-     */
-    virtual void set_potential(double value);
-
-    /**
-     * Getter to the 'state' property
-     * @return Value of the current neuron's state
-     */
-    virtual double get_state();
-
-    /**
-     * Setter to the 'state' component
-     * @param[in] value Value of the current neuron's state
-     */
-    virtual void set_state(double value);
-
-    /**
-     * Returns the number of parameters the activation function relies on
-     * @return Number of the parameters
-     */
-    virtual int activation_function_get_n_parameters();
-
-    /**
-     * Adjusts the parameter with index 'param_idx' of the activation function
-     * by the value prescribed by 'value'
-     * @param[in] param_idx Index of the parameter to be adjusted
-     * @param[in] value Value of the adjustment
-     */
-    virtual void activation_function_adjust_parameter(int param_idx, double value);
-
-    /**
-     * Sets the parameter with index 'param_idx' of the activation function
-     * to the value prescribed by 'value'
-     * @param[in] param_idx
-     * @param[in] value
-     */
-    virtual void activation_function_set_parameter(int param_idx, double value);
-
-    /**
-     * Gets the parameter with index 'param_idx' of the activation function
-     * @param[in] param_idx
-     */
-    virtual double activation_function_get_parameter(int param_idx);
-
-    /**
-     * Adds a new incoming connection with this neuron as its end-point
-     * @param[in] con Incoming connection
-     */
-    virtual void add_connection_in(Connection *con) final;
-
-    /**
-     * Adds a new outgoing connection with this neuron as its source-point
-     * @param[in] con Outgoing connection
+     * Performs the activation function and returns the result
      */
-    virtual void add_connection_out(Connection *con) final;
-
-    /**
-     * Returns the pointer to the incoming connection vector
-     * @return
-     */
-    virtual std::vector<Connection*>* get_connections_in( ) final;
-
-    /**
-     * Returns the pointer to the outgoing connection vector
-     * @return
-     */
-    virtual std::vector<Connection*>* get_connections_out( ) final;
+    virtual double activate( double x ) = 0;
 
     /**
      *
-     * @return
+     * Sets the pointer to this neuron's bias
+     * @param[in] b Pointer to the externally stored bias value
      */
-    size_t get_idx();
+    virtual void set_bias( double * b = nullptr );
+
 
-    /**
-     *
-     * @param value
-     */
-    void set_idx(size_t value);
 
 }; /* end of Neuron class */
 
@@ -238,27 +69,26 @@ public:
  * 'get_derivative' methods.
  */
 class IDifferentiable {
-    /**
-     * Calculates the partial derivative of the activation function
-     * with respect to the parameter and the current potential
-     * @param[in] param_idx Index of the parameter to calculate derivative of
-     * @return Partial derivative of the activation function according to the
-     * 'param_idx'-th parameter
-     */
-    virtual double activation_function_eval_partial_derivative(int param_idx) = 0;
 
     /**
-     * Calculates the derivative with respect to the argument, ie the 'potential'
-     * @return f'(x), where 'f(x)' is the activation function and 'x' = 'potential'
+     * Calculates the derivative with respect to the argument 'x'
+     * @return f'(x), where 'f(x)' is the activation function
      */
-    virtual double activation_function_eval_derivative( ) = 0;
+    virtual double activation_function_eval_derivative( double x ) = 0;
+
+    /**
+     * Calculates the derivative with respect to the bias
+     * @return d/db f(x), where 'f(x)' is the activation function, 'x' is its argument
+     * and 'b' is the bias
+     */
+    virtual double activation_function_eval_derivative_bias( double x ) = 0;
 
     /**
      * Returns a Neuron pointer object with activation function being the partial derivative of
      * the activation function of this Neuron object with respect to the argument, i.e. 'potential'
      * @return
      */
-    virtual Neuron* get_derivative( );
+    virtual Neuron* get_derivative( ) = 0;
 
 }; /* end of IDifferentiable class */
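
The refactoring above strips the neuron down to a stateless function of its input: the per-neuron parameter array, the 'potential'/'state' members and the connection bookkeeping are gone, and the only remaining datum is a pointer to an externally stored bias. A minimal sketch of a conforming subclass, assuming only what the header declares (NeuronTanh is hypothetical and not part of this patch):

    #include <cmath>
    #include "Neuron.h"

    /* Illustration only: a stateless neuron under the new interface; the bias
     * is read through the inherited pointer and nullptr is treated as 0.0,
     * following the convention of the concrete classes in this patch. */
    class NeuronTanh : public Neuron {
    public:
        explicit NeuronTanh( double * b = nullptr ) { this->bias = b; }

        double activate( double x ) override {
            double b = this->bias ? *this->bias : 0.0;
            return std::tanh( x + b );
        }
    };
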
 
diff --git a/src/Neuron/NeuronBinary.cpp b/src/Neuron/NeuronBinary.cpp
index 79aac6386112d4f13a2ed25fb450a24b794dcd5e..faad77d5a8e1c4524cbef168c40cb592b49f6743 100644
--- a/src/Neuron/NeuronBinary.cpp
+++ b/src/Neuron/NeuronBinary.cpp
@@ -4,31 +4,23 @@
 
 #include "NeuronBinary.h"
 
-Neuron* NeuronBinary::get_copy( ){
-    NeuronBinary* output = new NeuronBinary( this->activation_function_parameters[0] );
+NeuronBinary::NeuronBinary( double * b ) {
 
-    return output;
-}
-
-NeuronBinary::NeuronBinary(double threshold) {
+    this->bias = b;
 
-    this->n_activation_function_parameters = 2;
-    this->activation_function_parameters = new double[1];
-    this->activation_function_parameters[0] = threshold;
-
-    this->edges_in = new std::vector<Connection*>(0);
-    this->edges_out = new std::vector<Connection*>(0);
 }
 
-void NeuronBinary::activate( ) {
+double NeuronBinary::activate( double x ) {
 
-    double x = this->potential;
-    double threshold = this->activation_function_parameters[0];
+    double b = 0.0;
+    if( this->bias ){
+        b = *this->bias;
+    }
 
-    if(x >= threshold){
-        this->state = 1.0;
+    if(x >= b){
+        return 1.0;
     }
     else{
-        this->state = 0.0;
+        return 0.0;
     }
 }
diff --git a/src/Neuron/NeuronBinary.h b/src/Neuron/NeuronBinary.h
index 6d27f015d58d20fd313df54670f86f77fb37399f..5c33c05ec1532f581b6e986aa4d330b5275f33b1 100644
--- a/src/Neuron/NeuronBinary.h
+++ b/src/Neuron/NeuronBinary.h
@@ -27,19 +27,17 @@ protected:
 
 public:
 
-    Neuron* get_copy( );
-
     /**
      * Default constructor for the binary Neuron
-     * @param[in] threshold Denotes when the neuron is activated
-     * When neuron potential exceeds 'threshold' value it becomes excited
+     * @param[in] b Pointer to the bias acting as the activation threshold
+     * When the input value reaches the bias, the neuron becomes excited
      */
-    explicit NeuronBinary(double threshold = 0.0);
+    explicit NeuronBinary( double * b = nullptr );
 
     /**
-     * Performs the activation function and stores the result into the 'state' property
+     * Performs the activation function and returns the result
      */
-    void activate( ) override;
+    double activate( double x ) override;
 
 };
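
With the parameter array gone, the former 'threshold' constructor argument survives as the bias that the input is compared against. A usage sketch with illustrative values:

    #include <cstdio>
    #include "NeuronBinary.h"

    int main() {
        double threshold = 0.7;                       // externally owned storage
        NeuronBinary step( &threshold );
        std::printf( "%f\n", step.activate( 0.5 ) );  // 0.0: input below the bias
        std::printf( "%f\n", step.activate( 0.9 ) );  // 1.0: input at or above it
        return 0;
    }
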
 
diff --git a/src/Neuron/NeuronConstant.cpp b/src/Neuron/NeuronConstant.cpp
index a656c3f628dd7e4d0716435f363f9f6c0688d5b6..1fcacde80cfe059ad04ede291fffb815cd312e74 100644
--- a/src/Neuron/NeuronConstant.cpp
+++ b/src/Neuron/NeuronConstant.cpp
@@ -2,7 +2,30 @@
  * DESCRIPTION OF THE FILE
  *
  * @author Michal Kravčenko
- * @date 8.8.18 - 
+ * @date 8.8.18 -
  */
 
 #include "NeuronConstant.h"
+
+NeuronConstant::NeuronConstant( double c ) {
+
+    this->p = c;
+
+}
+
+double NeuronConstant::activate( double x ) {
+    return  this->p;
+}
+
+double NeuronConstant::activation_function_eval_derivative_bias( double x ) {
+    return 0.0;
+}
+
+double NeuronConstant::activation_function_eval_derivative( double x ) {
+    return 0.0;
+}
+
+Neuron* NeuronConstant::get_derivative() {
+    NeuronConstant* output = new NeuronConstant( );
+    return output;
+}
\ No newline at end of file
diff --git a/src/Neuron/NeuronConstant.h b/src/Neuron/NeuronConstant.h
index dff05a073c18122b477e414dd30585b39837c095..8da88c2c5dd97b2b4dc9645f4d240add780a09af 100644
--- a/src/Neuron/NeuronConstant.h
+++ b/src/Neuron/NeuronConstant.h
@@ -9,9 +9,52 @@
 #define INC_4NEURO_NEURONCONSTANT_H
 
 
-class NeuronConstant {
+#include "Neuron.h"
 
-};
+class NeuronConstant: public Neuron, public IDifferentiable {
+    friend class boost::serialization::access;
+
+protected:
+    template<class Archive>
+    void serialize(Archive & ar, const unsigned int version){
+        //TODO separate implementation to NeuronConstant.cpp!
+        ar & boost::serialization::base_object<Neuron>(*this);
+    };
+
+public:
+
+    /**
+     * Constructs the object of the Constant neuron with activation function
+     * f(x) = c
+     * @param[in] c Constant value
+     */
+    explicit NeuronConstant( double c = 0.0 );
 
+    /**
+     * Evaluates and returns 'c'
+     */
+    double activate( double x ) override;
 
+    /**
+     * Calculates the partial derivative of the activation function
+     * f(x) = c at point x
+     * @return Partial derivative of the activation function according to the
+     * 'bias' parameter. Returns 0.0
+     */
+    double activation_function_eval_derivative_bias( double x ) override;
+
+    /**
+     * Calculates d/dx of (c) at point x
+     * @return 0.0
+     */
+    double activation_function_eval_derivative( double x ) override;
+
+    /**
+     * Returns a pointer to a Neuron with derivative as its activation function
+     * @return
+     */
+    Neuron* get_derivative( ) override;
+private:
+    /**
+     * The constant value returned by the activation function
+     */
+    double p = 0.0;
+};
 #endif //INC_4NEURO_NEURONCONSTANT_H
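
NeuronConstant ignores both its input and the bias machinery, which is what makes it a safe terminator for the derivative chains of the other neuron types. A short sketch:

    #include <cstdio>
    #include "NeuronConstant.h"

    int main() {
        NeuronConstant c( 2.5 );                                              // f(x) = 2.5
        std::printf( "%f\n", c.activate( -100.0 ) );                          // 2.5: input is ignored
        std::printf( "%f\n", c.activation_function_eval_derivative( 0.0 ) );  // 0.0: zero slope
        return 0;
    }
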
diff --git a/src/Neuron/NeuronLinear.cpp b/src/Neuron/NeuronLinear.cpp
index e0ae690d9e1fd7c752a243fdf20f3a4c4e2166ba..f83bc12e0ab3be80c44fe20ca54c1fec99589934 100644
--- a/src/Neuron/NeuronLinear.cpp
+++ b/src/Neuron/NeuronLinear.cpp
@@ -2,64 +2,35 @@
 // Created by fluffymoo on 11.6.18.
 //
 
-#include <boost/serialization/base_object.hpp>
 #include "NeuronLinear.h"
 
 
-Neuron* NeuronLinear::get_copy( ){
-    NeuronLinear* output = new NeuronLinear( this->activation_function_parameters[0],  this->activation_function_parameters[1]);
 
-    return output;
-}
-
-NeuronLinear::NeuronLinear(double a, double b) {
-
-    this->n_activation_function_parameters = 2;
-    this->activation_function_parameters = new double[2];
+NeuronLinear::NeuronLinear( double * b ) {
 
-    this->activation_function_parameters[0] = a;
-    this->activation_function_parameters[1] = b;
-
-    this->edges_in = new std::vector<Connection*>(0);
-    this->edges_out = new std::vector<Connection*>(0);
+    this->bias = b;
 
 }
 
-void NeuronLinear::activate( ) {
-
-    double x = this->potential;
-    double a = this->activation_function_parameters[0];
-    double b = this->activation_function_parameters[1];
-
-    this->state = b * x + a;
-
-}
-
-double NeuronLinear::activation_function_eval_partial_derivative(int param_idx) {
-
-    if(param_idx == 0){
-        return 1.0;
-    }
-    else if(param_idx == 1){
-        double x = this->potential;
-        return x;
+double NeuronLinear::activate( double x ) {
+    double b = 0.0;
+    if( this->bias ){
+        b = *this->bias;
     }
 
-    return 0.0;
+    return  x + b;
 }
 
-double NeuronLinear::activation_function_eval_derivative( ) {
-    double b = this->activation_function_parameters[1];
+double NeuronLinear::activation_function_eval_derivative_bias( double x ) {
+    return 1.0;
+}
 
-    return b;
+double NeuronLinear::activation_function_eval_derivative( double x ) {
+    return 1.0;
 }
 
 Neuron* NeuronLinear::get_derivative() {
-    NeuronLinear* output = nullptr;
-
-    //derivative according to 'x'
-    output = new NeuronLinear(this->activation_function_parameters[1], 0.0);
-
+    NeuronConstant* output = new NeuronConstant( 1.0 );
     return output;
 }
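
Differentiation of the linear neuron now degrades into the constant neuron: d/dx (x + b) = 1, so get_derivative() hands back a NeuronConstant carrying 1.0, and differentiating once more yields the zero constant. A sketch:

    #include <cstdio>
    #include "NeuronLinear.h"

    int main() {
        NeuronLinear lin;                              // f(x) = x; a null bias reads as 0.0
        Neuron *d1 = lin.get_derivative();             // NeuronConstant with c = 1.0
        std::printf( "%f\n", d1->activate( 123.0 ) );  // 1.0 regardless of the input
        delete d1;
        return 0;
    }
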
 
diff --git a/src/Neuron/NeuronLinear.h b/src/Neuron/NeuronLinear.h
index ea3532431b5012d92259ced4aa08285834265a5a..40289aa6ca9301384348e3330b53070d6fce0775 100644
--- a/src/Neuron/NeuronLinear.h
+++ b/src/Neuron/NeuronLinear.h
@@ -11,6 +11,9 @@
 #define INC_4NEURO_NEURONLINEAR_H
 
 #include "Neuron.h"
+#include "NeuronConstant.h"
+#include <boost/serialization/base_object.hpp>
+
 
 /**
- * Linear neuron class - uses activation function in the form f(x)=a*x + b,
+ * Linear neuron class - uses activation function in the form f(x) = x + b,
@@ -28,42 +31,37 @@ protected:
 
 public:
 
-    Neuron* get_copy( );
-
     /**
      * Constructs the object of the Linear neuron with activation function
-     * f(x) = b * x + a
-     * @param[in] a First coefficient, stored in activation_function_parameters[0]
-     * @param[in] b Second coefficient, stored in activation_function_parameters[1]
+     * f(x) = x + b
+     * @param[in] b Bias
      */
-    explicit NeuronLinear(double a = 0.0, double b = 0.0);
+    explicit NeuronLinear( double * b = nullptr );
 
     /**
-     * Evaluates 'b*x + a' and stores the result into the 'state' property
+     * Evaluates 'x + b' and returns the result
      */
-    void activate( ) override;
+    double activate( double x ) override;
 
     /**
      * Calculates the partial derivative of the activation function
-     * f(x) = b*x + a at point x
-     * @param[in] param_idx Index of the parameter to calculate derivative of
+     * f(x) = x + b at point x
      * @return Partial derivative of the activation function according to the
-     * 'param_idx'-th parameter. For 'param_idx'=0 returns 1, for 'param_idx=1'
-     * return 'x' and otherwise returns 0.
+     * 'bias' parameter. Returns 1.0
      */
-    double activation_function_eval_partial_derivative(int param_idx) override;
+    double activation_function_eval_derivative_bias( double x ) override;
 
     /**
-     * Calculates d/dx of (b*x + a) at point x
-     * @return b
+     * Calculates d/dx of (x + b) at point x
+     * @return 1.0
      */
-    double activation_function_eval_derivative( ) override;
+    double activation_function_eval_derivative( double x ) override;
 
     /**
      * Returns a pointer to a Neuron with derivative as its activation function
      * @return
      */
-    Neuron* get_derivative() override;
+    Neuron* get_derivative( ) override;
 
 };
 
diff --git a/src/Neuron/NeuronLogistic.cpp b/src/Neuron/NeuronLogistic.cpp
index ac873d1a4488444d353291b3d382745001a75542..7dbd9d71c05e828c8d45e763026647f1db88722c 100644
--- a/src/Neuron/NeuronLogistic.cpp
+++ b/src/Neuron/NeuronLogistic.cpp
@@ -5,252 +5,128 @@
 
 #include "NeuronLogistic.h"
 
-Neuron* NeuronLogistic_d2::get_copy( ){
-    NeuronLogistic_d2* output = new NeuronLogistic_d2( this->activation_function_parameters[0],  this->activation_function_parameters[1]);
-
-    return output;
+NeuronLogistic_d2::NeuronLogistic_d2( double * b ) {
+    this->bias = b;
 }
 
-NeuronLogistic_d2::NeuronLogistic_d2(double a, double b) {
-
-    this->n_activation_function_parameters = 2;
-    this->activation_function_parameters = new double[2];
-
-    this->activation_function_parameters[0] = a;
-    this->activation_function_parameters[1] = b;
-
-    this->edges_in = new std::vector<Connection*>(0);
-    this->edges_out = new std::vector<Connection*>(0);
-}
-
-void NeuronLogistic_d2::activate( ) {
-    //[(1 + a) e^(b-x) (1 + e^(b - x))^(-1) - 1]a e^(b - x) (1 + e^(b - x))^(-1 - a)
-
-    double a = this->activation_function_parameters[0];
-    double b = this->activation_function_parameters[1];
-    double x = this->potential;
-
-    double ex = std::pow(E, b - x);
-    double ex2 = std::pow(ex + 1.0, -a - 1.0);
-    double ex3 = 1.0 / (1.0 + ex);
-
-    this->state = -a * ex * ex2 * (1.0 - (a + 1.0) * ex * ex3);
-
-}
-
-double NeuronLogistic_d2::activation_function_eval_partial_derivative(int param_idx ) {
-
-    double a = this->activation_function_parameters[0];
-    double b = this->activation_function_parameters[1];
-    double x = this->potential;
-
-    if(param_idx == 0){
-        //partial derivative according to parameter 'a'
-        //(e^b (e^(b - x) + 1)^(-a) (a^2 (-e^b) log(e^(b - x) + 1) + a e^x log(e^(b - x) + 1) + 2 a e^b - e^x))/(e^b + e^x)^2
-        double eb = std::pow(E, b);
-        double ex = std::pow(E, x);
-        double ebx= eb / ex;
-        double ebxa = std::pow(ebx + 1.0, -a);
-        double lbx = std::log(ebx + 1.0);
-
-        return eb * ebxa * (a * lbx * ( a * (-eb) + ex ) + 2.0 * a * eb - ex) / ((eb + ex) * (eb + ex));
-    }
-    else if(param_idx == 1){
-        //partial derivative according to parameter 'b'
-        //-(a e^b (e^(b - x) + 1)^(-a) (a^2 e^(2 b) - 3 a e^(b + x) - e^(b + x) + e^(2 x)))/(e^b + e^x)^3
-
-        double eb = std::pow(E, b);
-        double ex = std::pow(E, x);
-        double ebx= eb / ex;
-        double ebxa = std::pow(ebx + 1.0, -a);
+double NeuronLogistic_d2::activate( double x ) {
+    //(e^(b + x) (e^b - e^x))/(e^b + e^x)^3
 
-        return  - a * eb * ebxa * (a * a * eb * eb - 3.0 * a * ebx - ebx + ex * ex) / ((eb + ex) * (eb + ex) * (eb + ex));
+    double b = 0.0;
+    if( this->bias ){
+        b = *this->bias;
     }
+    double ex = std::pow(E, x);
+    double eb = std::pow(E, b);
 
-    return 0.0;
+    return (eb*ex*(eb - ex))/((eb + ex)*(eb + ex)*(eb + ex));
 }
 
-double NeuronLogistic_d2::activation_function_eval_derivative() {
-    //(a e^b (1 + e^(b - x))^(-a) (a^2 e^(2 b) + e^(2 x) - e^(b + x) - 3 a e^(b + x)))/(e^b + e^x)^3
-    double a = this->activation_function_parameters[0];
-    double b = this->activation_function_parameters[1];
-    double x = this->potential;
+double NeuronLogistic_d2::activation_function_eval_derivative_bias( double x ) {
+    //-(e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
 
+    double b = 0.0;
+    if( this->bias ){
+        b = *this->bias;
+    }
     double eb = std::pow(E, b);
     double ex = std::pow(E, x);
-    double ebx= eb / ex;
-    double ebxa = std::pow(ebx + 1.0, -a);
 
-    return  a * eb * ebxa * (a * a * eb * eb - 3.0 * a * ebx - ebx + ex * ex) / ((eb + ex) * (eb + ex) * (eb + ex));
+    return  -(eb*ex*(-4*eb*ex + eb*eb +ex*ex))/((eb + ex)*(eb + ex)*(eb + ex)*(eb + ex));
 }
 
-
-Neuron* NeuronLogistic_d1::get_copy( ){
-    NeuronLogistic_d1* output = new NeuronLogistic_d1( this->activation_function_parameters[0],  this->activation_function_parameters[1]);
-
-    return output;
+double NeuronLogistic_d2::activation_function_eval_derivative( double x ) {
+    //(e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
+    return -this->activation_function_eval_derivative_bias( x );
 }
 
-NeuronLogistic_d1::NeuronLogistic_d1(double a, double b) {
-
-    this->n_activation_function_parameters = 2;
-    this->activation_function_parameters = new double[2];
-
-    this->activation_function_parameters[0] = a;
-    this->activation_function_parameters[1] = b;
-
-    this->edges_in = new std::vector<Connection*>(0);
-    this->edges_out = new std::vector<Connection*>(0);
+NeuronLogistic* NeuronLogistic_d2::get_derivative() {
+    //TODO maybe not the best way
+    return nullptr;
 }
 
-
-void NeuronLogistic_d1::activate( ) {
-    //a*e^(b - x)*(1 + e^(b - x))^(-1 - a)
-
-    double a = this->activation_function_parameters[0];
-    double b = this->activation_function_parameters[1];
-    double x = this->potential;
-
-    double ex = std::pow(E, b - x);
-    this->state = a * ex * std::pow(1.0 + ex, -a - 1.0);
-
+NeuronLogistic_d1::NeuronLogistic_d1( double * b ) {
+    this->bias = b;
 }
 
-double NeuronLogistic_d1::activation_function_eval_partial_derivative(int param_idx ) {
-
-    double a = this->activation_function_parameters[0];
-    double b = this->activation_function_parameters[1];
-    double x = this->potential;
 
-    if(param_idx == 0){
-        //partial derivative according to parameter 'a'
-        //e^(b - x) (1 + e^(b - x))^(-1 - a)[1 - a log(1 + e^(b - x))]
-        double ex = std::pow(E, b - x);
-        double ex2= std::pow(1.0 + ex, -1.0 - a);
-
-        return ex * ex2 * (1.0 - a * std::log(1.0 + ex));
-    }
-    else if(param_idx == 1){
-        //partial derivative according to parameter 'b'
-        //[(-1 - a) e^(b-x) (1 + e^(b - x))^(-1) + 1] * a e^(b - x) (1 + e^(b - x))^(-1 - a)
-        double ex = std::pow(E, b - x);
-        double ex2 = std::pow(ex + 1.0, -a - 1.0);
-        double ex3 = 1.0 / (1.0 + ex);
-
-        return a * ex * ex2 * (1.0 - (a + 1.0) * ex * ex3);
+double NeuronLogistic_d1::activate( double x ) {
+    //e^(b - x)/(e^(b - x) + 1)^2
+    double b = 0.0;
+    if( this->bias ){
+        b = *this->bias;
     }
+    double ex = std::pow(E, x);
+    double eb = std::pow(E, b);
+    double d = (eb/ex);
 
-    return 0.0;
+    return d/((d + 1)*(d + 1));
 }
 
-double NeuronLogistic_d1::activation_function_eval_derivative() {
-    //[(1 + a) e^(b-x) (1 + e^(b - x))^(-1) - 1]a e^(b - x) (1 + e^(b - x))^(-1 - a)
-
-    double a = this->activation_function_parameters[0];
-    double b = this->activation_function_parameters[1];
-    double x = this->potential;
+double NeuronLogistic_d1::activation_function_eval_derivative_bias( double x ) {
+    //(e^(b + x) (e^x - e^b))/(e^b + e^x)^3
+    double b = 0.0;
+    if( this->bias ){
+        b = *this->bias;
+    }
 
-    double ex = std::pow(E, b - x);
-    double ex2 = std::pow(ex + 1.0, -a - 1.0);
-    double ex3 = 1.0 / (1.0 + ex);
+    double ex = std::pow(E, x);
+    double eb = std::pow(E, b);
 
-    return -a * ex * ex2 * (1.0 - (a + 1.0) * ex * ex3);
+    return (eb*ex* (ex - eb))/((eb + ex)*(eb + ex)*(eb + ex));
 }
 
-NeuronLogistic_d2* NeuronLogistic_d1::get_derivative() {
+double NeuronLogistic_d1::activation_function_eval_derivative( double x ) {
+    //(e^(b + x) (e^b - e^x))/(e^b + e^x)^3
+    return -this->activation_function_eval_derivative_bias( x );
+}
 
-    //[(1 + a) e^(b-x) (1 + e^(b - x))^(-1) - 1]a e^(b - x) (1 + e^(b - x))^(-1 - a)
+NeuronLogistic* NeuronLogistic_d1::get_derivative( ) {
+    //(e^(b + x) (e^b - e^x))/(e^b + e^x)^3
     NeuronLogistic_d2* output = nullptr;
 
-    double a = this->activation_function_parameters[0];
-    double b = this->activation_function_parameters[1];
-    output = new NeuronLogistic_d2(a, b);
+    output = new NeuronLogistic_d2( this->bias );
 
     return output;
-
 }
 
-
-Neuron* NeuronLogistic::get_copy( ){
-    NeuronLogistic* output = new NeuronLogistic( this->activation_function_parameters[0],  this->activation_function_parameters[1]);
-
-    return output;
+NeuronLogistic::NeuronLogistic( double * b ) {
+    this->bias = b;
 }
 
-NeuronLogistic::NeuronLogistic(double a, double b) {
-
-    this->n_activation_function_parameters = 2;
-    this->activation_function_parameters = new double[2];
-
-    this->activation_function_parameters[0] = a;
-    this->activation_function_parameters[1] = b;
-
-    this->edges_in = new std::vector<Connection*>(0);
-    this->edges_out = new std::vector<Connection*>(0);
-}
-
-void NeuronLogistic::activate( ) {
-    double a = this->activation_function_parameters[0];
-    double b = this->activation_function_parameters[1];
-    double x = this->potential;
+double NeuronLogistic::activate( double x ) {
+    //(1 + e^(-x + b))^(-1)
 
+    double b = 0.0;
+    if( this->bias ){
+        b = *this->bias;
+    }
     double ex = std::pow(E, b - x);
-    this->state = std::pow(1.0 + ex, -a);
-
+    return std::pow(1.0 + ex, -1);
 }
 
-double NeuronLogistic::activation_function_eval_partial_derivative(int param_idx ) {
-
-    double a = this->activation_function_parameters[0];
-    double b = this->activation_function_parameters[1];
-    double x = this->potential;
-
-    if(param_idx == 0){
-        //partial derivative according to parameter 'a'
-        double ex = std::pow(E, b - x);
-
-        double exa= -std::pow(ex + 1.0, -a);
-
-        return exa * std::log(ex + 1.0);
+double NeuronLogistic::activation_function_eval_derivative_bias( double x ) {
+    //-e^(b - x)/(e^(b - x) + 1)^2
+    double b = 0.0;
+    if( this->bias ){
+        b = *this->bias;
     }
-    else if(param_idx == 1){
-        //partial derivative according to parameter 'b'
-        /**
-         * TODO
-         * Could be write as activation_function_get_derivative() * -1
-         */
-        double ex = std::pow(E, b - x);
-        double ex2 = std::pow(ex + 1.0, -a - 1.0);
-
-        return -a * ex * ex2;
-    }
-
-    return 0.0;
+    double ex = std::pow(E, b - x);
 
+    return -ex/((ex + 1)*(ex + 1));
 }
 
 
-double NeuronLogistic::activation_function_eval_derivative( ) {
-
-    double a = this->activation_function_parameters[0];
-    double b = this->activation_function_parameters[1];
-    double x = this->potential;
-
-    double ex = std::pow(E, b - x);
-    double ex2 = std::pow(ex + 1.0, -a - 1.0);
-
-    return a * ex * ex2;
+double NeuronLogistic::activation_function_eval_derivative( double x ) {
+    //e^(b - x)/(e^(b - x) + 1)^2
+    return -this->activation_function_eval_derivative_bias( x );
 
 }
 
-NeuronLogistic_d1* NeuronLogistic::get_derivative() {
+NeuronLogistic* NeuronLogistic::get_derivative( ) {
 
     NeuronLogistic_d1 *output = nullptr;
-    double a = this->activation_function_parameters[0];
-    double b = this->activation_function_parameters[1];
-
-    output = new NeuronLogistic_d1(a, b);
-    output->set_potential( this->potential );
+    output = new NeuronLogistic_d1( this->bias );
 
     return output;
 
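
The closed-form derivatives above are easy to spot-check numerically. A sketch using central differences (the bias, sample point and step size are illustrative):

    #include <cstdio>
    #include "NeuronLogistic.h"

    int main() {
        double b = 0.3, x = 0.8, h = 1e-6;

        NeuronLogistic f( &b );
        NeuronLogistic *df = f.get_derivative();   // NeuronLogistic_d1 sharing the same bias
        double fd_x = ( f.activate( x + h ) - f.activate( x - h ) ) / ( 2.0 * h );
        std::printf( "d/dx: analytic %.9f, numeric %.9f\n", df->activate( x ), fd_x );

        double bp = b + h, bm = b - h;             // perturb the bias through fresh pointers
        NeuronLogistic fp( &bp ), fm( &bm );
        double fd_b = ( fp.activate( x ) - fm.activate( x ) ) / ( 2.0 * h );
        std::printf( "d/db: analytic %.9f, numeric %.9f\n",
                     f.activation_function_eval_derivative_bias( x ), fd_b );

        delete df;
        return 0;
    }
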
diff --git a/src/Neuron/NeuronLogistic.h b/src/Neuron/NeuronLogistic.h
index c290870106471d27580ab28f94fe4e3f3cedc85c..af66b545e3f693cfec1541d537e57aa623abf26d 100644
--- a/src/Neuron/NeuronLogistic.h
+++ b/src/Neuron/NeuronLogistic.h
@@ -14,51 +14,51 @@
 #include "Neuron.h"
 #include "../constants.h"
 
-class NeuronLogistic_d2:public Neuron, public IDifferentiable {
+
+class NeuronLogistic:public Neuron, public IDifferentiable {
     friend class boost::serialization::access;
+
 protected:
     template<class Archive>
     void serialize(Archive & ar, const unsigned int version){
-        //TODO separate implementation to NeuronLogistic_d1.cpp!
+        //TODO separate implementation to NeuronLogistic.cpp!
         ar & boost::serialization::base_object<Neuron>(*this);
     };
-public:
-
-    Neuron* get_copy( );
 
+public:
     /**
      * Constructs the object of the Logistic neuron with activation function
-     * f(x) = [(1 + a) e^(b-x) (1 + e^(b - x))^(-1) - 1]a e^(b - x) (1 + e^(b - x))^(-1 - a)
-     * @param[in] a First coefficient, stored in activation_function_parameters[0]
-     * @param[in] b Second coefficient, stored in activation_function_parameters[1]
+     * f(x) = (1 + e^(-x + b))^(-1)
+     * @param[in] b Bias
      */
-    explicit NeuronLogistic_d2(double a = 0.0, double b = 0.0);
+    explicit NeuronLogistic( double * b = nullptr );
 
     /**
-     * Evaluates 'a*e^(b - x)*(1 + e^(b - x))^(-1 - a)' and stores the result into the 'state' property
+     * Evaluates '(1 + e^(-x + b))^(-1)' and returns the result
      */
-    void activate( ) override;
+    virtual double activate( double x ) override;
 
     /**
      * Calculates the partial derivative of the activation function
-     * f(x) = [(1 + a) e^(b-x) (1 + e^(b - x))^(-1) - 1]a e^(b - x) (1 + e^(b - x))^(-1 - a)
-     * @param[in] param_idx Index of the parameter to calculate derivative of
+     * f(x) = (1 + e^(-x + b))^(-1)
      * @return Partial derivative of the activation function according to the
-     * 'param_idx'-th parameter.
+     * bias, returns: -e^(b - x)/(e^(b - x) + 1)^2
      */
-    double activation_function_eval_partial_derivative(int param_idx) override;
-
+    virtual double activation_function_eval_derivative_bias( double x ) override;
     /**
-     * Calculates d/dx of  [(1 + a) e^(b-x) (1 + e^(b - x))^(-1) - 1]a e^(b - x) (1 + e^(b - x))^(-1 - a)
-     * @return (a e^b (1 + e^(b - x))^(-a) (a^2 e^(2 b) + e^(2 x) - e^(b + x) - 3 a e^(b + x)))/(e^b + e^x)^3
+     * Calculates d/dx of (1 + e^(-x + b))^(-1)
+     * @return e^(b - x)/(e^(b - x) + 1)^2
      */
-    double activation_function_eval_derivative( ) override;
+    virtual double activation_function_eval_derivative( double x ) override;
 
+    /**
+     * Returns a pointer to a Neuron with derivative as its activation function
+     * @return
+     */
+    virtual NeuronLogistic* get_derivative( ) override;
 };
 
 
-
-class NeuronLogistic_d1:public Neuron, public IDifferentiable {
+class NeuronLogistic_d1:public NeuronLogistic {
     friend class boost::serialization::access;
 protected:
     template<class Archive>
@@ -68,90 +68,86 @@ protected:
     };
 public:
 
-    Neuron* get_copy( );
-
     /**
      * Constructs the object of the Logistic neuron with activation function
-     * f(x) = a*e^(b - x)*(1 + e^(b - x))^(-1 - a)
-     * @param[in] a First coefficient, stored in activation_function_parameters[0]
-     * @param[in] b Second coefficient, stored in activation_function_parameters[1]
+     * f(x) = e^(b - x)/(e^(b - x) + 1)^2
+     * @param[in] b Bias
      */
-    explicit NeuronLogistic_d1(double a = 0.0, double b = 0.0);
+    explicit NeuronLogistic_d1( double * b = nullptr );
 
     /**
-     * Evaluates 'a*e^(b - x)*(1 + e^(b - x))^(-1 - a)' and stores the result into the 'state' property
+     * Evaluates 'e^(b - x)/(e^(b - x) + 1)^2' and returns the result
      */
-    void activate( ) override;
+    virtual double activate( double x ) override;
 
     /**
      * Calculates the partial derivative of the activation function
-     * f(x) = a*e^(b - x)*(1 + e^(b - x))^(-1 - a)
-     * @param[in] param_idx Index of the parameter to calculate derivative of
+     * f(x) = e^(b - x)/(e^(b - x) + 1)^2
      * @return Partial derivative of the activation function according to the
-     * 'param_idx'-th parameter.
+     * bias, returns: (e^(b + x) (e^x - e^b))/(e^b + e^x)^3
      */
-    double activation_function_eval_partial_derivative(int param_idx) override;
+    virtual double activation_function_eval_derivative_bias( double x ) override;
 
     /**
-     * Calculates d/dx of  [a*e^(b - x)*(1 + e^(b - x))^(-1 - a)]
-     * @return [[(1 + a) e^(b-x) (1 + e^(b - x))^(-1) - 1]a e^(b - x) (1 + e^(b - x))^(-1 - a)]
+     * Calculates d/dx of  e^(b - x)*(1 + e^(b - x))^(-2)
+     * @return  (e^(b + x) (e^b - e^x))/(e^b + e^x)^3
      */
-    double activation_function_eval_derivative( ) override;
+    virtual double activation_function_eval_derivative( double x ) override;
 
     /**
      * Returns a pointer to a Neuron with derivative as its activation function
      * @return
      */
-    NeuronLogistic_d2* get_derivative() override;
+    virtual NeuronLogistic* get_derivative( ) override;
 };
 
 
-class NeuronLogistic:public Neuron, public IDifferentiable {
-    friend class boost::serialization::access;
 
+
+
+class NeuronLogistic_d2:public NeuronLogistic_d1 {
+    friend class boost::serialization::access;
 protected:
     template<class Archive>
     void serialize(Archive & ar, const unsigned int version){
-        //TODO separate implementation to NeuronLogistic.cpp!
+        //TODO separate implementation to NeuronLogistic_d2.cpp!
         ar & boost::serialization::base_object<Neuron>(*this);
     };
-
 public:
 
-    Neuron* get_copy( );
     /**
      * Constructs the object of the Logistic neuron with activation function
-     * f(x) = (1 + e^(-x + b))^(-a)
-     * @param[in] a First coefficient, stored in activation_function_parameters[0]
-     * @param[in] b Second coefficient, stored in activation_function_parameters[1]
+     * f(x) = (e^(b + x) (e^b - e^x))/(e^b + e^x)^3
+     * @param[in] b Bias
      */
-    explicit NeuronLogistic(double a = 0.0, double b = 0.0);
+    explicit NeuronLogistic_d2( double * b = nullptr );
 
     /**
-     * Evaluates '(1 + e^(-x + b))^(-a)' and stores the result into the 'state' property
+     * Evaluates '(e^(b + x) (e^b - e^x))/(e^b + e^x)^3' and returns the result
      */
-    void activate( ) override;
+    virtual double activate( double x ) override;
 
     /**
      * Calculates the partial derivative of the activation function
-     * f(x) = (1 + e^(-x + b))^(-a)
-     * @param[in] param_idx Index of the parameter to calculate derivative of
+     * f(x) = (e^(b + x) (e^b - e^x))/(e^b + e^x)^3
      * @return Partial derivative of the activation function according to the
-     * 'param_idx'-th parameter.
+     * bias, returns: -(e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
      */
-    double activation_function_eval_partial_derivative(int param_idx) override;
+    virtual double activation_function_eval_derivative_bias( double x ) override;
+
     /**
-     * Calculates d/dx of (1 + e^(-x + b))^(-a)
-     * @return a * e^(b - x) * [e^(b - x) + 1]^(-a)
+     * Calculates d/dx of  (e^(b + x) (e^b - e^x))/(e^b + e^x)^3
+     * @return (e^(b + x) (-4 e^(b + x) + e^(2 b) + e^(2 x)))/(e^b + e^x)^4
      */
-    double activation_function_eval_derivative( ) override;
+    virtual double activation_function_eval_derivative( double x ) override;
 
     /**
-     * Returns a pointer to a Neuron with derivative as its activation function
+     * Returns a pointer to a Neuron with the derivative as its activation function;
+     * currently unimplemented, returns nullptr (see the TODO in NeuronLogistic.cpp)
      * @return
      */
-    NeuronLogistic_d1* get_derivative() override;
+    virtual NeuronLogistic* get_derivative( ) override;
+
 };
 
 
+
 #endif //INC_4NEURO_NEURONLOGISTIC_H
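
Because the first and second derivatives are now subclasses of NeuronLogistic, repeated differentiation becomes a uniform pointer walk through a single interface; this is the pattern DESolver relies on below when it assembles derivative networks. The chain currently stops after the second derivative (see the TODO in NeuronLogistic.cpp). A fragment, assuming NeuronLogistic.h is included:

    double b = 0.0;
    NeuronLogistic sigma( &b );                   // f
    NeuronLogistic *d1 = sigma.get_derivative();  // f'  (NeuronLogistic_d1)
    NeuronLogistic *d2 = d1->get_derivative();    // f'' (NeuronLogistic_d2)
    NeuronLogistic *d3 = d2->get_derivative();    // nullptr for now
    delete d2; delete d1;
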
diff --git a/src/Solvers/DESolver.cpp b/src/Solvers/DESolver.cpp
index 7ed3853c03fdfebab0585e3690d0fedd0ce550f6..562c27755bc4dea4367646113db79e8c39bc8892 100644
--- a/src/Solvers/DESolver.cpp
+++ b/src/Solvers/DESolver.cpp
@@ -7,24 +7,26 @@
 
 #include "DESolver.h"
 
-MultiIndex::MultiIndex(unsigned int dimension) {
+//TODO add support for multiple unknown functions
+
+MultiIndex::MultiIndex(size_t dimension) {
     this->dim = dimension;
     this->partial_derivatives_degrees.resize( this->dim );
     std::fill( this->partial_derivatives_degrees.begin(), this->partial_derivatives_degrees.end(), 0 );
 }
 
-void MultiIndex::set_partial_derivative(unsigned int index, unsigned int value) {
+void MultiIndex::set_partial_derivative(size_t index, size_t value) {
     this->partial_derivatives_degrees[ index ] = value;
 }
 
-std::vector<unsigned int>* MultiIndex::get_partial_derivatives_degrees() {
+std::vector<size_t>* MultiIndex::get_partial_derivatives_degrees() {
     return &this->partial_derivatives_degrees;
 }
 
 bool MultiIndex::operator<(const MultiIndex &rhs) const {
     if(dim < rhs.dim){ return true; }
 
-    for(unsigned int i = 0; i < dim; ++i){
+    for(size_t i = 0; i < dim; ++i){
         if(partial_derivatives_degrees[i] < rhs.partial_derivatives_degrees[i]){
             return true;
         }
@@ -36,18 +38,18 @@ std::string MultiIndex::to_string( )const {
     std::string output;
     char buff[ 255 ];
 
-    for( unsigned int i = 0; i < this->dim - 1; ++i){
-        sprintf(buff, "%d, ", this->partial_derivatives_degrees[i]);
+    for( size_t i = 0; i < this->dim - 1; ++i){
+        sprintf(buff, "%d, ", (int)this->partial_derivatives_degrees[i]);
         output.append( buff );
     }
-    sprintf(buff, "%d", this->partial_derivatives_degrees[this->dim - 1]);
+    sprintf(buff, "%d", (int)this->partial_derivatives_degrees[this->dim - 1]);
     output.append( buff );
 
     return output;
 }
 
-unsigned int MultiIndex::get_degree() const{
-    unsigned int output = 0;
+size_t MultiIndex::get_degree() const{
+    size_t output = 0;
 
     for( auto i: this->partial_derivatives_degrees ){
         output += i;
@@ -57,82 +59,86 @@ unsigned int MultiIndex::get_degree() const{
 }
 
 
-DESolver::DESolver( unsigned int n_equations, unsigned int n_inputs, unsigned int m, unsigned int n_outputs ) {
+DESolver::DESolver( size_t n_equations, size_t n_inputs, size_t m ) {
 
-    if( m <= 0 || n_inputs <= 0 || n_outputs <= 0 || n_equations <= 0 ){
+    if( m == 0 || n_inputs == 0 || n_equations == 0 ){
-        throw std::invalid_argument("Parameters 'm', 'n_equations', 'n_inputs' and 'n_outputs' must be greater than zero!");
+        throw std::invalid_argument("Parameters 'm', 'n_equations' and 'n_inputs' must be greater than zero!");
     }
-    printf("Differential Equation Solver with %d equations\n--------------------------------------------------------------------------\n", n_equations);
+    printf("Differential Equation Solver with %d equations\n--------------------------------------------------------------------------\n", (int)n_equations);
 
-    printf("Constructing NN structure representing the solution [%d input neurons][%d inner neurons][%d output neurons]...\n", n_inputs, m, n_outputs);
+    printf("Constructing NN structure representing the solution [%d input neurons][%d inner neurons]...\n", (int)n_inputs, (int)m);
 
     this->dim_i = n_inputs;
-    this->dim_o = n_outputs;
     this->dim_inn= m;
     this->n_equations = n_equations;
 
-    this->solution = new NeuralNetwork();
+    this->solution = new NeuralNetwork( (n_inputs + 1) * m, m );
 
     this->solution_inner_neurons = new std::vector<NeuronLogistic*>(0);
     this->solution_inner_neurons->reserve( m );
 
     /* input neurons */
     std::vector<size_t> input_set( this->dim_i );
-    for( unsigned int i = 0; i < this->dim_i; ++i ){
-        NeuronLinear *input_i = new NeuronLinear(0.0, 1.0);  //f(x) = x
-        this->solution->add_neuron( input_i );
-        input_set[i] = i;
+    size_t idx;
+    for( size_t i = 0; i < this->dim_i; ++i ){
+        NeuronLinear *input_i = new NeuronLinear( );  //f(x) = x
+        idx = this->solution->add_neuron_no_bias( input_i );
+        input_set[i] = idx;
     }
     this->solution->specify_input_neurons( input_set );
+    size_t first_input_neuron = input_set[0];
 
-    /* output neurons */
-    std::vector<size_t> output_set( this->dim_o );
-    for( unsigned int i = 0; i < this->dim_o; ++i ){
-        NeuronLinear *output_i = new NeuronLinear(0.0, 1.0);  //f(x) = x
-        this->solution->add_neuron( output_i );
-        output_set[i] = i + this->dim_i;
-    }
+    /* output neuron */
+    std::vector<size_t> output_set( 1 );
+    idx = this->solution->add_neuron_no_bias( new NeuronLinear( ) );//f(x) = x
+    output_set[0] = idx;
     this->solution->specify_output_neurons( output_set );
+    size_t first_output_neuron = idx;
 
     /* inner neurons */
-    for(unsigned int i = 0; i < this->dim_inn; ++i){
-        NeuronLogistic *inner_i = new NeuronLogistic(1.0, 0.0); //f(x) = 1.0 / (1.0 + e^(-x))
+    size_t first_inner_neuron = 0;
+    for(size_t i = 0; i < this->dim_inn; ++i){
+        NeuronLogistic *inner_i = new NeuronLogistic( ); //f(x) = 1.0 / (1.0 + e^(-x))
         this->solution_inner_neurons->push_back( inner_i );
-        this->solution->add_neuron( inner_i );
+        idx = this->solution->add_neuron( inner_i );
+
+        if(i == 0){
+            first_inner_neuron = idx;
+        }
     }
 
+
     /* connections between input neurons and inner neurons */
     size_t weight_idx;
-    unsigned int dim_shift = this->dim_i + this->dim_o;
-    for(unsigned int i = 0; i < this->dim_i; ++i){
-        for(unsigned int j = 0; j < this->dim_inn; ++j){
-            weight_idx = this->solution->add_connection_simple(i, dim_shift + j);
-            printf("  adding a connection between input neuron %2d and inner neuron %2d, weight index %d\n", i, j, weight_idx);
+    for(size_t i = 0; i < this->dim_i; ++i){
+        for(size_t j = 0; j < this->dim_inn; ++j){
+            weight_idx = this->solution->add_connection_simple(first_input_neuron + i, first_inner_neuron + j);
+            printf("  adding a connection between input neuron %2d[%2d] and inner neuron  %2d[%2d], weight index %3d\n", (int)i, (int)(first_input_neuron + i), (int)j, (int)(first_inner_neuron + j), (int)weight_idx);
         }
     }
 
     /* connections between inner neurons and output neurons */
-    for(unsigned int i = 0; i < this->dim_inn; ++i){
-        for(unsigned int j = 0; j < this->dim_o; ++j){
-            weight_idx = this->solution->add_connection_simple(dim_shift + i, this->dim_i + j);
-            printf("  adding a connection between inner neuron %2d and output neuron %2d, weight index %d\n", i, j, weight_idx);
-        }
+    for(size_t i = 0; i < this->dim_inn; ++i){
+        weight_idx = this->solution->add_connection_simple(first_inner_neuron + i, first_output_neuron);
+        printf("  adding a connection between inner neuron %2d[%2d] and output neuron %2d[%2d], weight index %3d\n", (int)i, (int)(first_inner_neuron + i), 0, (int)(first_output_neuron ), (int)weight_idx);
     }
 
     MultiIndex initial_mi(this->dim_i);
 
     this->map_multiindices2nn[initial_mi] = this->solution;
 
-    this->differential_equations = new std::vector<NeuralNetworkSum*>(this->n_equations);
+    this->differential_equations = new std::vector<NeuralNetworkSum*>(0);
+    this->differential_equations->reserve(this->n_equations);
 
-    for( unsigned int i = 0; i < this->n_equations; ++i ){
+    for( size_t i = 0; i < this->n_equations; ++i ){
-        this->differential_equations->at( i ) = new NeuralNetworkSum();
+        NeuralNetworkSum *new_sum = new NeuralNetworkSum();
+        this->differential_equations->push_back(new_sum);
     }
 
     this->errors_functions_types = new std::vector<ErrorFunctionType >(this->n_equations);
     this->errors_functions_data_sets = new std::vector<DataSet*>(this->n_equations);
 
-    printf("done\n\n");
+    printf("done\n");
 
 }
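
The constructor wires a fully connected structure: n_inputs * m weights from the inputs into the inner layer, m weights from the inner layer to the single output, and one bias per inner logistic neuron (the linear input and output neurons are added via add_neuron_no_bias). That accounts for the (n_inputs + 1) * m and m handed to the NeuralNetwork constructor, assuming its two arguments are the weight and bias counts as the code suggests:

    // Illustrative count for n_inputs = 2 and m = 4
    size_t n_inputs = 2, m = 4;
    size_t n_weights = (n_inputs + 1) * m;  // 2*4 input->inner + 4 inner->output = 12
    size_t n_biases  = m;                   // only the inner logistic neurons carry a bias
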
 
@@ -143,11 +149,6 @@ DESolver::~DESolver() {
         this->solution_inner_neurons = nullptr;
     }
 
-    if( this->differential_equations ){
-        delete this->differential_equations;
-        this->differential_equations = nullptr;
-    }
-
     if( this->errors_functions_types ){
         delete this->errors_functions_types;
         this->errors_functions_types = nullptr;
@@ -158,27 +159,40 @@ DESolver::~DESolver() {
         this->errors_functions_data_sets = nullptr;
     }
 
+    if(this->differential_equations){
+        for(auto nns: *this->differential_equations){
+            delete nns;
+        }
+        delete this->differential_equations;
+        this->differential_equations = nullptr;
+    }
+
 
+    for(auto nn: this->map_multiindices2nn){
+        NeuralNetwork * n_to_delete = nn.second;
+        delete n_to_delete;
+    }
 
 }
 
-void DESolver::add_to_differential_equation( unsigned int equation_idx, MultiIndex &alpha, double beta ) {
+//TODO more efficient representation of the functions (large portion of the structure is the same for all partial derivatives)
+void DESolver::add_to_differential_equation( size_t equation_idx, MultiIndex &alpha, double beta ) {
 
     if( equation_idx >= this->n_equations ){
         throw std::invalid_argument( "The provided equation index is too large!" );
     }
 
-    unsigned int derivative_degree = alpha.get_degree( );
+    size_t derivative_degree = alpha.get_degree( );
 
     if( derivative_degree > 2 ){
         throw std::invalid_argument("The supplied multi-index represents partial derivative of order higher than 2! (Valid degree is at most 2)\n");
     }
 
     /* retrieve indices of the variables according to which we perform the derivations ( applicable to any order, not just 2 or less )*/
-    std::vector<unsigned int> partial_derivative_indices;
+    std::vector<size_t> partial_derivative_indices;
     partial_derivative_indices.reserve(derivative_degree);
-    for( unsigned int i = 0; i < alpha.get_partial_derivatives_degrees()->size( ); ++i ){
-        unsigned int degree = alpha.get_partial_derivatives_degrees()->at( i );
+    for( size_t i = 0; i < alpha.get_partial_derivatives_degrees()->size( ); ++i ){
+        size_t degree = alpha.get_partial_derivatives_degrees()->at( i );
 
         while( degree > 0 ){
 
@@ -188,132 +202,114 @@ void DESolver::add_to_differential_equation( unsigned int equation_idx, MultiInd
         }
     }
 
-
-
-//    for( auto idx: map_multiindices2nn ){
-////        printf("%s\n", idx.first.to_string().c_str());
-//        printf("Is %s < %s? [%d]\n", idx.first.to_string().c_str(), alpha.to_string().c_str(), idx.first < alpha );
-//        printf("Is %s < %s? [%d]\n", alpha.to_string().c_str(), idx.first.to_string().c_str(), alpha < idx.first );
-//    }
-
     NeuralNetwork *new_net = nullptr;
     /* we check whether the new multi-index is already present */
     if(map_multiindices2nn.find( alpha ) != map_multiindices2nn.end()){
         new_net = map_multiindices2nn[ alpha ];
         this->differential_equations->at( equation_idx )->add_network( new_net, beta );
-        printf("Adding an existing partial derivative (multi-index: %s) to equation %d with coefficient %f\n", alpha.to_string().c_str(), equation_idx, beta);
+        printf("\nAdding an existing partial derivative (multi-index: %s) to equation %d with coefficient %f\n", alpha.to_string().c_str(), (int)equation_idx, beta);
         return;
     }
-    printf("Adding a new partial derivative (multi-index: %s) to equation %d with coefficient %f\n", alpha.to_string().c_str(), equation_idx, beta);
+    printf("\nAdding a new partial derivative (multi-index: %s) to equation %d with coefficient %f\n", alpha.to_string().c_str(), (int)equation_idx, beta);
 
     /* we need to construct a new neural network */
-     new_net = new NeuralNetwork( );
-     new_net->set_weight_array( this->solution->get_weights_ptr() );
+    new_net = new NeuralNetwork( );
+    new_net->set_parameter_space_pointers( *this->solution );
 
     /* input neurons */
     std::vector<size_t> input_set( this->dim_i );
-    for( unsigned int i = 0; i < this->dim_i; ++i ){
-        NeuronLinear *input_i = new NeuronLinear(0.0, 1.0);  //f(x) = x
-        new_net->add_neuron( input_i );
-        input_set[i] = i;
+    size_t idx;
+    for( size_t i = 0; i < this->dim_i; ++i ){
+        NeuronLinear *input_i = new NeuronLinear( );  //f(x) = x
+        idx = new_net->add_neuron_no_bias( input_i );
+        input_set[i] = idx;
     }
     new_net->specify_input_neurons( input_set );
+    size_t first_input_neuron = input_set[0];
+
 
     /* output neurons */
-    std::vector<size_t> output_set( this->dim_o );
-    for( unsigned int i = 0; i < this->dim_o; ++i ){
-        NeuronLinear *output_i = new NeuronLinear(0.0, 1.0);  //f(x) = x
-        new_net->add_neuron( output_i );
-        output_set[i] = i + this->dim_i;
-    }
+    std::vector<size_t> output_set( 1 );
+    idx = new_net->add_neuron_no_bias( new NeuronLinear( ) );//f(x) = x
+    output_set[0] = idx;
     new_net->specify_output_neurons( output_set );
+    size_t first_output_neuron = idx;
 
     /* the new partial derivative has degree of at least one */
-    for( unsigned int i = 0; i < this->dim_inn; ++i ){
-        if( derivative_degree == 1 ){
-            NeuronLogistic_d1 *new_inn_neuron = this->solution_inner_neurons->at( i )->get_derivative( );
-            new_net->add_neuron( new_inn_neuron );
+    size_t first_inner_neuron = 0;
+    NeuronLogistic *n_ptr = nullptr, *n_ptr2 = nullptr;
+    for( size_t i = 0; i < this->dim_inn; ++i ){
+        n_ptr = this->solution_inner_neurons->at( i );
+
+        for( size_t j = 0; j < derivative_degree; ++j){
+            n_ptr2 = n_ptr;
+
+            n_ptr = n_ptr->get_derivative( );
+
+            if(j > 0){
+                delete n_ptr2;
+                n_ptr2 = nullptr;
+            }
+
         }
-        else if( derivative_degree == 2 ){
-            NeuronLogistic_d2 *new_inn_neuron = this->solution_inner_neurons->at( i )->get_derivative( )->get_derivative( );
-            new_net->add_neuron( new_inn_neuron );
+        idx = new_net->add_neuron( n_ptr );
+
+        if(i == 0){
+            first_inner_neuron = idx;
         }
     }
 
     /* identity neurons serving as a 'glue'*/
-    for(unsigned int i = 0; i < derivative_degree * this->dim_o * this->dim_inn; ++i){
-        NeuronLinear *new_neuron = new NeuronLinear(0.0, 1.0);  //f(x) = x
-        new_net->add_neuron( new_neuron );
+    size_t first_glue_neuron = idx + 1;
+    for(size_t i = 0; i < derivative_degree * this->dim_inn; ++i){
+        idx = new_net->add_neuron_no_bias( new NeuronLinear( ) ); //f(x) = x
     }
 
     /* connections between input neurons and inner neurons */
-    unsigned int dim_shift = this->dim_i + this->dim_o;
-    size_t weight_idx = 0;
-    for(unsigned int i = 0; i < this->dim_i; ++i){
-        for(unsigned int j = 0; j < this->dim_inn; ++j){
-            printf("  adding a connection between input neuron %2d and inner neuron %2d, weight index %d\n", i, j, weight_idx);
-            new_net->add_connection_simple(i, dim_shift + j, weight_idx);
-            weight_idx++;
+    size_t connection_idx = 0;
+    for(size_t i = 0; i < this->dim_i; ++i){
+        for(size_t j = 0; j < this->dim_inn; ++j){
+            printf("  adding a connection between input neuron %2d[%2d] and inner neuron  %2d[%2d], connection index: %3d\n", (int)i, (int)(first_input_neuron + i), (int)j, (int)(first_inner_neuron + j), (int)connection_idx);
+            new_net->add_existing_connection(first_input_neuron + i, first_inner_neuron + j, connection_idx, *this->solution );
+            connection_idx++;
         }
     }
+    printf("----------------------------------------------------------------------------------------------------\n");
 
     /* connections between inner neurons and the first set of 'glueing' neurons */
-//    unsigned int glue_shift = dim_shift + this->dim_inn;
-    unsigned int glue_shift = dim_shift + this->dim_inn;
-//    size_t weight_idx_mem = weight_idx;
-    for(unsigned int i = 0; i < this->dim_inn; ++i){
-        for(unsigned int j = 0; j < this->dim_o; ++j){
-            printf("  adding a connection between inner neuron %2d and glue neuron %2d, weight index %d\n", i, i * this->dim_o + j, weight_idx);
-            new_net->add_connection_simple(dim_shift + i, glue_shift + i * this->dim_o + j, weight_idx );
-            weight_idx++;
-        }
+    for(size_t i = 0; i < this->dim_inn; ++i){
+        printf("  adding a connection between inner neuron %2d[%2d] and glue neuron   %2d[%2d], connection index: %3d\n", (int)i, (int)(first_inner_neuron + i), (int)i, (int)(first_glue_neuron + i), (int)connection_idx);
+        new_net->add_existing_connection(first_inner_neuron + i, first_glue_neuron + i, connection_idx, *this->solution );
+        connection_idx++;
     }
-
-    if( derivative_degree == 1 ){
-        /* connections from the first set of glueing neurons to the output neurons */
-        unsigned int pi = partial_derivative_indices[0];
-
-        for(unsigned int i = 0; i < this->dim_inn; ++i){
-            unsigned int local_weight_idx = pi * this->dim_inn + i;
-
-            for(unsigned int j = 0; j < this->dim_o; ++j){
-                printf("  adding a connection between glue neuron %2d and output neuron %2d, weight index %d\n", i * this->dim_o + j, j, local_weight_idx);
-                new_net->add_connection_simple(glue_shift + i * this->dim_o + j, j + this->dim_i, local_weight_idx );
-            }
+    printf("----------------------------------------------------------------------------------------------------\n");
+
+    size_t pd_idx;
+    /* connections between glueing neurons */
+    for(size_t di = 0; di < derivative_degree - 1; ++di){
+        pd_idx = partial_derivative_indices[di];/* partial derivative index */
+        for(size_t i = 0; i < this->dim_inn; ++i){
+            connection_idx = pd_idx * this->dim_inn + i;
+            printf("  adding a connection between glue neuron  %2d[%2d] and glue neuron   %2d[%2d], connection index: %3d\n", (int)(i + (di)*this->dim_inn), (int)(first_glue_neuron + i + (di)*this->dim_inn), (int)(i + (di + 1)*this->dim_inn), (int)(first_glue_neuron + i + (di + 1)*this->dim_inn), (int)connection_idx);
+            new_net->add_existing_connection(first_glue_neuron + i + (di)*this->dim_inn, first_glue_neuron + i + (di + 1)*this->dim_inn, connection_idx, *this->solution );
         }
     }
-    else if( derivative_degree == 2 ){
-        /* connections from the first set of glueing neurons towards the second set of glueing neurons */
-        unsigned int pi = partial_derivative_indices[0];
-        unsigned int glue_shift_2 = this->dim_inn + this->dim_i + this->dim_o + this->dim_inn * this->dim_o;
-
-        for(unsigned int i = 0; i < this->dim_inn; ++i){
-            unsigned int local_weight_idx = pi * this->dim_inn + i;
-
-            for(unsigned int j = 0; j < this->dim_o; ++j){
-                printf("  adding a connection between glue neuron[1] %2d and glue neuron[2] %2d, weight index %d\n", i * this->dim_o + j, i * this->dim_o + j, local_weight_idx);
-                new_net->add_connection_simple(glue_shift + i * this->dim_o + j, glue_shift_2 + i * this->dim_o + j, local_weight_idx );
-            }
-        }
-
-        /* connections from the second set of glueing neurons towards the output neurons */
-        pi = partial_derivative_indices[1];
-
-        for(unsigned int i = 0; i < this->dim_inn; ++i){
-            unsigned int local_weight_idx = pi * this->dim_inn + i;
-
-            for(unsigned int j = 0; j < this->dim_o; ++j){
-                printf("  adding a connection between glue neuron[2] %2d and output neuron %2d, weight index %d\n", i * this->dim_o + j, j, local_weight_idx);
-                new_net->add_connection_simple(glue_shift_2 + i * this->dim_o + j, this->dim_i + j, local_weight_idx );
-            }
-        }
+    printf("----------------------------------------------------------------------------------------------------\n");
+
+    /* connections from the last layer of glueing neurons toward the output neuron */
+    pd_idx = partial_derivative_indices[derivative_degree - 1];/* partial derivative index */
+    for(size_t i = 0; i < this->dim_inn; ++i){
+        connection_idx = pd_idx * this->dim_inn + i;
+        printf("  adding a connection between glue neuron %2d[%2d] and output neuron  %2d[%2d], connection index: %3d\n", (int)(i + (derivative_degree - 1)*this->dim_inn), (int)(first_glue_neuron + i + (derivative_degree - 1)*this->dim_inn), 0, (int)(first_output_neuron), (int)connection_idx);
+        new_net->add_existing_connection(first_glue_neuron + i + (derivative_degree - 1)*this->dim_inn, first_output_neuron, connection_idx, *this->solution );
     }
+
     map_multiindices2nn[ alpha ] = new_net;
 
     this->differential_equations->at( equation_idx )->add_network( new_net, beta );
 }
 
-void DESolver::set_error_function(unsigned int equation_idx, ErrorFunctionType F, DataSet *conditions) {
+void DESolver::set_error_function(size_t equation_idx, ErrorFunctionType F, DataSet *conditions) {
     if( equation_idx >= this->n_equations ){
         throw std::invalid_argument("The parameter 'equation_idx' is too large! It exceeds the number of differential equations.");
     }
@@ -323,7 +319,7 @@ void DESolver::set_error_function(unsigned int equation_idx, ErrorFunctionType F
 }
 
 void DESolver::solve_via_particle_swarm(double *domain_bounds, double c1, double c2, double w,
-                                          unsigned int n_particles, unsigned int max_iters, double gamma,
+                                          size_t n_particles, size_t max_iters, double gamma,
                                           double epsilon, double delta) {
 
     NeuralNetwork *nn;
@@ -331,7 +327,7 @@ void DESolver::solve_via_particle_swarm(double *domain_bounds, double c1, double
 
     /* DEFINITION OF THE PARTIAL ERROR FUNCTIONS */
     std::vector<ErrorFunction*> error_functions( this->n_equations );
-    for(unsigned int i = 0; i < this->n_equations; ++i ){
+    for(size_t i = 0; i < this->n_equations; ++i ){
         nn = this->differential_equations->at( i );
         ds = this->errors_functions_data_sets->at( i );
 
@@ -346,26 +342,28 @@ void DESolver::solve_via_particle_swarm(double *domain_bounds, double c1, double
 
     /* DEFINITION OF THE GLOBAL ERROR FUNCTION */
     ErrorSum total_error;
-    for(unsigned int i = 0; i < this->n_equations; ++i ) {
+    for(size_t i = 0; i < this->n_equations; ++i ) {
         total_error.add_error_function( error_functions[i], 1.0 );
     }
 
     ParticleSwarm swarm_01(&total_error, domain_bounds, c1, c2, w, n_particles, max_iters);
 
     this->solution->randomize_weights();
+    this->solution->randomize_biases();
 
     swarm_01.optimize(gamma, epsilon, delta);
 
+    this->solution->copy_parameter_space(swarm_01.get_solution());
 }
 
 NeuralNetwork* DESolver::get_solution() {
     return this->solution;
 }
 
-void DESolver::eval_equation( unsigned int equation_idx, double *weight_values, std::vector<double> &input ) {
-    std::vector<double> output(this->dim_o);
+void DESolver::eval_equation( size_t equation_idx, std::vector<double> *weight_and_biases, std::vector<double> &input ) {
+    std::vector<double> output(1);
 
-    this->differential_equations->at( equation_idx )->eval_single( input, output, weight_values );
+    this->differential_equations->at( equation_idx )->eval_single( input, output, weight_and_biases );
 
     printf("Input: ");
     for( auto e: input ){
diff --git a/src/Solvers/DESolver.h b/src/Solvers/DESolver.h
index 968ee5fed85cc8cf7bf020cfb7cbae37fccefaad..dd3f48a74d34eb0f342913f858cd9607c2906063 100644
--- a/src/Solvers/DESolver.h
+++ b/src/Solvers/DESolver.h
@@ -28,19 +28,19 @@ private:
     /**
      * total number of variables
      */
-    unsigned int dim;
+    size_t dim;
 
     /**
      * a vector containing degrees of partial derivatives with respect to each variable
      */
-    std::vector<unsigned int> partial_derivatives_degrees;
+    std::vector<size_t> partial_derivatives_degrees;
 
 public:
     /**
      *
      * @param dimension
      */
-    MultiIndex(unsigned int dimension);
+    MultiIndex(size_t dimension);
 
 
     /**
@@ -48,13 +48,13 @@ public:
      * @param index
      * @param value
      */
-    void set_partial_derivative(unsigned int index, unsigned int value);
+    void set_partial_derivative(size_t index, size_t value);
 
     /**
      *
      * @return
      */
-    std::vector<unsigned int>* get_partial_derivatives_degrees( ) ;
+    std::vector<size_t>* get_partial_derivatives_degrees( ) ;
 
 
     /**
@@ -74,7 +74,7 @@ public:
      *
      * @return
      */
-    unsigned int get_degree( ) const ;
+    size_t get_degree( ) const ;
 };
 
 
@@ -96,10 +96,9 @@ private:
     /* NN as the unknown function */
     NeuralNetwork * solution = nullptr;
 
-
     /* auxiliary variables */
     std::vector<NeuronLogistic*> *solution_inner_neurons = nullptr;
-    unsigned int dim_i = 0, dim_o = 0, dim_inn = 0, n_equations = 0;
+    size_t dim_i = 0, dim_inn = 0, n_equations = 0;
 
 public:
     /**
@@ -107,9 +106,8 @@ public:
      * @param n_equations
      * @param n_inputs
      * @param m
-     * @param n_outputs
      */
-    DESolver( unsigned int n_equations, unsigned int n_inputs, unsigned int m, unsigned int n_outputs );
+    DESolver( size_t n_equations, size_t n_inputs, size_t m );
 
     /**
      * default destructor
@@ -122,7 +120,7 @@ public:
      * @param alpha
      * @param beta
      */
-    void add_to_differential_equation( unsigned int equation_idx, MultiIndex &alpha, double beta );
+    void add_to_differential_equation( size_t equation_idx, MultiIndex &alpha, double beta );
 
     /**
      * Sets the error function for the differential equation with the corresponding index
@@ -130,7 +128,7 @@ public:
      * @param F
      * @param conditions
      */
-    void set_error_function(unsigned int equation_idx, ErrorFunctionType F, DataSet *conditions);
+    void set_error_function(size_t equation_idx, ErrorFunctionType F, DataSet *conditions);
 
 
      /**
@@ -151,8 +149,8 @@ public:
             double c1,
             double c2,
             double w,
-            unsigned int n_particles,
-            unsigned int max_iters,
+            size_t n_particles,
+            size_t max_iters,
             double gamma,
             double epsilon,
             double delta
@@ -167,7 +165,7 @@ public:
     /**
      * For testing purposes only
      */
-     void eval_equation( unsigned int equation_idx, double * weight_values, std::vector<double> &input );
+     void eval_equation( size_t equation_idx, std::vector<double> * weights_and_biases, std::vector<double> &input );
 };
 
 
diff --git a/src/main.cpp b/src/main.cpp
index 2e5468a0d9bbaca2f323b6272f1225981e2d47ed..ebf1402d8999e9f5d2a6da3e020c2608699de3b9 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -6,22 +6,12 @@
  */
 
 #include <iostream>
-#include <cstdio>
-#include <fstream>
 #include <vector>
-#include <utility>
 #include <boost/archive/text_oarchive.hpp>
-#include <boost/archive/text_iarchive.hpp>
 #include "Network/NeuralNetwork.h"
 #include "Neuron/NeuronLinear.h"
-#include "Neuron/NeuronLogistic.h"
-#include "NetConnection/Connection.h"
-#include "NetConnection/ConnectionWeightIdentity.h"
 
 #include "LearningMethods/ParticleSwarm.h"
-#include "Neuron/NeuronBinary.h"
-#include "Neuron/NeuronTanh.h"
-#include "DataSet/DataSet.h"
 
 //TODO rewrite "tests" to separate examples
 
diff --git a/src/net_test_1.cpp b/src/net_test_1.cpp
index 1279ff918b6cb8fe9f20d6cd1a51a6f81411f21a..0296612f0a72a73537d6828758adb98ec7f086ea 100644
--- a/src/net_test_1.cpp
+++ b/src/net_test_1.cpp
@@ -8,7 +8,6 @@
 //
 
 #include <vector>
-#include <utility>
 
 #include "../include/4neuro.h"
 
diff --git a/src/net_test_2.cpp b/src/net_test_2.cpp
index c57369459ad9a4e839beffc17c11b234e74184df..18a9a96c9d5258b35186999ef8fd5d79f90984ef 100644
--- a/src/net_test_2.cpp
+++ b/src/net_test_2.cpp
@@ -10,7 +10,6 @@
 //
 
 #include <vector>
-#include <utility>
 
 #include "../include/4neuro.h"
 
diff --git a/src/net_test_3.cpp b/src/net_test_3.cpp
index 5624a28037056dd816bd65d4efe6a017bf5e8c98..c03d5529bab2e16b929d0f14444d6a5119c4a756 100644
--- a/src/net_test_3.cpp
+++ b/src/net_test_3.cpp
@@ -10,7 +10,6 @@
 //
 
 #include <vector>
-#include <utility>
 
 #include "../include/4neuro.h"
 
diff --git a/src/net_test_ode_1.cpp b/src/net_test_ode_1.cpp
index bb874eed8d69e29a5ea70fc9073ddc965e209519..18167337f00828a0ba16b1acb6b9ab953bf97103 100644
--- a/src/net_test_ode_1.cpp
+++ b/src/net_test_ode_1.cpp
@@ -7,106 +7,600 @@
  *
  * -------------------------------------------
  * Analytical solution: e^(-2x) * (3x + 1)
- *
+ * NN representation: sum over [a_i * (1 + e^(-x * w_i + b_i))^(-1)]
+ * -------------------------------------------
+ * Optimal NN setting without biases (2 inner neurons)
+ * Path   1. w =     -6.35706416, b =      0.00000000, a =     -1.05305639
+ * Path   2. w =     -1.55399893, b =      0.00000000, a =      3.07464411
+ * -------------------------------------------
+ * Optimal NN setting with biases (2 inner neurons)
+ * Path   1. w =      6.75296220, b =     -1.63419516, a =      1.71242130
+ * Path   2. w =      1.86917877, b =      1.09972747, a =     -1.70757578
 * @author Michal Kravčenko
  * @date 17.7.18 -
  */
+
 #include <random>
-#include <vector>
-#include <utility>
 #include <iostream>
 
 #include "../include/4neuro.h"
 #include "Solvers/DESolver.h"
 
-void test_analytical_gradient_y(unsigned int n_inner_neurons) {
-    unsigned int n_inputs = 1;
+double eval_f(double x){
+    return std::pow(E, -2.0 * x) * (3.0 * x + 1.0);
+}
+
+double eval_df(double x){
+    return std::pow(E, -2.0 * x) * (1.0 - 6.0 * x);
+}
+
+double eval_ddf(double x){
+    return 4.0 * std::pow(E, -2.0 * x) * (3.0 * x - 2.0);
+}
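+
+/* Sanity check (an added illustrative sketch, not part of the original
+ * example): the analytical solution above satisfies y'' + 4y' + 4y = 0 with
+ * y(0) = 1 and y'(0) = 1, so this residual is ~0 up to rounding for any x. */
+double eval_analytic_residual(double x){
+    return eval_ddf(x) + 4.0 * eval_df(x) + 4.0 * eval_f(x);
+}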
+
+double eval_approx_f(double x, size_t n_inner_neurons, std::vector<double> &parameters){
+    double value= 0.0, wi, ai, bi, ei, ei1;
+    for(size_t i = 0; i < n_inner_neurons; ++i){
+
+        wi = parameters[3 * i];
+        ai = parameters[3 * i + 1];
+        bi = parameters[3 * i + 2];
+
+        ei = std::pow(E, bi - wi * x);
+        ei1 = ei + 1.0;
+
+        value += ai / (ei1);
+    }
+    return value;
+}
+
+double eval_approx_df(double x, size_t n_inner_neurons, std::vector<double> &parameters){
+    double value= 0.0, wi, ai, bi, ei, ei1;
+    for(size_t i = 0; i < n_inner_neurons; ++i){
+
+        wi = parameters[3 * i];
+        ai = parameters[3 * i + 1];
+        bi = parameters[3 * i + 2];
+
+        ei = std::pow(E, bi - wi * x);
+        ei1 = ei + 1.0;
+
+        value += ai * wi * ei / (ei1 * ei1);
+    }
+
+    return value;
+}
+
+double eval_approx_ddf(double x, size_t n_inner_neurons, std::vector<double> &parameters){
+    double value= 0.0, wi, ai, bi, ewx, eb;
+
+    for(size_t i = 0; i < n_inner_neurons; ++i){
+
+        wi = parameters[3 * i];
+        ai = parameters[3 * i + 1];
+        bi = parameters[3 * i + 2];
+
+
+        eb = std::pow(E, bi);
+        ewx = std::pow(E, wi * x);
+
+        value += -(ai*wi*wi*eb*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
+    }
+
+    return value;
+}
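+
+/* Finite-difference sanity check (an added sketch): eval_approx_df should
+ * agree with the central difference of eval_approx_f up to O(h^2), and the
+ * same pattern applies to eval_approx_ddf. Assumes the {w_i, a_i, b_i}
+ * parameter layout used above. */
+bool check_approx_df(double x, size_t n_inner_neurons, std::vector<double> &parameters){
+    const double h = 1e-6;
+    double fd = (eval_approx_f(x + h, n_inner_neurons, parameters)
+               - eval_approx_f(x - h, n_inner_neurons, parameters)) / (2.0 * h);
+    return std::abs(fd - eval_approx_df(x, n_inner_neurons, parameters)) < 1e-6;
+}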
+
+//NN partial derivative (wi): (ai * x * e^(bi - wi * x)) * (e^(bi - wi * x) + 1)^(-2)
+double eval_approx_dw_f(double x, size_t neuron_idx, std::vector<double> &parameters){
+    double wi, ai, bi, ei, ei1;
+    wi = parameters[3 * neuron_idx];
+    ai = parameters[3 * neuron_idx + 1];
+    bi = parameters[3 * neuron_idx + 2];
+
+    ei = std::pow(E, bi - wi * x);
+    ei1 = ei + 1.0;
+
+    return (ai * x * ei) / (ei1 * ei1);
+}
+
+//dNN partial derivative (wi): -(a w x e^(b - w x))/(e^(b - w x) + 1)^2 + (2 a w x e^(2 b - 2 w x))/(e^(b - w x) + 1)^3 + (a e^(b - w x))/(e^(b - w x) + 1)^2
+double eval_approx_dw_df(double x, size_t neuron_idx, std::vector<double> &parameters){
+
+    double wi, ai, bi, ei, ei1;
+
+    wi = parameters[3 * neuron_idx];
+    ai = parameters[3 * neuron_idx + 1];
+    bi = parameters[3 * neuron_idx + 2];
+
+    ei = std::pow(E, bi - wi * x);
+    ei1 = ei + 1.0;
+
+    return -(ai * wi * x * ei)/(ei1 * ei1) + (2.0*ai*wi*x*ei*ei)/(ei1 * ei1 * ei1) + (ai* ei)/(ei1 * ei1);
+}
+
+//ddNN partial derivative (wi): -(a w^2 x e^(b + 2 w x))/(e^b + e^(w x))^3 - (a w^2 x e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3 + (3 a w^2 x e^(b + 2 w x) (e^(w x) - e^b))/(e^b + e^(w x))^4 - (2 a w e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
+double eval_approx_dw_ddf(double x, size_t neuron_idx, std::vector<double> &parameters){
+    double wi, ai, bi, eb, ewx;
+
+    wi = parameters[3 * neuron_idx];
+    ai = parameters[3 * neuron_idx + 1];
+    bi = parameters[3 * neuron_idx + 2];
+
+    eb = std::pow(E, bi);
+    ewx = std::pow(E, wi * x);
+
+    return  -(ai*wi*wi* x * eb*ewx*ewx)/((eb + ewx)*(eb + ewx)*(eb + ewx)) - (ai*wi*wi*x*eb*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx)) + (3*ai*wi*wi*x*eb*ewx*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx)*(eb + ewx)) - (2*ai*wi*eb*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
+}
+
+//NN partial derivative (ai): (1 + e^(-x * wi + bi))^(-1)
+double eval_approx_da_f(double x, size_t neuron_idx, std::vector<double> &parameters){
+    double wi, bi, ei, ei1;
+
+    wi = parameters[3 * neuron_idx];
+    bi = parameters[3 * neuron_idx + 2];
+
+    ei = std::pow(E, bi - wi * x);
+    ei1 = ei + 1.0;
+
+    return 1.0 / ei1;
+}
+
+//dNN partial derivative (ai): (w e^(b - w x))/(e^(b - w x) + 1)^2
+double eval_approx_da_df(double x, size_t neuron_idx, std::vector<double> &parameters){
+    double wi, bi, ei, ei1;
+
+    wi = parameters[3 * neuron_idx];
+    bi = parameters[3 * neuron_idx + 2];
+
+    ei = std::pow(E, bi - wi * x);
+    ei1 = ei + 1.0;
+
+    return (wi*ei)/(ei1 * ei1);
+}
+
+//ddNN partial derivative (ai):  -(w^2 e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
+double eval_approx_da_ddf(double x, size_t neuron_idx, std::vector<double> &parameters){
+    double wi, bi, eip, ewx, eb;
+
+    wi = parameters[3 * neuron_idx];
+    bi = parameters[3 * neuron_idx + 2];
+
+    eip = std::pow(E, bi + wi * x);
+    eb = std::pow(E, bi);
+    ewx = std::pow(E, wi * x);
+
+    return -(wi*wi*eip*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
+}
+
+//NN partial derivative (bi): -(ai * e^(bi - wi * x)) * (e^(bi - wi * x) + 1)^(-2)
+double eval_approx_db_f(double x, size_t neuron_idx, std::vector<double> &parameters){
+    double wi, bi, ei, ai, ei1;
+    wi = parameters[3 * neuron_idx];
+    ai = parameters[3 * neuron_idx + 1];
+    bi = parameters[3 * neuron_idx + 2];
+
+    ei = std::pow(E, bi - wi * x);
+    ei1 = ei + 1.0;
+
+    return -(ai * ei)/(ei1 * ei1);
+}
+
+//dNN partial derivative (bi): (a w e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
+double eval_approx_db_df(double x, size_t neuron_idx, std::vector<double> &parameters){
+    double wi, bi, ai, ewx, eb;
+
+    wi = parameters[3 * neuron_idx];
+    ai = parameters[3 * neuron_idx + 1];
+    bi = parameters[3 * neuron_idx + 2];
+
+    eb = std::pow(E, bi);
+    ewx = std::pow(E, wi*x);
+
+    return (ai* wi* eb*ewx* (ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
 }
 
-void test_analytical_y(unsigned int n_inner_neurons){
-    unsigned int n_inputs = 1;
-    unsigned int n_outputs = 1;
-    unsigned int n_equations = 1;
+//ddNN partial derivative (bi): -(a w^2 e^(b + w x) (-4 e^(b + w x) + e^(2 b) + e^(2 w x)))/(e^b + e^(w x))^4
+double eval_approx_db_ddf(double x, size_t neuron_idx, std::vector<double> &parameters){
+    double wi, bi, ai, ewx, eb;
+
+    wi = parameters[3 * neuron_idx];
+    ai = parameters[3 * neuron_idx + 1];
+    bi = parameters[3 * neuron_idx + 2];
+
+    eb = std::pow(E, bi);
+    ewx = std::pow(E, wi*x);
+
+    return -(ai* wi*wi* eb*ewx* (-4.0* eb*ewx + eb*eb + ewx*ewx))/((eb +ewx)*(eb +ewx)*(eb +ewx)*(eb +ewx));
+}
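+
+/* Finite-difference check for the parameter partials (an added sketch):
+ * perturbing w_i and comparing against eval_approx_dw_f; the a_i and b_i
+ * partials can be verified the same way. The vector is taken by value so the
+ * caller's parameters stay untouched. */
+bool check_dw_f(double x, size_t neuron_idx, size_t n_inner_neurons, std::vector<double> parameters){
+    const double h = 1e-6;
+    double w0 = parameters[3 * neuron_idx];
+    parameters[3 * neuron_idx] = w0 + h;
+    double fp = eval_approx_f(x, n_inner_neurons, parameters);
+    parameters[3 * neuron_idx] = w0 - h;
+    double fm = eval_approx_f(x, n_inner_neurons, parameters);
+    parameters[3 * neuron_idx] = w0;
+    double fd = (fp - fm) / (2.0 * h);
+    return std::abs(fd - eval_approx_dw_f(x, neuron_idx, parameters)) < 1e-6;
+}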
+
+double eval_error_function(std::vector<double> &parameters, size_t n_inner_neurons, std::vector<double> &test_points){
+
+    double output = 0.0, approx, frac = 1.0 / (test_points.size());
+
+    for(auto x: test_points){
+        /* governing equation */
+        approx = 4.0 * eval_approx_f(x, n_inner_neurons, parameters) + 4.0 * eval_approx_df(x, n_inner_neurons, parameters) + eval_approx_ddf(x, n_inner_neurons, parameters);
+
+        output += (0.0 - approx) * (0.0 - approx) * frac;
+
+    }
+
+    /* BC */
+    approx = eval_approx_f(0.0, n_inner_neurons, parameters);
+    output += (1.0 - approx) * (1.0 - approx);
+
+    approx = eval_approx_df(0.0, n_inner_neurons, parameters);
+    output += (1.0 - approx) * (1.0 - approx);
+
+
+    return output;
+}
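+
+/* Illustrative usage (an added sketch): evaluating the error functional at
+ * the "optimal NN setting with biases" listed in the file header. The
+ * per-neuron parameter layout is {w_i, a_i, b_i}, as consumed by
+ * eval_approx_f above. */
+void print_error_at_documented_optimum(std::vector<double> &test_points){
+    std::vector<double> params = { 6.75296220,  1.71242130, -1.63419516,
+                                   1.86917877, -1.70757578,  1.09972747 };
+    printf("error at the documented optimum: %10.8f\n", eval_error_function(params, 2, test_points));
+}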
+
+void test_analytical_gradient_y(std::vector<double> &guess, double accuracy, size_t n_inner_neurons, size_t train_size, bool opti_w, bool opti_b, double d1_s, double d1_e,
+                                size_t test_size, double ts, double te) {
 
     /* SETUP OF THE TRAINING DATA */
     std::vector<double> inp, out;
 
-    double d1_s = 0.0, d1_e = 1.0, frac, alpha, x;
+    double frac, alpha, x;
+    double grad_norm = accuracy * 10.0, mem, ai, bi, wi, error, derror, approx, xj, gamma, total_error, sk, sy, sx, beta;
+    double grad_norm_prev = grad_norm;
+    size_t i, j, iter_idx = 0;
 
     /* TRAIN DATA FOR THE GOVERNING DE */
-    std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_g;
-    unsigned int train_size = 150;
+    std::vector<double> data_points(train_size);
 
     /* ISOTROPIC TRAIN SET */
-    frac = (d1_e - d1_s) / (train_size - 1);
+//    frac = (d1_e - d1_s) / (train_size - 1);
 //    for(unsigned int i = 0; i < train_size; ++i){
-//        x = frac * i;
-//        inp = {x};
-//        out = {std::pow(E, -2 * x) * (3 * x + 1)};
-//        data_vec_g.emplace_back(std::make_pair(inp, out));
-//
-//        std::cout << i + 1 << " " << x << " " << out[0] << std::endl;
+//        data_points[i] = frac * i;
 //    }
 
     /* CHEBYSCHEV TRAIN SET */
-    alpha = PI / (train_size - 1);
+    alpha = PI / (train_size);
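+    /* the mapping x_i = (cos(pi - alpha*i) + 1) * (d1_e - d1_s)/2 + d1_s sends
+     * equidistant angles to Chebyshev-type nodes, clustering the training
+     * points near both ends of [d1_s, d1_e], where the approximation error
+     * tends to be largest */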
     frac = 0.5 * (d1_e - d1_s);
-    for(unsigned int i = 0; i < train_size; ++i){
+    for(i = 0; i < train_size; ++i){
         x = (std::cos(PI - alpha * i) + 1.0) * frac + d1_s;
-        inp = {x};
-        out = {std::pow(E, -2 * x) * (3 * x + 1)};
-        data_vec_g.emplace_back(std::make_pair(inp, out));
+        data_points[i] = x;
+    }
+
+    std::vector<double> *gradient_current = new std::vector<double>(3 * n_inner_neurons);
+    std::vector<double> *gradient_prev = new std::vector<double>(3 * n_inner_neurons);
+    std::vector<double> *params_current = new std::vector<double>(guess);
+    std::vector<double> *params_prev = new std::vector<double>(guess);
+    std::vector<double> *conjugate_direction_current = new std::vector<double>(3 * n_inner_neurons);
+    std::vector<double> *conjugate_direction_prev = new std::vector<double>(3 * n_inner_neurons);
+
+    std::vector<double> *ptr_mem;
+
 
-        std::cout << i + 1 << " " << x << " " << out[0] << std::endl;
+    std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
+    std::fill(gradient_prev->begin(), gradient_prev->end(), 0.0);
+    std::fill(conjugate_direction_current->begin(), conjugate_direction_current->end(), 0.0);
+    std::fill(conjugate_direction_prev->begin(), conjugate_direction_prev->end(), 0.0);
+
+
+
+    for (i = 0; i < n_inner_neurons; ++i) {
+        wi = (*params_current)[3 * i];
+        ai = (*params_current)[3 * i + 1];
+        bi = (*params_current)[3 * i + 2];
+
+        printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", (int)(i + 1), wi, bi, ai);
     }
-    DataSet ds_00(&data_vec_g);
 
-//    return;
+    gamma = 1.0;
+    while( grad_norm > accuracy) {
+        iter_idx++;
+
+        /* current gradient */
+        std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
+
+        /* error boundary condition: y(0) = 1 => e1 = (1 - y(0))^2 */
+        xj = 0.0;
+        mem = (1.0 - eval_approx_f(xj, n_inner_neurons, *params_current));
+        derror = 2.0 * mem;
+        total_error = mem * mem;
+        for(i = 0; i < n_inner_neurons; ++i){
+            if(opti_w){
+                (*gradient_current)[3 * i] -= derror * eval_approx_dw_f(xj, i, *params_current);
+                (*gradient_current)[3 * i + 1] -= derror * eval_approx_da_f(xj, i, *params_current);
+            }
+            if(opti_b){
+                (*gradient_current)[3 * i + 2] -= derror * eval_approx_db_f(xj, i, *params_current);
+            }
+        }
+//        for(auto e: *gradient_current){
+//            printf("[%10.8f]", e);
+//        }
+//        printf("\n");
+
+        /* error boundary condition: y'(0) = 1 => e2 = (1 - y'(0))^2 */
+        mem = (1.0 - eval_approx_df(xj, n_inner_neurons, *params_current));
+        derror = 2.0 * mem;
+        total_error += mem * mem;
+        for(i = 0; i < n_inner_neurons; ++i){
+            if(opti_w){
+                (*gradient_current)[3 * i] -= derror * eval_approx_dw_df(xj, i, *params_current);
+                (*gradient_current)[3 * i + 1] -= derror * eval_approx_da_df(xj, i, *params_current);
+            }
+            if(opti_b){
+                (*gradient_current)[3 * i + 2] -= derror * eval_approx_db_df(xj, i, *params_current);
+            }
+        }
+//        for(auto e: *gradient_current){
+//            printf("[%10.8f]", e);
+//        }
+//        printf("\n");
+
+        for(j = 0; j < data_points.size(); ++j){
+            xj = data_points[j];
+            /* error of the governing equation: y''(x) + 4y'(x) + 4y(x) = 0 => e3 = 1/n * (0 - y''(x) - 4y'(x) - 4y(x))^2 */
+            approx= eval_approx_ddf(xj, n_inner_neurons, *params_current) + 4.0 * eval_approx_df(xj, n_inner_neurons, *params_current) + 4.0 * eval_approx_f(xj, n_inner_neurons, *params_current);
+            mem = 0.0 - approx;
+            error = 2.0 * mem / train_size;
+            for(i = 0; i < n_inner_neurons; ++i){
+                if(opti_w){
+                    (*gradient_current)[3 * i] -= error * (eval_approx_dw_ddf(xj, i, *params_current) + 4.0 * eval_approx_dw_df(xj, i, *params_current) + 4.0 * eval_approx_dw_f(xj, i, *params_current));
+                    (*gradient_current)[3 * i + 1] -= error * (eval_approx_da_ddf(xj, i, *params_current) + 4.0 * eval_approx_da_df(xj, i, *params_current) + 4.0 * eval_approx_da_f(xj, i, *params_current));
+                }
+                if(opti_b){
+                    (*gradient_current)[3 * i + 2] -= error * (eval_approx_db_ddf(xj, i, *params_current) + 4.0 * eval_approx_db_df(xj, i, *params_current) + 4.0 * eval_approx_db_f(xj, i, *params_current));
+                }
+            }
+            total_error += mem * mem / train_size;
+        }
+
+//        /* error of the unknown function: y(x) = e^(-2x)*(3x+1) => e3 = 1/n * (e^(-2x)*(3x+1) - y(x))^2 */
+//        for(j = 0; j < data_points.size(); ++j) {
+//            xj = data_points[j];
+//            approx= eval_approx_f(xj, n_inner_neurons, *params_current);
+//            mem = (eval_f(xj) - approx);
+//            error = 2.0 * mem / train_size;
+//            for(i = 0; i < n_inner_neurons; ++i){
+//                if(opti_w){
+//                    (*gradient_current)[3 * i] -= error * (eval_approx_dw_f(xj, i, *params_current));
+//                    (*gradient_current)[3 * i + 1] -= error * (eval_approx_da_f(xj, i, *params_current));
+//                }
+//                if(opti_b){
+//                    (*gradient_current)[3 * i + 2] -= error * (eval_approx_db_f(xj, i, *params_current));
+//                }
+//            }
+//            total_error += mem * mem / train_size;
+//        }
+
+//        /* error of the unknown function: y'(x) = e^(-2x)*(1-6x) => e3 = 1/n * (e^(-2x)*(1-6x) - y'(x))^2 */
+//        for(j = 0; j < data_points.size(); ++j) {
+//            xj = data_points[j];
+//            approx= eval_approx_df(xj, n_inner_neurons, *params_current);
+//            mem = (eval_df(xj) - approx);
+//            error = 2.0 * mem / train_size;
+//            for(i = 0; i < n_inner_neurons; ++i){
+//                if(opti_w){
+//                    (*gradient_current)[3 * i] -= error * (eval_approx_dw_df(xj, i, *params_current));
+//                    (*gradient_current)[3 * i + 1] -= error * (eval_approx_da_df(xj, i, *params_current));
+//                }
+//                if(opti_b){
+//                    (*gradient_current)[3 * i + 2] -= error * (eval_approx_db_df(xj, i, *params_current));
+//                }
+//            }
+//            total_error += mem * mem / train_size;
+//        }
+
+//        /* error of the unknown function: y''(x) = 4e^(-2x)*(3x-2) => e3 = 4/n * (e^(-2x)*(3x-2) - y''(x))^2 */
+//        for(j = 0; j < data_points.size(); ++j) {
+//            xj = data_points[j];
+//            approx= eval_approx_ddf(xj, n_inner_neurons, *params_current);
+//            mem = (eval_ddf(xj) - approx);
+//            error = 2.0 * mem / train_size;
+//            for(i = 0; i < n_inner_neurons; ++i){
+//                if(opti_w){
+//                    (*gradient_current)[3 * i] -= error * (eval_approx_dw_ddf(xj, i, *params_current));
+//                    (*gradient_current)[3 * i + 1] -= error * (eval_approx_da_ddf(xj, i, *params_current));
+//                }
+//                if(opti_b){
+//                    (*gradient_current)[3 * i + 2] -= error * (eval_approx_db_ddf(xj, i, *params_current));
+//                }
+//            }
+////            printf("x: %f -> error: %f\n", xj, error);
+//            total_error += mem * mem / train_size;
+//        }
+
+//        for(auto e: *gradient_current){
+//            printf("[%10.8f]", e);
+//        }
+//        printf("\n");
+
+        /* conjugate direction coefficient (Polak-Ribiere): <-grad_curr, -grad_curr + grad_prev> / <-grad_prev, -grad_prev >*/
+
+        /* Update of the parameters */
+
+        /* NOTE: the original condition `iter_idx % 1 == 0` is always true,
+         * which made the conjugate-gradient branch below unreachable; a
+         * periodic steepest-descent restart is assumed instead (every 10
+         * iterations, an illustrative choice) */
+        if(iter_idx < 10 || iter_idx % 10 == 0){
+            for(i = 0; i < conjugate_direction_current->size(); ++i){
+                (*conjugate_direction_current)[i] = - (*gradient_current)[i];
+            }
+        }
+        else{
+            /* conjugate gradient */
+            sk = sy = 0.0;
+            for(i = 0; i < conjugate_direction_current->size(); ++i){
+                sk += (*gradient_current)[i] * ((*gradient_current)[i] - (*gradient_prev)[i]);
+                sy += (*gradient_prev)[i] * (*gradient_prev)[i];
+            }
+            beta = std::max(0.0, sk / sy);
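+            /* clamping beta at zero (the "PR+" rule) falls back to steepest
+             * descent whenever the Polak-Ribiere coefficient turns negative,
+             * which acts as an automatic restart */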
+
+            /* update of the conjugate direction */
+            for(i = 0; i < conjugate_direction_current->size(); ++i){
+                (*conjugate_direction_current)[i] = beta * (*conjugate_direction_prev)[i] - (*gradient_current)[i];
+            }
+        }
+
+        /* step length calculation */
+        if(iter_idx < 10){
+            /* fixed step length */
+            gamma = 0.000001;
+        }
+        else{
+//            /* Barzilai-Borwein */
+//            sk = sy = 0.0;
+//
+//            for(i = 0; i < gradient_current->size(); ++i){
+//                sx = (*params_current)[i] - (*params_prev)[i];
+//                sg = (*conjugate_direction_current)[i] - (*conjugate_direction_prev)[i];
+//
+//                sk += sx * sg;
+//                sy += sg * sg;
+//            }
+//
+//            gamma = -sk / sy;
 
-    DESolver solver_01( n_equations, n_inputs, n_inner_neurons, n_outputs );
 
-    NeuralNetwork * solution = solver_01.get_solution();
 
 
+//            /* Line search */
+//
+//            gamma = line_search(10.0, conjugate_direction, *params_current, *gradient_current, n_inner_neurons, ds_00);
+        }
 
-    /* PARTICLE SWARM TRAINING METHOD SETUP */
-    unsigned int max_iters = 150;
 
-    //must encapsulate each of the partial error functions
-    double *domain_bounds = new double[ 2 * n_inner_neurons * (n_inputs + n_outputs) ];
-    for(unsigned int i = 0; i < n_inner_neurons * (n_inputs + n_outputs); ++i){
-        domain_bounds[2 * i] = -800.0;
-        domain_bounds[2 * i + 1] = 800.0;
+//        for(auto e: conjugate_direction){
+//            printf("[%10.8f]", e);
+//        }
+//        printf("\n");
+//
+//        for(auto e: *params_current){
+//            printf("[%10.8f]", e);
+//        }
+//        printf("\n");
+
+        /* norm of the gradient calculation; the new norm has to be computed
+         * before the adaptive step-length update, otherwise grad_norm would
+         * be compared with itself (as in the original ordering) */
+        grad_norm_prev = grad_norm;
+        grad_norm = 0.0;
+        for(auto v: *gradient_current){
+            grad_norm += v * v;
+        }
+        grad_norm = std::sqrt(grad_norm);
+
+        /* adaptive step-length */
+        sk = 0.0;
+        for(i = 0; i < gradient_current->size(); ++i){
+            sx = (*gradient_current)[i] - (*gradient_prev)[i];
+            sk += sx * sx;
+        }
+        sk = std::sqrt(sk);
+
+        if(sk <= 1e-3 || grad_norm < grad_norm_prev){
+            /* the gradient has barely moved or the new slope is less steep: speed up */
+            gamma *= 1.0005;
+        }
+        else if(grad_norm > grad_norm_prev){
+            /* the new slope is steeper: slow down */
+            gamma /= 1.0005;
+        }
+        else{
+            gamma /= 1.005;
+        }
+
+
+        for(i = 0; i < gradient_current->size(); ++i){
+//            (*params_prev)[i] = (*params_current)[i] - gamma * (*gradient_current)[i];
+            (*params_prev)[i] = (*params_current)[i] + gamma * (*conjugate_direction_current)[i];
+        }
+//        printf("\n");
+
+
+        /* switcheroo */
+        ptr_mem = gradient_prev;
+        gradient_prev = gradient_current;
+        gradient_current = ptr_mem;
+
+        ptr_mem = params_prev;
+        params_prev = params_current;
+        params_current = ptr_mem;
+
+        ptr_mem = conjugate_direction_prev;
+        conjugate_direction_prev = conjugate_direction_current;
+        conjugate_direction_current = ptr_mem;
+
+
+
+//        for (i = 0; i < n_inner_neurons; ++i) {
+//            wi = (*params_current)[3 * i];
+//            ai = (*params_current)[3 * i + 1];
+//            bi = (*params_current)[3 * i + 2];
+//
+//            printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", i + 1, wi, bi, ai);
+//        }
+
+        if(iter_idx % 100 == 0){
+            printf("Iteration %12d. Step size: %15.8f, Gradient norm: %15.8f. Total error: %10.8f (%10.8f)\n", (int)iter_idx, gamma, grad_norm, total_error, eval_error_function(*params_prev, n_inner_neurons, data_points));
+//            for (i = 0; i < n_inner_neurons; ++i) {
+//                wi = (*params_current)[3 * i];
+//                ai = (*params_current)[3 * i + 1];
+//                bi = (*params_current)[3 * i + 2];
+//
+//                printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", i + 1, wi, bi, ai);
+//            }
+//            std::cout << "-----------------------------" << std::endl;
+            std::cout.flush();
+        }
     }
+    printf("Iteration %12d. Step size: %15.8f, Gradient norm: %15.8f. Total error: %10.8f\r\n",(int) iter_idx, gamma, grad_norm, eval_error_function(*params_current, n_inner_neurons, data_points));
+    std::cout.flush();
 
-    double c1 = 0.5, c2 = 0.8, w = 0.8;
+    for (i = 0; i < n_inner_neurons; ++i) {
+        wi = (*params_current)[3 * i];
+        ai = (*params_current)[3 * i + 1];
+        bi = (*params_current)[3 * i + 2];
 
-    unsigned int n_particles = 100;
+        printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", (int)(i + 1), wi, bi, ai);
+    }
 
-    double gamma = 0.5, epsilon = 0.001, delta = 0.9;
+    printf("\n--------------------------------------------------\ntest output for gnuplot\n--------------------------------------------------\n");
+    if(total_error < 1e-3 || true){ /* NOTE: the '|| true' forces the printout regardless of the achieved error */
+        /* ISOTROPIC TEST SET */
+        frac = (te - ts) / (test_size - 1);
+        for(j = 0; j < test_size; ++j){
+            xj = frac * j + ts;
 
-    MSE mse(solution, &ds_00);
-    ParticleSwarm swarm_01(&mse, domain_bounds, c1, c2, w, n_particles, max_iters);
-    swarm_01.optimize( gamma, epsilon, delta );
+            std::cout << j + 1 << " " << xj << " " << eval_f(xj) << " " << eval_approx_f(xj, n_inner_neurons, *params_current) << " " << eval_df(xj) << " " << eval_approx_df(xj, n_inner_neurons, *params_current) << " " << eval_ddf(xj) << " " << eval_approx_ddf(xj, n_inner_neurons, *params_current) << std::endl;
+        }
+    }
 
-    unsigned int n_test_points = 100;
-    std::vector<double> input(1), output(1);
-    double error = 0.0;
-    for(unsigned int i = 0; i < n_test_points; ++i){
-        input[0] = i * ((d1_e - d1_s) / (n_test_points - 1)) + d1_s;
-        solution->eval_single(input, output);
-        std::cout << i + 1 << " " << std::abs(output[0] - std::pow(E, -2 * input[0]) * (3 * input[0] + 1))<< " " << output[0] << std::endl;
+    /* error analysis */
+    double referential_error = 0.0;
+
+    mem = eval_approx_df(0.0, n_inner_neurons, *params_current);
+    referential_error += (mem - 1.0) * (mem - 1.0);
+
+    mem = eval_approx_f(0.0, n_inner_neurons, *params_current);
+    referential_error += (mem - 1.0) * (mem - 1.0);
+
+    frac = 1.0 / train_size;
+    for(j = 0; j < data_points.size(); ++j){
+        xj = data_points[j];
+
+        mem = 4.0 * eval_approx_f(xj, n_inner_neurons, *params_current) + 4.0 * eval_approx_df(xj, n_inner_neurons, *params_current) + eval_approx_ddf(xj, n_inner_neurons, *params_current);
+
+        referential_error += mem * mem * frac;
     }
 
-    delete [] domain_bounds;
+    printf("Total error (as used in the NN example): %10.8f\n", referential_error);
+
+    delete gradient_current;
+    delete gradient_prev;
+    delete params_current;
+    delete params_prev;
+    delete conjugate_direction_current;
+    delete conjugate_direction_prev;
 }
 
-void test_odr(unsigned int n_inner_neurons){
-    unsigned int n_inputs = 1;
-    unsigned int n_outputs = 1;
-    unsigned int n_equations = 3;
 
-    DESolver solver_01( n_equations, n_inputs, n_inner_neurons, n_outputs );
+void test_odr(size_t n_inner_neurons){
+    size_t n_inputs = 1;
+    size_t n_equations = 3;
+
+    DESolver solver_01( n_equations, n_inputs, n_inner_neurons );
 
     /* SETUP OF THE EQUATIONS */
     MultiIndex alpha_0( n_inputs );
@@ -126,19 +620,15 @@ void test_odr(unsigned int n_inner_neurons){
     /* neumann boundary condition */
     solver_01.add_to_differential_equation( 2, alpha_1, 1.0 );
 
-//    double weights[2] = {0.5, 1.0};
-//    std::vector<double> inp = { 1.0 };
-//    solver_01.eval_equation( 0, weights, inp );
-
 
     /* SETUP OF THE TRAINING DATA */
     std::vector<double> inp, out;
 
-    double d1_s = 0.0, d1_e = 4.0, frac, alpha;
+    double d1_s = 0.0, d1_e = 4.0, frac;
 
     /* TRAIN DATA FOR THE GOVERNING DE */
     std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_g;
-    unsigned int train_size = 100;
+    unsigned int train_size = 150;
 
     /* ISOTROPIC TRAIN SET */
     frac = (d1_e - d1_s) / (train_size - 1);
@@ -179,38 +669,45 @@ void test_odr(unsigned int n_inner_neurons){
 
 
 
-
-
     /* PARTICLE SWARM TRAINING METHOD SETUP */
     unsigned int max_iters = 100;
 
     //must encapsulate each of the partial error functions
-    double *domain_bounds = new double[ 2 * n_inner_neurons * (n_inputs + n_outputs) ];
-    for(unsigned int i = 0; i < n_inner_neurons * (n_inputs + n_outputs); ++i){
+    double *domain_bounds = new double[ 6 * n_inner_neurons ];
+    for(unsigned int i = 0; i < 3 * n_inner_neurons; ++i){
         domain_bounds[2 * i] = -800.0;
         domain_bounds[2 * i + 1] = 800.0;
     }
 
-    double c1 = 0.5, c2 = 1.5, w = 0.8;
+    double c1 = 0.5, c2 = 0.75, w = 0.8;
 
-    unsigned int n_particles = 100;
+    unsigned int n_particles = 1000;
 
     double gamma = 0.5, epsilon = 0.02, delta = 0.9;
 
     solver_01.solve_via_particle_swarm( domain_bounds, c1, c2, w, n_particles, max_iters, gamma, epsilon, delta );
 
     NeuralNetwork *solution = solver_01.get_solution();
+    std::vector<double> parameters(3 * n_inner_neurons);//w1, a1, b1, w2, a2, b2, ... , wm, am, bm
+    std::vector<double> *weight_params = solution->get_parameter_ptr_weights();
+    std::vector<double> *biases_params = solution->get_parameter_ptr_biases();
+    for(size_t i = 0; i < n_inner_neurons; ++i){
+        parameters[3 * i] = weight_params->at(i);
+        parameters[3 * i + 1] = weight_params->at(i + n_inner_neurons);
+        parameters[3 * i + 2] = biases_params->at(i);
+    }
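+    /* NOTE: this extraction assumes the network stores the m input->hidden
+     * weights first, the m hidden->output weights second, and one bias per
+     * inner neuron */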
 
-
-    unsigned int n_test_points = 100;
+    unsigned int n_test_points = 150;
     std::vector<double> input(1), output(1);
+    double x;
     for(unsigned int i = 0; i < n_test_points; ++i){
 
-        input[0] = i * ((d1_e - d1_s) / (n_test_points - 1)) + d1_s;
+        x = i * ((d1_e - d1_s) / (n_test_points - 1)) + d1_s;
+        input[0] = x;
 
         solution->eval_single(input, output);
 
-        std::cout << i + 1 << " " << output[0] << std::endl;
+        std::cout << i + 1 << " " << x << " " << std::pow(E, -2*x) * (3*x + 1)<< " " << output[0] << " " << std::pow(E, -2*x) * (1 - 6*x)<< " " << eval_approx_df(x, n_inner_neurons, parameters) << " " << 4 * std::pow(E, -2*x) * (3*x - 2)<< " " << eval_approx_ddf(x, n_inner_neurons, parameters) << std::endl;
     }
 
 
@@ -219,12 +716,44 @@ void test_odr(unsigned int n_inner_neurons){
 
 int main() {
 
-    unsigned int n_inner_neurons = 20;
+    unsigned int n_inner_neurons = 6;
 
-//    test_odr(n_inner_neurons);
+    test_odr(n_inner_neurons);
 
-    test_analytical_y(n_inner_neurons);
-    test_analytical_gradient_y(n_inner_neurons);
+//    bool optimize_weights = true;
+//    bool optimize_biases = true;
+//    unsigned int train_size = 150;
+//    double accuracy = 1e-5;
+//    double ds = 0.0;
+//    double de = 4.0;
+//
+//    unsigned int test_size = 300;
+//    double ts = ds;
+//    double te = de;
+//
+////    std::vector<double> init_guess = {0.35088209, -0.23738505, 0.14160885, 3.72785473, -6.45758308, 1.73769138};
+//    std::vector<double> init_guess(3 * n_inner_neurons);
+//
+//    std::random_device seeder;
+//    std::mt19937 gen(seeder());
+//    std::uniform_real_distribution<double> dist(-1.0, 1.0);
+//    for(unsigned int i = 0; i < 3 * n_inner_neurons; ++i){
+//        init_guess[i] = dist(gen);
+//        init_guess[i] = dist(gen);
+//    }
+//    if(!optimize_biases){
+//        for(unsigned int i = 0; i < n_inner_neurons; ++i){
+//            init_guess[3 * i + 2] = 0.0;
+//        }
+//    }
+//    if(!optimize_weights){
+//        for(unsigned int i = 0; i < n_inner_neurons; ++i){
+//            init_guess[3 * i] = 0.0;
+//            init_guess[3 * i + 1] = 0.0;
+//        }
+//    }
+//
+//    test_analytical_gradient_y(init_guess, accuracy, n_inner_neurons, train_size, optimize_weights, optimize_biases, ds, de, test_size, ts, te);
 
 
     return 0;
diff --git a/src/net_test_pde_1.cpp b/src/net_test_pde_1.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..99909cc8c0e3f81eb9c2a863fe1fe123474e2df3
--- /dev/null
+++ b/src/net_test_pde_1.cpp
@@ -0,0 +1,161 @@
+/**
+ * Example solving the water flow 1D diffusion PDE:
+ *
+ * (d^2/dx^2)y(x, t) - (d/dt)y(x, t) = 0, for (x, t) in [0, 1] x [0, 1]
+ * y(0, t) = sin(t)
+ * y(x, 0) = e^(-(0.5)^(0.5)x) * sin(-(0.5)^(0.5)x)
+ *
+ * -------------------------------------------
+ * Analytical solution: y(x, t) = e^(-sqrt(0.5) * x) * sin(t - sqrt(0.5) * x)
+ * NN representation: sum over [a_i * (1 + e^(bi - x * w_ix - t * w_it))^(-1)]
+ * -------------------------------------------
+ * Optimal NN setting with biases (2 inner neurons)
+ *
+ * @author Michal Kravčenko
+ * @date 9.8.18
+ */
+
+#include <random>
+#include <iostream>
+
+#include "../include/4neuro.h"
+#include "Solvers/DESolver.h"
+
+
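+/* Sanity check for the analytical solution stated in the header (an added
+ * sketch, assuming E and the <cmath> functions come in via 4neuro.h): with
+ * k = sqrt(0.5), y(x, t) = e^(-k x) * sin(t - k x) gives
+ * y_xx = 2 k^2 e^(-k x) cos(t - k x) = y_t, so the residual below is ~0. */
+double pde_analytic_residual(double x, double t){
+    const double k = 0.707106781;
+    double y_t  = std::pow(E, -k * x) * std::cos(t - k * x);
+    double y_xx = 2.0 * k * k * std::pow(E, -k * x) * std::cos(t - k * x);
+    return y_xx - y_t;
+}
+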
+void test_odr(size_t n_inner_neurons){
+
+    /* solution properties */
+    size_t train_size = 10;
+    double d1_s = 0.0, d1_e = 1.0;
+
+    unsigned int max_iters = 100;
+    unsigned int n_particles = 10;
+
+
+    /* do not change below */
+    size_t n_inputs = 2;
+    size_t n_equations = 3;
+    DESolver solver_01( n_equations, n_inputs, n_inner_neurons );
+
+    /* SETUP OF THE EQUATIONS */
+    MultiIndex alpha_00( n_inputs ); /* degrees (0, 0): y itself */
+    MultiIndex alpha_01( n_inputs ); /* degrees (0, 1): dy/dt */
+    MultiIndex alpha_20( n_inputs ); /* degrees (2, 0): d^2y/dx^2 */
+    /* set_partial_derivative(index, value) sets the derivative degree of
+     * variable `index`; the original calls had the two arguments swapped */
+    alpha_00.set_partial_derivative(0, 0);
+    alpha_01.set_partial_derivative(1, 1);
+    alpha_20.set_partial_derivative(0, 2);
+
+    /* the governing differential equation */
+    solver_01.add_to_differential_equation( 0, alpha_20,  1.0 );
+    solver_01.add_to_differential_equation( 0, alpha_01, -1.0 );
+
+    /* dirichlet boundary conditions */
+    solver_01.add_to_differential_equation( 1, alpha_00, 1.0 );
+    solver_01.add_to_differential_equation( 2, alpha_00, 1.0 );
+
+
+    /* SETUP OF THE TRAINING DATA */
+    std::vector<double> inp, out;
+
+    double frac, x, t;
+
+    /* TRAIN DATA FOR THE GOVERNING DE */
+    std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_g;
+
+
+    /* ISOTROPIC TRAIN SET */
+    frac = (d1_e - d1_s) / (train_size - 1);
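+    /* NOTE: every training point below is placed at the origin, so the
+     * governing-equation residual is only enforced at (0, 0) and `frac`
+     * goes unused; an isotropic grid over [0, 1]^2 appears to be intended */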
+    for(size_t i = 0; i < train_size; ++i){
+        inp = {0.0, 0.0};
+        out = {0.0};
+        data_vec_g.emplace_back(std::make_pair(inp, out));
+    }
+    DataSet ds_00(&data_vec_g);
+
+    /* ISOTROPIC TRAIN DATA FOR THE FIRST DIRICHLET BC */
+    std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_t;
+    frac = (d1_e - d1_s) / (train_size - 1);
+    for(size_t i = 0; i < train_size; ++i){
+        t = i * frac;
+        inp = { 0.0, t };
+        out = { std::sin( t ) };
+
+        data_vec_t.emplace_back(std::make_pair(inp, out));
+    }
+    DataSet ds_t(&data_vec_t);
+
+    /* ISOTROPIC TRAIN DATA FOR THE SECOND DIRICHLET BC */
+    std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_x;
+    frac = (d1_e - d1_s) / (train_size - 1);
+    for(size_t i = 0; i < train_size; ++i){
+        x = i * frac;
+        inp = { x, 0.0 };
+        out = { std::pow(E, -0.707106781 * x) * std::sin( -0.707106781 * x )};
+
+        data_vec_x.emplace_back(std::make_pair(inp, out));
+    }
+    DataSet ds_x(&data_vec_x);
+
+
+
+    /* Placing the conditions into the solver */
+    solver_01.set_error_function( 0, ErrorFunctionType::ErrorFuncMSE, &ds_00 );
+    solver_01.set_error_function( 1, ErrorFunctionType::ErrorFuncMSE, &ds_t );
+    solver_01.set_error_function( 2, ErrorFunctionType::ErrorFuncMSE, &ds_x );
+
+
+
+    /* PARTICLE SWARM TRAINING METHOD SETUP */
+
+
+    /* domain bounds: one (lower, upper) pair for each of the
+     * 3 * n_inner_neurons optimized parameters (w_i, a_i, b_i) */
+    double *domain_bounds = new double[ 6 * n_inner_neurons ];
+    for(unsigned int i = 0; i < 3 * n_inner_neurons; ++i){
+        domain_bounds[2 * i] = -800.0;
+        domain_bounds[2 * i + 1] = 800.0;
+    }
+
+    double c1 = 0.5, c2 = 0.75, w = 0.8;
+
+
+
+    double gamma = 0.5, epsilon = 0.02, delta = 0.9;
+
+    solver_01.solve_via_particle_swarm( domain_bounds, c1, c2, w, n_particles, max_iters, gamma, epsilon, delta );
+
+//    NeuralNetwork *solution = solver_01.get_solution();
+//    std::vector<double> parameters(3 * n_inner_neurons);//w1, a1, b1, w2, a2, b2, ... , wm, am, bm
+//    std::vector<double> *weight_params = solution->get_parameter_ptr_weights();
+//    std::vector<double> *biases_params = solution->get_parameter_ptr_biases();
+//    for(size_t i = 0; i < n_inner_neurons; ++i){
+//        parameters[3 * i] = weight_params->at(i);
+//        parameters[3 * i + 1] = weight_params->at(i + n_inner_neurons);
+//        parameters[3 * i + 2] = biases_params->at(i);
+//    }
+//
+//    unsigned int n_test_points = 150;
+//    std::vector<double> input(1), output(1);
+//    double x;
+//    for(unsigned int i = 0; i < n_test_points; ++i){
+//
+//        x = i * ((d1_e - d1_s) / (n_test_points - 1)) + d1_s;
+//        input[0] = x;
+//
+//        solution->eval_single(input, output);
+//
+//        std::cout << i + 1 << " " << x << " " << std::pow(E, -2*x) * (3*x + 1)<< " " << output[0] << " " << std::pow(E, -2*x) * (1 - 6*x)<< " " << eval_approx_df(x, n_inner_neurons, parameters) << " " << 4 * std::pow(E, -2*x) * (3*x - 2)<< " " << eval_approx_ddf(x, n_inner_neurons, parameters) << std::endl;
+//    }
+
+
+    delete [] domain_bounds;
+}
+
+int main() {
+
+    unsigned int n_inner_neurons = 2;
+
+    test_odr(n_inner_neurons);
+
+
+    return 0;
+}
\ No newline at end of file
diff --git a/src/tests/ConnectionWeightIdentity_test.cpp b/src/tests/ConnectionWeightIdentity_test.cpp
index c3193998268cc3706a91e1ef774a4eb4bb85f22a..1e5758f0605d0c8af0ecf333d332915d6e68873b 100644
--- a/src/tests/ConnectionWeightIdentity_test.cpp
+++ b/src/tests/ConnectionWeightIdentity_test.cpp
@@ -8,7 +8,7 @@
 #define BOOST_TEST_NO_MAIN
 
 #include <boost/test/unit_test.hpp>
-#include "../NetConnection/ConnectionWeightIdentity.h"
+#include "../NetConnection/ConnectionFunctionIdentity.h"
 
 /**
  * Boost testing suite for testing ConnectionWeightIdentity.h
@@ -20,8 +20,8 @@ BOOST_AUTO_TEST_SUITE(ConnectionWeightIdentity_test)
      */
     BOOST_AUTO_TEST_CASE(ConnectionWeightIdentity_construction_test) {
         std::vector<double> weight_array = {1, 2, 3, 4, 5};
-        //Test of none exception when creation new instance of ConnectionWeightIdentity
-        BOOST_CHECK_NO_THROW(ConnectionWeightIdentity CWI(&weight_array));
+        //Test of none exception when creation new instance of ConnectionFunctionIdentity
+        BOOST_CHECK_NO_THROW(ConnectionFunctionIdentity CWI(&weight_array));
     }
 
     /**
@@ -29,7 +29,7 @@ BOOST_AUTO_TEST_SUITE(ConnectionWeightIdentity_test)
      */
     BOOST_AUTO_TEST_CASE(ConnectionWeightIdentity_eval_test) {
         std::vector<double> weight_array = {1, 2, 3, 4, 5};
-        ConnectionWeightIdentity CWI(&weight_array);
+        ConnectionFunctionIdentity CWI(&weight_array);
         int para[5] = {3, 1, 2, 3, 4};
         CWI.SetParamIndices(para);
 
diff --git a/src/tests/ConnectionWeight_test.cpp b/src/tests/ConnectionWeight_test.cpp
index c54060fb634b0e92227a7c1617908f8e92a2ac50..5034617b33f4711541c85348442dfd3efd8f94d9 100644
--- a/src/tests/ConnectionWeight_test.cpp
+++ b/src/tests/ConnectionWeight_test.cpp
@@ -8,7 +8,7 @@
 #define BOOST_TEST_NO_MAIN
 
 #include <boost/test/unit_test.hpp>
-#include "../NetConnection/ConnectionWeight.h"
+#include "../NetConnection/ConnectionFunctionGeneral.h"
 
 /**
  * Boost testing suite for testing ConnectionWeight.h
@@ -22,8 +22,8 @@ BOOST_AUTO_TEST_SUITE(ConnectionWeight_test)
      std::vector<double> * w_array = nullptr ;
 
      //Tests of no exception when, create new instance
-     BOOST_CHECK_NO_THROW( ConnectionWeight conn(2, w_array ));
-     BOOST_CHECK_NO_THROW( ConnectionWeight conn);
+     BOOST_CHECK_NO_THROW( ConnectionFunctionGeneral conn(2, w_array ));
+     BOOST_CHECK_NO_THROW( ConnectionFunctionGeneral conn);
     }
 
     /**
@@ -32,8 +32,8 @@ BOOST_AUTO_TEST_SUITE(ConnectionWeight_test)
     BOOST_AUTO_TEST_CASE(ConnectionWeight_param_test){
         std::vector<double>  w_array= {0,1,2,3,4} ;
       
-        ConnectionWeight conn(5, &w_array );
-        ConnectionWeight connbad(7, &w_array);
+        ConnectionFunctionGeneral conn(5, &w_array );
+        ConnectionFunctionGeneral connbad(7, &w_array);
         int para[5]={0,1,2,3,4};
 
         //Test of no exception when call SetParamIndices method on instance created with with correct parameters
@@ -53,7 +53,7 @@ BOOST_AUTO_TEST_SUITE(ConnectionWeight_test)
     BOOST_AUTO_TEST_CASE(ConnectionWeight_eval__test) {
         std::vector<double>  w_array= {1,2,3,4,5} ;
         
-        ConnectionWeight conn(5, &w_array );
+        ConnectionFunctionGeneral conn(5, &w_array );
         int para[5]={1,2,3,4,5};
         conn.SetParamIndices(para);
 
@@ -68,7 +68,7 @@ BOOST_AUTO_TEST_SUITE(ConnectionWeight_test)
         std::vector<double>  w_array= {1,2,3,4,5} ;
         double w_array2[5] = {5,4,3,2,1};
         
-        ConnectionWeight conn(5, &w_array);
+        ConnectionFunctionGeneral conn(5, &w_array);
         int para[5]={0,1,2,3,4};
         conn.SetParamIndices(para);
         conn.adjust_weights(w_array2);
@@ -83,7 +83,7 @@ BOOST_AUTO_TEST_SUITE(ConnectionWeight_test)
     BOOST_AUTO_TEST_CASE(ConnectionWeight_weight_set_test) {
         std::vector<double>  w_array= {2,3,4,5,6} ;
         double w_array2[5] = {1,2,3,4,5};
-        ConnectionWeight conn(5, &w_array );
+        ConnectionFunctionGeneral conn(5, &w_array );
         conn.eval();
         int para[5]={0,1,2,3,4};
         conn.SetParamIndices(para);
diff --git a/src/tests/DataSet_test.cpp b/src/tests/DataSet_test.cpp
index 3e6ae60075a61113c16501ec9cce1a4a426935fd..8839f2d8f7c44d5354f8420e2a07a7986872bc66 100644
--- a/src/tests/DataSet_test.cpp
+++ b/src/tests/DataSet_test.cpp
@@ -9,8 +9,6 @@
 
 #include <boost/test/unit_test.hpp>
 #include <boost/test/output_test_stream.hpp>
-#include <iostream>
-#include <boost/archive/archive_exception.hpp>
 #include "../DataSet/DataSet.h"
 //#include <boost/filesystem.hpp>
 
diff --git a/src/tests/ErrorFunctions_test.cpp b/src/tests/ErrorFunctions_test.cpp
index 550e71983c828a9efb3099c63c3603131a74bebc..318ed777343fe277d2982a963183f4ec6600f6d5 100644
--- a/src/tests/ErrorFunctions_test.cpp
+++ b/src/tests/ErrorFunctions_test.cpp
@@ -9,7 +9,6 @@
 
 #include <boost/test/unit_test.hpp>
 #include "../ErrorFunction/ErrorFunctions.h"
-#include "../Neuron/NeuronLinear.h"
 
 /**
  * Boost testing suite for testing ErrorFunction.h
diff --git a/src/tests/NeuralNetworkSum_test.cpp b/src/tests/NeuralNetworkSum_test.cpp
index 889f984cbfbbe9fd2159fea25397a1d6291576dd..cb4a1f57570753f26cac4b3979799a82aa5081d4 100644
--- a/src/tests/NeuralNetworkSum_test.cpp
+++ b/src/tests/NeuralNetworkSum_test.cpp
@@ -9,7 +9,6 @@
 
 #include <boost/test/unit_test.hpp>
 #include "../Network/NeuralNetworkSum.h"
-#include "../Neuron/NeuronLinear.h"
 
 /**
  * Boost testing suite for testing NeuralNetworkSum.h
diff --git a/src/tests/NeuralNetwork_test.cpp b/src/tests/NeuralNetwork_test.cpp
index 95e6ebe4c73e78bc89227733ba47ccf022faa45c..27287eb677bc462eabed0d95326d67db943b5750 100644
--- a/src/tests/NeuralNetwork_test.cpp
+++ b/src/tests/NeuralNetwork_test.cpp
@@ -9,7 +9,6 @@
 
 #include <boost/test/unit_test.hpp>
 #include "../Network/NeuralNetwork.h"
-#include "../Neuron/NeuronLinear.h"
 
 /**
  * Boost testing suite for testing NeuralNetwork.h
diff --git a/src/tests/Particle_test.cpp b/src/tests/Particle_test.cpp
index 243fc92e0cf7f6b932b6601c224486f24687bfbd..4a0550aa42e04268df415c972fa49871248c1b2d 100644
--- a/src/tests/Particle_test.cpp
+++ b/src/tests/Particle_test.cpp
@@ -8,10 +8,7 @@
 #define BOOST_TEST_NO_MAIN
 
 #include <boost/test/unit_test.hpp>
-#include <iostream>
 #include "../LearningMethods/ParticleSwarm.h"
-#include "../Neuron/NeuronLinear.h"
-#include "../DataSet/DataSet.h"
 /**
  * Boost testing suite for testing ParticleSwarm.h
  * TODO
diff --git a/src/tests/boost_test_lib_dummy.cpp b/src/tests/boost_test_lib_dummy.cpp
index e13ffe1cc1c0b6ebc22ded71c38462c3e5ed1986..c9f646b4299cc88f25b86a58e3e184b38d421225 100644
--- a/src/tests/boost_test_lib_dummy.cpp
+++ b/src/tests/boost_test_lib_dummy.cpp
@@ -3,4 +3,3 @@
 //
 
 #define BOOST_TEST_MODULE neuron_test
-#include <boost/test/included/unit_test.hpp>