From dd1e7fd8d224f20006e406afa72e95e7ebd4dd93 Mon Sep 17 00:00:00 2001
From: Martin Beseda <martin.beseda@vsb.cz>
Date: Thu, 15 Nov 2018 19:35:54 +0100
Subject: [PATCH] TMP: Implementing SIMULATOR example

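Adds a first working version of the SIMULATOR example:

* new CrossValidator class running repeated k-fold cross-validation
  tests with a given learning method and error function
* ErrorFunction can now split its data set into a training and a test
  part (divide_data_train_test) and be evaluated on the test part
  (eval_on_test_data)
* new FullyConnectedFFN constructor assembling a fully connected
  feed-forward network from a list of layer sizes and a hidden-neuron
  type
* NeuralNetwork::eval_single now throws std::invalid_argument instead
  of calling exit(-1)
* debug prints removed from CSVReader::get_data_set

Intended usage of the new classes, condensed from src/examples/main.cpp
below (a minimal sketch: "data.txt" is a placeholder path and the
GradientDescent default constructor is assumed, as in the example):

    #include "4neuro.h"
    #include "../CrossValidator/CrossValidator.h"

    int main() {
        /* Read a tab-separated file with a header line (placeholder path) */
        l4n::CSVReader reader("data.txt", "\t", true);
        reader.read();

        /* Column indices serving as network inputs and outputs */
        std::vector<unsigned int> inputs = {0, 1};
        std::vector<unsigned int> outputs = {2};
        l4n::DataSet ds = reader.get_data_set(&inputs, &outputs);

        /* Fully connected net: 2 inputs, 5 logistic hidden neurons, 1 output */
        std::vector<unsigned int> layers = {2, 5, 1};
        l4n::FullyConnectedFFN nn(&layers, l4n::NeuronType::LOGISTIC);

        l4n::MSE mse(&nn, &ds);

        /* Assumed default constructor, as used in src/examples/main.cpp */
        l4n::GradientDescent gd;

        /* 3 independent runs of 10-fold cross-validation */
        l4n::CrossValidator cv(&gd, &mse);
        cv.run_k_fold_test(10, 3);

        return 0;
    }
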
---
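Notes: run_k_fold_test(k, n) performs n independent runs. Each run
withholds ceil(ds_size/k) randomly chosen rows as the test set (for
k = 10 on a 300-row set, ceil(300/10) = 30 rows), trains the network
on the remaining rows and then restores the full data set.
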
 external_dependencies/boost              |   2 +-
 external_dependencies/exprtk             |   2 +-
 include/4neuro.h                         |   1 +
 src/CMakeLists.txt                       |   1 +
 src/CSVReader/CSVReader.cpp              |  10 +--
 src/CrossValidator/CrossValidator.cpp    |  27 ++++++
 src/CrossValidator/CrossValidator.h      |  48 ++++++++++
 src/ErrorFunction/ErrorFunctions.cpp     | 106 ++++++++++++++++++----
 src/ErrorFunction/ErrorFunctions.h       |  58 ++++++++++--
 src/LearningMethods/ILearningMethods.cpp |   3 +-
 src/LearningMethods/ILearningMethods.h   |  12 ++-
 src/LearningMethods/ParticleSwarm.h      |   7 +-
 src/Network/NeuralNetwork.cpp            | 107 +++++++++++++++++++++--
 src/Network/NeuralNetwork.h              |  13 ++-
 src/Neuron/Neuron.h                      |  13 ++-
 src/Neuron/NeuronLinear.cpp              |   1 -
 src/examples/main.cpp                    |  33 ++++++-
 src/examples/net_test_1.cpp              |   1 +
 18 files changed, 386 insertions(+), 59 deletions(-)
 create mode 100644 src/CrossValidator/CrossValidator.cpp
 create mode 100644 src/CrossValidator/CrossValidator.h

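Reviewer note on divide_data_train_test below: the chosen test-row
indices are visited in descending order, so each erase() leaves the
still-pending (smaller) indices valid. A minimal standalone sketch of
the idiom with made-up values (not library code):

    #include <iostream>
    #include <vector>
    #include <set>
    #include <functional>

    int main() {
        std::vector<int> train = {10, 11, 12, 13, 14, 15};
        std::vector<int> test;

        /* Distinct indices to withhold, iterated in descending order */
        std::set<unsigned int, std::greater<unsigned int>> idx = {4, 1};

        for (auto i : idx) {  /* visits 4, then 1 */
            test.push_back(train.at(i));
            train.erase(train.begin() + i);
        }

        /* Prints: train = 10 12 13 15, test = 14 11 */
        std::cout << "train =";
        for (auto v : train) std::cout << ' ' << v;
        std::cout << ", test =";
        for (auto v : test) std::cout << ' ' << v;
        std::cout << std::endl;
        return 0;
    }
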
diff --git a/external_dependencies/boost b/external_dependencies/boost
index 35e0ef02..b152644d 160000
--- a/external_dependencies/boost
+++ b/external_dependencies/boost
@@ -1 +1 @@
-Subproject commit 35e0ef020057dce87e4ed65d3f34d28e12a0d411
+Subproject commit b152644d7bb2cfa9bd3e19c9d1e397d5ba240efc
diff --git a/external_dependencies/exprtk b/external_dependencies/exprtk
index 9836f21d..b3b4cee1 160000
--- a/external_dependencies/exprtk
+++ b/external_dependencies/exprtk
@@ -1 +1 @@
-Subproject commit 9836f21d07b1bf799e6877324268708f61c01f73
+Subproject commit b3b4cee1c52baf935d68fe3bb7fb1a0ec6b79694
diff --git a/include/4neuro.h b/include/4neuro.h
index 3dd291b9..1bded1ed 100644
--- a/include/4neuro.h
+++ b/include/4neuro.h
@@ -18,6 +18,7 @@
 #include "../src/Solvers/DESolver.h"
 #include "../src/ErrorFunction/ErrorFunctions.h"
 #include "../src/constants.h"
+#include "../src/CSVReader/CSVReader.h"
 
 // Abbreaviate lib4neuro namespace to l4n
 namespace l4n = lib4neuro;
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 04096060..5d018c5c 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -48,6 +48,7 @@ if ("${BUILD_LIB}" STREQUAL "yes")
 		Solvers/DESolver.cpp
 		Exception/Exceptions.cpp
 		CSVReader/CSVReader.cpp
+		CrossValidator/CrossValidator.cpp
 	)
 
     target_link_libraries(
diff --git a/src/CSVReader/CSVReader.cpp b/src/CSVReader/CSVReader.cpp
index 533cd1cc..596d2547 100644
--- a/src/CSVReader/CSVReader.cpp
+++ b/src/CSVReader/CSVReader.cpp
@@ -27,6 +27,7 @@ namespace lib4neuro {
 
         /* Read single line from the file */
         while(std::getline(ifs, line)) {
+
             /* Ignore empty line */
             if(line == "") {
                 continue;
@@ -67,29 +68,20 @@ namespace lib4neuro {
 
         std::vector<std::pair<std::vector<double>, std::vector<double>>> data_set_contents;
 
-        int i = 0;
         for(auto line : *this->data) {
-
-            std::cout << i++ << std::endl;
-
             //TODO check for non-numerical (or empty) values in data
             std::vector<double> input;
             for(auto ind : *input_col_indices) {
                 input.push_back(std::stod(line.at(ind)));
             }
 
-            std::cout << "a" << std::endl;
-
             std::vector<double> output;
             for(auto ind : *output_col_indices) {
                 output.emplace_back(std::stod(line.at(ind)));
             }
 
-            std::cout << "b" << std::endl;
-
             data_set_contents.push_back(std::make_pair(input, output));
         }
-        int a = 1;
 
         return DataSet(&data_set_contents);
     }
diff --git a/src/CrossValidator/CrossValidator.cpp b/src/CrossValidator/CrossValidator.cpp
new file mode 100644
index 00000000..4a129271
--- /dev/null
+++ b/src/CrossValidator/CrossValidator.cpp
@@ -0,0 +1,27 @@
+//
+// Created by martin on 14.11.18.
+//
+
+#include "CrossValidator.h"
+#include "../message.h"
+
+namespace lib4neuro {
+    LIB4NEURO_API CrossValidator::CrossValidator(ILearningMethods* optimizer, ErrorFunction* ef) {
+        this->optimizer = optimizer;
+        this->ef = ef;
+    }
+
+    LIB4NEURO_API void CrossValidator::run_k_fold_test(unsigned int k, unsigned int tests_number) {
+        for(unsigned int i = 0; i < tests_number; i++) {
+            std::cout << "Cross-validation run " << i+1 << std::endl;
+
+            /* Withhold 1/k of the data for testing and retrain from random weights */
+            this->ef->divide_data_train_test(1.0/k);
+            this->ef->get_network_instance()->randomize_weights();
+            this->optimizer->optimize(*this->ef);
+
+            std::cout << "Test-set error: " << this->ef->eval_on_test_data(this->optimizer->get_parameters()) << std::endl;
+            this->ef->return_full_data_set_for_training();
+        }
+    }
+}
diff --git a/src/CrossValidator/CrossValidator.h b/src/CrossValidator/CrossValidator.h
new file mode 100644
index 00000000..a6db416d
--- /dev/null
+++ b/src/CrossValidator/CrossValidator.h
@@ -0,0 +1,48 @@
+//
+// Created by martin on 14.11.18.
+//
+
+#ifndef LIB4NEURO_CROSSVALIDATOR_H
+#define LIB4NEURO_CROSSVALIDATOR_H
+
+#include "../settings.h"
+#include "../DataSet/DataSet.h"
+#include "../LearningMethods/ILearningMethods.h"
+
+namespace lib4neuro {
+
+    /**
+     * Runs repeated k-fold cross-validation of a neural network trained by a supplied learning method
+     */
+    class CrossValidator {
+    private:
+
+        /**
+         * Learning method (optimizer) used to train the network in every run
+         */
+        ILearningMethods* optimizer;
+
+        /**
+         * Error function bound to the tested network and its data set
+         */
+        ErrorFunction* ef;
+
+    public:
+
+        /**
+         * Constructor
+         * @param optimizer Learning method used to train the network
+         * @param ef Error function to be minimized during training
+         */
+        LIB4NEURO_API CrossValidator(ILearningMethods* optimizer, ErrorFunction* ef);
+
+        /**
+         * Runs a k-fold cross-validation test
+         * @param k Number of folds (1/k of the data is withheld for testing)
+         * @param tests_number Number of independent cross-validation runs
+         */
+        LIB4NEURO_API void run_k_fold_test(unsigned int k, unsigned int tests_number);
+    };
+}
+
+#endif //LIB4NEURO_CROSSVALIDATOR_H
diff --git a/src/ErrorFunction/ErrorFunctions.cpp b/src/ErrorFunction/ErrorFunctions.cpp
index 7751eefa..3e7afd6b 100644
--- a/src/ErrorFunction/ErrorFunctions.cpp
+++ b/src/ErrorFunction/ErrorFunctions.cpp
@@ -3,6 +3,11 @@
 //
 
 #include <vector>
+#include <set>
+#include <functional>
+#include <cmath>
+#include <boost/random/mersenne_twister.hpp>
+#include <boost/random/uniform_int_distribution.hpp>
 
 #include "ErrorFunctions.h"
 
@@ -12,47 +17,93 @@ namespace lib4neuro {
         return this->dimension;
     }
 
+    NeuralNetwork* ErrorFunction::get_network_instance() {
+        return this->net;
+    }
+
+    void ErrorFunction::divide_data_train_test(double percent_test) {
+        size_t ds_size = this->ds->get_n_elements();
+
+        /* Store the full data set */
+        this->ds_full = this->ds;
+
+        /* Choose random row indices for the test part; the rest is kept for training */
+        static boost::random::mt19937 gen; /* static: repeated calls give different splits */
+        boost::random::uniform_int_distribution<> dist(0, ds_size - 1);
+
+        size_t test_set_size = ceil(ds_size * percent_test);
+
+        /* Draw pairwise distinct indices in descending order; duplicates
+           would invalidate the index-based erase loop below */
+        std::set<unsigned int, std::greater<unsigned int>> test_indices_set;
+        while(test_indices_set.size() < test_set_size) {
+            test_indices_set.insert(dist(gen));
+        }
+        std::vector<unsigned int> test_indices(test_indices_set.begin(), test_indices_set.end());
+
+        std::vector<std::pair<std::vector<double>, std::vector<double>>> test_data, train_data;
+
+        /* Copy all the data to train_data */
+        for(auto e : *this->ds_full->get_data()) {
+            train_data.emplace_back(e);
+        }
+
+        /* Move the testing data from train_data to test_data */
+        for(auto ind : test_indices) {
+            test_data.emplace_back(train_data.at(ind));
+            train_data.erase(train_data.begin() + ind);
+        }
+
+        /* Re-initialize data set for training */
+        this->ds = new DataSet(&train_data);
+
+        /* Initialize test data */
+        this->ds_test = new DataSet(&test_data);
+    }
+
+    void ErrorFunction::return_full_data_set_for_training() {
+        if(this->ds_test) {
+            this->ds = this->ds_full;
+        }
+    }
+
     MSE::MSE(NeuralNetwork *net, DataSet *ds) {
         this->net = net;
         this->ds = ds;
         this->dimension = net->get_n_weights() + net->get_n_biases();
     }
 
-    double MSE::eval(std::vector<double> *weights) {
-        size_t dim_out = this->ds->get_output_dim();
-//    unsigned int dim_in = this->ds->get_input_dim();
-        size_t n_elements = this->ds->get_n_elements();
+    double MSE::eval_general(DataSet* data_set, std::vector<double> *weights) {
+        size_t dim_out = data_set->get_output_dim();
+        size_t n_elements = data_set->get_n_elements();
         double error = 0.0, val;
 
-        std::vector<std::pair<std::vector<double>, std::vector<double>>> *data = this->ds->get_data();
-
-//    //TODO instead use something smarter
-//    this->net->copy_weights(weights);
+        std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = data_set->get_data();
 
+        //TODO instead use something smarter
         std::vector<double> output(dim_out);
 
         for (auto el: *data) {  // Iterate through every element in the test set
 
-            this->net->eval_single(el.first, output,
+            this->net->eval_single(el.first,
+                                   output,
                                    weights);  // Compute the net output and store it into 'output' variable
 
-
-//        printf("errors: ");
             for (size_t j = 0; j < dim_out; ++j) {  // Compute difference for every element of the output vector
-
                 val = output[j] - el.second[j];
                 error += val * val;
-
-//            printf("%f, ", val * val);
             }
-//        printf("\n");
-
         }
-
-//    printf("n_elements: %d\n", n_elements);
         return error / n_elements;
     }
 
+    double MSE::eval(std::vector<double> *weights) {
+        return this->eval_general(this->ds, weights);
+    }
+
+    double MSE::eval_on_test_data(std::vector<double> *weights) {
+        return this->eval_general(this->ds_test, weights);
+    }
+
     void MSE::calculate_error_gradient(std::vector<double> &params, std::vector<double> &grad, double alpha) {
 
         size_t dim_out = this->ds->get_output_dim();
@@ -109,7 +160,24 @@ namespace lib4neuro {
         }
     }
 
-    double ErrorSum::eval(std::vector<double> *weights) {
+    double ErrorSum::eval_on_test_data(std::vector<double>* weights) {
+        //TODO handle the case when no test data are available
+
+        double output = 0.0;
+        ErrorFunction *ef = nullptr;
+
+        for(unsigned int i = 0; i < this->summand->size(); ++i) {
+            ef = this->summand->at(i);
+
+            if (ef) {
+                output += ef->eval_on_test_data(weights) * this->summand_coefficient->at(i);
+            }
+        }
+
+        return output;
+    }
+
+    double ErrorSum::eval(std::vector<double>* weights) {
         double output = 0.0;
         ErrorFunction *ef = nullptr;
 
diff --git a/src/ErrorFunction/ErrorFunctions.h b/src/ErrorFunction/ErrorFunctions.h
index 989cecfd..f3907498 100644
--- a/src/ErrorFunction/ErrorFunctions.h
+++ b/src/ErrorFunction/ErrorFunctions.h
@@ -25,7 +25,7 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        virtual double eval(std::vector<double> *weights = nullptr) = 0;
+        virtual double eval(std::vector<double>* weights) = 0;
 
         /**
          *
@@ -39,8 +39,7 @@ namespace lib4neuro {
          * @param grad
          */
         virtual void
-        calculate_error_gradient(std::vector<double> &params, std::vector<double> &grad, double alpha = 1.0) = 0;
-
+        calculate_error_gradient(std::vector<double> &params, std::vector<double> &grad, double alpha=1.0) = 0;
 
         /**
          *
@@ -52,7 +51,30 @@ namespace lib4neuro {
          * //TODO delete after gradient learning is debugged
          * @return
          */
-        virtual DataSet *get_dataset() = 0;
+        virtual DataSet* get_dataset() = 0;
+
+        /**
+         *
+         * @return Pointer to the neural network associated with this error function
+         */
+        NeuralNetwork* get_network_instance();
+
+        /**
+         * Splits the associated data set into a training part and a test part
+         * @param percent_test Fraction of the data set (between 0 and 1)
+         *                     withheld for testing
+         */
+        void divide_data_train_test(double percent_test);
+
+        /**
+         * Restores the full data set as the training set after a train/test split
+         */
+        void return_full_data_set_for_training();
+
+        /**
+         * Evaluates the error function on the test part of the data set
+         */
+        virtual double eval_on_test_data(std::vector<double>* weights = nullptr) = 0;
 
     protected:
 
@@ -64,7 +86,22 @@ namespace lib4neuro {
         /**
          *
          */
-        NeuralNetwork *net = nullptr;
+        NeuralNetwork* net = nullptr;
+
+        /**
+         * Data set currently used for training (the full set unless split)
+         */
+        DataSet* ds = nullptr;
+
+        /**
+         * Backup of the full data set while a train/test split is active
+         */
+        DataSet* ds_full = nullptr;
+
+        /**
+         * Test part of the data set created by divide_data_train_test
+         */
+        DataSet* ds_test = nullptr;
     };
 
     class MSE : public ErrorFunction {
@@ -102,9 +139,11 @@ namespace lib4neuro {
             return this->ds;
         };
 
+        LIB4NEURO_API double eval_on_test_data(std::vector<double> *weights = nullptr) override;
+
     private:
+        double eval_general(DataSet* data_set, std::vector<double>* weights = nullptr);
 
-        DataSet *ds;
     };
 
     class ErrorSum : public ErrorFunction {
@@ -126,6 +165,13 @@ namespace lib4neuro {
          */
         LIB4NEURO_API double eval(std::vector<double> *weights = nullptr) override;
 
+        /**
+         * Evaluates the summed error functions on their test data sets
+         * @param weights Parameter vector to evaluate with (current weights if nullptr)
+         * @return Weighted sum of the partial test-set errors
+         */
+        LIB4NEURO_API double eval_on_test_data(std::vector<double> *weights = nullptr) override;
+
         /**
          *
          * @param F
diff --git a/src/LearningMethods/ILearningMethods.cpp b/src/LearningMethods/ILearningMethods.cpp
index 6aa47daf..d0bb4b1c 100644
--- a/src/LearningMethods/ILearningMethods.cpp
+++ b/src/LearningMethods/ILearningMethods.cpp
@@ -5,5 +5,4 @@
  * @date 10.9.18 -
  */
 
-#include "ILearningMethods.h"
-
+#include "ILearningMethods.h"
\ No newline at end of file
diff --git a/src/LearningMethods/ILearningMethods.h b/src/LearningMethods/ILearningMethods.h
index 9e3b2522..67204140 100644
--- a/src/LearningMethods/ILearningMethods.h
+++ b/src/LearningMethods/ILearningMethods.h
@@ -11,6 +11,14 @@
 #include <vector>
 #include "../ErrorFunction/ErrorFunctions.h"
 
+
+namespace lib4neuro {
+    enum LearningMethodType {
+        MethodGradientDescent,
+        MethodParticleSwarm
+    };
+}
+
 class ILearningMethods {
 private:
 
@@ -20,12 +28,12 @@ private:
     lib4neuro::ErrorFunction *ef = nullptr;
 
 public:
-    /*
+    /**
      * Runs the method specific learning algorithm minimizing the given error function
      */
     virtual void optimize( lib4neuro::ErrorFunction &ef ) = 0;
 
-    /*
+    /**
      * Updates the optimal weight&bias settings in the passed vector
      */
     virtual std::vector<double>* get_parameters( ) = 0;
diff --git a/src/LearningMethods/ParticleSwarm.h b/src/LearningMethods/ParticleSwarm.h
index 78f4060e..0a918031 100644
--- a/src/LearningMethods/ParticleSwarm.h
+++ b/src/LearningMethods/ParticleSwarm.h
@@ -104,7 +104,7 @@ namespace lib4neuro {
         /**
          *
          */
-        lib4neuro::ErrorFunction *f;
+//        lib4neuro::ErrorFunction *f;
 
         size_t func_dim;
 
@@ -126,7 +126,7 @@ namespace lib4neuro {
 
         double delta;
 
-        double global_optimal_value;
+//        double global_optimal_value;
 
         std::vector<double> *domain_bounds = nullptr;
 
@@ -187,7 +187,6 @@ namespace lib4neuro {
          */
         LIB4NEURO_API ~ParticleSwarm();
 
-
         /**
          *
          * @param gamma
@@ -201,8 +200,6 @@ namespace lib4neuro {
          * @return
          */
         LIB4NEURO_API std::vector<double> *get_parameters() override;
-
-
     };
 
 }
diff --git a/src/Network/NeuralNetwork.cpp b/src/Network/NeuralNetwork.cpp
index ca276d60..a5ac679f 100644
--- a/src/Network/NeuralNetwork.cpp
+++ b/src/Network/NeuralNetwork.cpp
@@ -6,11 +6,15 @@
  */
 
 #include <iostream>
+#include <4neuro.h>
 
 #include "../message.h"
 #include "NeuralNetwork.h"
 #include "NeuralNetworkSerialization.h"
 
+/* Random number generator shared by the randomization routines below */
+boost::random::mt19937 gen;
+
 namespace lib4neuro {
     NeuralNetwork::NeuralNetwork() {
         this->neurons = new ::std::vector<Neuron *>(0);
@@ -459,19 +463,16 @@ namespace lib4neuro {
     void NeuralNetwork::eval_single(std::vector<double> &input, ::std::vector<double> &output,
                                     ::std::vector<double> *custom_weights_and_biases) {
         if ((this->input_neuron_indices->size() * this->output_neuron_indices->size()) <= 0) {
-            ::std::cerr << "Input and output neurons have not been specified\n" << ::std::endl;
-            exit(-1);
+            throw std::invalid_argument("Input and output neurons have not been specified!");
         }
 
 
         if (this->input_neuron_indices->size() != input.size()) {
-            ::std::cerr << "Error, input size != Network input size\n" << ::std::endl;
-            exit(-1);
+            throw std::invalid_argument("Error: input size != Network input size");
         }
 
         if (this->output_neuron_indices->size() != output.size()) {
-            ::std::cerr << "Error, output size != Network output size\n" << ::std::endl;
-            exit(-1);
+            throw std::invalid_argument("Error: output size != Network output size");
         }
         double potential, bias;
         int bias_idx;
@@ -594,7 +595,7 @@ namespace lib4neuro {
 
     void NeuralNetwork::randomize_weights() {
 
-        boost::random::mt19937 gen;
+        /* uses the file-level random generator 'gen' */
 
         // Init weight guess ("optimal" for logistic activation functions)
         double r = 4 * sqrt(6. / (this->connection_weights->size()));
@@ -608,7 +609,7 @@ namespace lib4neuro {
 
     void NeuralNetwork::randomize_biases() {
 
-        boost::random::mt19937 gen;
+        /* uses the file-level random generator 'gen' */
 
         // Init weight guess ("optimal" for logistic activation functions)
         boost::random::uniform_real_distribution<> dist(-1, 1);
@@ -864,5 +865,95 @@ namespace lib4neuro {
         }
     }
 
+    FullyConnectedFFN::FullyConnectedFFN(std::vector<unsigned int>* neuron_numbers, NeuronType hidden_layer_neuron_type) : NeuralNetwork() {
+        if(neuron_numbers->size() < 2) {
+            throw std::invalid_argument("Parameter 'neuron_numbers' specifying the numbers of neurons in the network's "
+                                        "layers must contain at least two elements (input and output layers are compulsory)!");
+        }
+
+        unsigned int inp_dim = neuron_numbers->at(0);  //!< Network input dimension
+        unsigned int out_dim = neuron_numbers->back(); //!< Network output dimension
+
+        std::vector<size_t> input_layer_neuron_indices;
+        std::vector<size_t> previous_layer_neuron_indices;
+        std::vector<size_t> current_layer_neuron_indices;
+
+        /* Creation of INPUT layer neurons */
+        current_layer_neuron_indices.reserve(inp_dim);
+        input_layer_neuron_indices.reserve(inp_dim);
+        for(unsigned int i = 0; i < inp_dim; i++) {
+            size_t neuron_id = this->add_neuron(new NeuronLinear, BIAS_TYPE::NO_BIAS);
+            current_layer_neuron_indices.emplace_back(neuron_id);
+        }
+        input_layer_neuron_indices = current_layer_neuron_indices;
+
+        /* Creation of HIDDEN layers */
+
+        for(unsigned int i = 1; i <= neuron_numbers->size()-2; i++) {
+            previous_layer_neuron_indices.reserve(neuron_numbers->at(i-1));
+            previous_layer_neuron_indices = current_layer_neuron_indices;
+            current_layer_neuron_indices.clear();
+            current_layer_neuron_indices.reserve(neuron_numbers->at(i));
+
+            /* Creation of one single hidden layer */
+            for(unsigned int j = 0; j < neuron_numbers->at(i); j++) {
+                size_t neuron_id;
+
+                /* Create new hidden neuron */
+                switch (hidden_layer_neuron_type) {
+                    case NeuronType::BINARY: {
+                        neuron_id = this->add_neuron(new NeuronBinary, BIAS_TYPE::NEXT_BIAS);
+                        break;
+                    }
+
+                    case NeuronType::CONSTANT: {
+                        neuron_id = this->add_neuron(new NeuronConstant, BIAS_TYPE::NEXT_BIAS);
+                        break;
+                    }
+
+                    case NeuronType::LINEAR: {
+                        neuron_id = this->add_neuron(new NeuronLinear, BIAS_TYPE::NEXT_BIAS);
+                        break;
+                    }
+
+                    case NeuronType::LOGISTIC: {
+                        neuron_id = this->add_neuron(new NeuronLogistic, BIAS_TYPE::NEXT_BIAS);
+                        break;
+                    }
+                }
+
+                current_layer_neuron_indices.emplace_back(neuron_id);
+
+                /* Connect the new neuron to all neurons in the previous layer */
+                for(auto ind : previous_layer_neuron_indices) {
+                    this->add_connection_simple(ind, neuron_id, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
+                }
+            }
+        }
+
+        previous_layer_neuron_indices.reserve(neuron_numbers->at(neuron_numbers->size()-2));
+        previous_layer_neuron_indices = current_layer_neuron_indices;
+        current_layer_neuron_indices.clear();
+        current_layer_neuron_indices.reserve(out_dim);
+
+        /* Creation of OUTPUT layer neurons */
+        for(unsigned int i = 0; i < out_dim; i++) {
+            size_t neuron_id = this->add_neuron(new NeuronLinear, BIAS_TYPE::NO_BIAS);
+            current_layer_neuron_indices.emplace_back(neuron_id);
+
+            /* Connect the new neuron to all neurons in the previous layer */
+            for(auto ind : previous_layer_neuron_indices) {
+                this->add_connection_simple(ind, neuron_id, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
+            }
+        }
+
+        /* Init variables containing the indices of INPUT and OUTPUT neurons */
+        this->input_neuron_indices = new ::std::vector<size_t>(inp_dim);
+        this->output_neuron_indices = new ::std::vector<size_t>(out_dim);
+
+        *this->input_neuron_indices = input_layer_neuron_indices;
+        *this->output_neuron_indices = current_layer_neuron_indices;
+    }
+
 }
 
diff --git a/src/Network/NeuralNetwork.h b/src/Network/NeuralNetwork.h
index abe93a14..e4b5141d 100644
--- a/src/Network/NeuralNetwork.h
+++ b/src/Network/NeuralNetwork.h
@@ -43,7 +43,7 @@ namespace lib4neuro {
  *
  */
     class NeuralNetwork {
-    private:
+    protected:
 
         /**
          *
@@ -335,6 +335,17 @@ namespace lib4neuro {
          */
         LIB4NEURO_API void save_text(std::string filepath);
 
+    }; // class NeuralNetwork
+
+    class FullyConnectedFFN: public NeuralNetwork {
+    public:
+
+        /**
+         * Constructs a fully connected feed-forward neural network
+         * @param neuron_numbers Pointer to a vector with the number of neurons in every layer (from input to output)
+         * @param hidden_layer_neuron_type Activation type of all hidden-layer neurons
+         */
+        LIB4NEURO_API explicit FullyConnectedFFN(std::vector<unsigned int>* neuron_numbers, NeuronType hidden_layer_neuron_type);
     };
 
 }
diff --git a/src/Neuron/Neuron.h b/src/Neuron/Neuron.h
index c5c60acf..fd81dde8 100644
--- a/src/Neuron/Neuron.h
+++ b/src/Neuron/Neuron.h
@@ -15,9 +15,16 @@
 
 namespace lib4neuro {
 
-/**
-  * Abstract class representing a general neuron
-  */
+    enum NeuronType {
+        BINARY,
+        CONSTANT,
+        LINEAR,
+        LOGISTIC
+    };
+
+    /**
+      * Abstract class representing a general neuron
+      */
     class Neuron {
 
     public:
diff --git a/src/Neuron/NeuronLinear.cpp b/src/Neuron/NeuronLinear.cpp
index b095ea87..b412eef0 100644
--- a/src/Neuron/NeuronLinear.cpp
+++ b/src/Neuron/NeuronLinear.cpp
@@ -15,7 +15,6 @@ namespace lib4neuro {
     NeuronLinear::NeuronLinear() {}
 
     double NeuronLinear::activate(double x, double b) {
-
         return x + b;
     }
 
diff --git a/src/examples/main.cpp b/src/examples/main.cpp
index 6010c144..5c766604 100644
--- a/src/examples/main.cpp
+++ b/src/examples/main.cpp
@@ -13,9 +13,40 @@
 #include <algorithm>
 
 #include "4neuro.h"
-
+#include "../CrossValidator/CrossValidator.h"
 
 int main(int argc, char** argv){
 
+    l4n::CSVReader reader("/home/martin/Desktop/ANN_DATA_1_SET.txt", "\t", true);
+    reader.read();
+
+    /* Columns of the data file used as network inputs and outputs */
+
+    std::vector<unsigned int> inputs = {2,3,4,5,6,7,8,26,27,28};
+    std::vector<unsigned int> outputs = {17,18,19,20,21,22,23,24,25};
+    l4n::DataSet ds = reader.get_data_set(&inputs, &outputs);
+
+    /* Neural network construction: 10 inputs, 9 outputs, no hidden layer */
+    std::vector<unsigned int> neuron_numbers_in_layers = {10,9};
+    l4n::FullyConnectedFFN nn(&neuron_numbers_in_layers, l4n::NeuronType::LOGISTIC);
+
+    /* Error function */
+    l4n::MSE mse(&nn, &ds);
+
+    /* Optimization-domain bounds (used by the particle swarm optimizer) */
+    std::vector<double> domain_bounds(2 * (nn.get_n_weights() + nn.get_n_biases()));
+
+    for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
+        domain_bounds[2 * i] = -20;
+        domain_bounds[2 * i + 1] = 20;
+    }
+    /* Training method */
+//    l4n::ParticleSwarm ps(&domain_bounds);
+    l4n::GradientDescent gd;
+
+    /* Cross-validation: 3 independent runs of a 10-fold test */
+    l4n::CrossValidator cv(&gd, &mse);
+    cv.run_k_fold_test(10, 3);
+
     return 0;
 }
diff --git a/src/examples/net_test_1.cpp b/src/examples/net_test_1.cpp
index 3a1712e7..e35c504b 100644
--- a/src/examples/net_test_1.cpp
+++ b/src/examples/net_test_1.cpp
@@ -129,6 +129,7 @@ int main() {
 
     net.specify_input_neurons(net_input_neurons_indices);
     net.specify_output_neurons(net_output_neurons_indices);
+
     /* ERROR FUNCTION SPECIFICATION */
     l4n::MSE mse(&net, &ds);
 
-- 
GitLab