From fa2bdfb35c7834b462e6b0f3ce2b4c86fffdb1c3 Mon Sep 17 00:00:00 2001
From: Martin Beseda <martin.beseda@vsb.cz>
Date: Thu, 21 Feb 2019 15:47:07 +0100
Subject: [PATCH] FIX: Code fixed after merge

---
 include/4neuro.h                         |   2 -
 src/CMakeLists.txt                       |  35 ++---
 src/ErrorFunction/ErrorFunctionsMock.h   |   3 +
 src/LearningMethods/LearningSequence.cpp |   3 +-
 src/Network/NeuralNetwork.cpp            | 166 +++++++++++------------
 src/Network/NeuralNetwork.h              |  52 +++----
 src/Network/NeuralNetworkSum.cpp         |  61 +++++----
 src/Network/NeuralNetworkSum.h           |   3 +-
 src/examples/CMakeLists.txt              |   1 +
 src/examples/net_test_3.cpp              |   1 +
 src/tests/CMakeLists.txt                 |   1 +
 11 files changed, 162 insertions(+), 166 deletions(-)

diff --git a/include/4neuro.h b/include/4neuro.h
index 8012abb0..fcdac83a 100644
--- a/include/4neuro.h
+++ b/include/4neuro.h
@@ -29,8 +29,6 @@
 #include "../src/CSVReader/CSVReader.h"
 #include "../src/CrossValidator/CrossValidator.h"
 
-#include "../src/message.h"
-#include "../src/exceptions.h"
 
 // Abbreaviate lib4neuro namespace to l4n
 namespace l4n = lib4neuro;
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index bb1220fb..45920a99 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -63,6 +63,7 @@ if ("${BUILD_LIB}" STREQUAL "yes")
             NormalizationStrategy/NormalizationStrategy.cpp
             LearningMethods/GradientDescentSingleItem.cpp
             LearningMethods/LearningSequence.cpp
+            LearningMethods/GradientDescentBB.cpp
     )
 
     # FileSystem C++ library - has to be linked manually in GCC-8
@@ -81,7 +82,7 @@ if ("${BUILD_LIB}" STREQUAL "yes")
     endif()
 
     target_link_libraries(
-            lib4neuro
+        lib4neuro
 
         PRIVATE
         exprtk_wrap
@@ -93,25 +94,25 @@ if ("${BUILD_LIB}" STREQUAL "yes")
     )
 
     target_include_directories(
-            lib4neuro
+        lib4neuro
 
-            PUBLIC
-            ${ROOT_DIR}/include
+        PUBLIC
+        ${ROOT_DIR}/include
 
-            PRIVATE
-            ${EXPRTK_INCLUDE_DIR}
-            ${SRC_DIR}
-            ${Boost_INCLUDE_DIRS}
-            ${ARMADILLO_INCLUDE_DIRS}
+        PRIVATE
+        ${EXPRTK_INCLUDE_DIR}
+        ${SRC_DIR}
+        ${Boost_INCLUDE_DIRS}
+        ${ARMADILLO_INCLUDE_DIRS}
     )
 
     set_target_properties(
-            lib4neuro
+        lib4neuro
 
-            PROPERTIES
-            ARCHIVE_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib"
-            LIBRARY_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib"
-            RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/bin"
+        PROPERTIES
+        ARCHIVE_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib"
+        LIBRARY_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib"
+        RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/bin"
     )
 
     set(PREFIX "")
@@ -127,10 +128,10 @@ if ("${BUILD_LIB}" STREQUAL "yes")
     endif ()
 
     target_include_directories(
-            ${PREFIX}boost_unit_test
+        ${PREFIX}boost_unit_test
 
-            PRIVATE
-            ${Boost_INCLUDE_DIRS}
+        PRIVATE
+        ${Boost_INCLUDE_DIRS}
     )
 
 endif ()
diff --git a/src/ErrorFunction/ErrorFunctionsMock.h b/src/ErrorFunction/ErrorFunctionsMock.h
index 60268185..c85a783a 100644
--- a/src/ErrorFunction/ErrorFunctionsMock.h
+++ b/src/ErrorFunction/ErrorFunctionsMock.h
@@ -18,6 +18,9 @@ MOCK_BASE_CLASS(mock_ErrorFunction, lib4neuro::ErrorFunction)
     MOCK_METHOD(eval, 3)
     MOCK_METHOD(get_dimension, 0)
     MOCK_METHOD(calculate_error_gradient, 4)
+    MOCK_METHOD(eval_single_item_by_idx, 3)
+    MOCK_METHOD(calculate_error_gradient_single, 2)
+    MOCK_METHOD(analyze_error_gradient, 4)
     MOCK_METHOD(calculate_residual_gradient, 4)
     MOCK_METHOD(calculate_single_residual, 3)
     MOCK_METHOD(get_parameters, 0)
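
Note on the hunk above: with the Turtle mocking framework, every virtual method the tests may reach has to be registered as MOCK_METHOD(name, argument_count), otherwise the mock class itself stays abstract; the three new entries register the virtuals ErrorFunction gained in the merge. A minimal self-contained sketch of the same pattern (Base and eval are illustrative stand-ins, not lib4neuro API):

    #include <turtle/mock.hpp>

    // Illustrative base class with one pure virtual of arity 3.
    class Base {
    public:
        virtual ~Base() = default;
        virtual double eval(double a, double b, double c) = 0;
    };

    // MOCK_METHOD takes the method name and its argument count,
    // exactly as in the hunk above.
    MOCK_BASE_CLASS(mock_Base, Base)
    {
        MOCK_METHOD(eval, 3)
    };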
diff --git a/src/LearningMethods/LearningSequence.cpp b/src/LearningMethods/LearningSequence.cpp
index fbfa7f80..0bd6e138 100644
--- a/src/LearningMethods/LearningSequence.cpp
+++ b/src/LearningMethods/LearningSequence.cpp
@@ -6,6 +6,7 @@
  */
 
 #include "LearningSequence.h"
+#include "../message.h"
 
 namespace lib4neuro {
 
@@ -55,7 +56,7 @@ namespace lib4neuro {
                     return;
                 }
             }
-            COUT_DEBUG( "Cycle: " << cycle_idx << ", the lowest error: " << the_best_error << std::endl );
+            COUT_DEBUG("Cycle: " << cycle_idx << ", the lowest error: " << the_best_error << std::endl );
         }
     }
 }
\ No newline at end of file
diff --git a/src/Network/NeuralNetwork.cpp b/src/Network/NeuralNetwork.cpp
index e18b7180..90ade7e8 100644
--- a/src/Network/NeuralNetwork.cpp
+++ b/src/Network/NeuralNetwork.cpp
@@ -16,7 +16,6 @@
 
 namespace lib4neuro {
     NeuralNetwork::NeuralNetwork() {
-        this->gen = boost::random::mt19937(std::time(0));
         this->neurons = new ::std::vector<Neuron *>(0);
         this->neuron_biases = new ::std::vector<double>(0);
         this->neuron_potentials = new ::std::vector<double>(0);
@@ -53,7 +52,6 @@ namespace lib4neuro {
             THROW_RUNTIME_ERROR("File '" + filepath + "' couldn't be open!");
         }
 
-        this->gen = boost::random::mt19937(std::time(0));
     }
 
     NeuralNetwork::~NeuralNetwork() {
@@ -404,6 +402,79 @@ namespace lib4neuro {
         return this->neurons->size() - 1;
     }
 
+    void NeuralNetwork::eval_single_debug(::std::vector<double> &input, ::std::vector<double> &output,
+                                          ::std::vector<double> *custom_weights_and_biases) {
+        if ((this->input_neuron_indices->size() * this->output_neuron_indices->size()) <= 0) {
+            THROW_INVALID_ARGUMENT_ERROR("Input and output neurons have not been specified!");
+        }
+
+        if (this->input_neuron_indices->size() != input.size()) {
+            THROW_INVALID_ARGUMENT_ERROR("Data input size != Network input size");
+        }
+
+        if (this->output_neuron_indices->size() != output.size()) {
+            THROW_INVALID_ARGUMENT_ERROR("Data output size != Network output size");
+        }
+
+        double potential, bias;
+        int bias_idx;
+
+        this->copy_parameter_space(custom_weights_and_biases);
+
+        this->analyze_layer_structure();
+
+        /* reset of the output and the neuron potentials */
+        ::std::fill(output.begin(), output.end(), 0.0);
+        ::std::fill(this->neuron_potentials->begin(), this->neuron_potentials->end(), 0.0);
+
+        /* set the potentials of the input neurons */
+        for (size_t i = 0; i < this->input_neuron_indices->size(); ++i) {
+            this->neuron_potentials->at(this->input_neuron_indices->at(i)) = input[i];
+            std::cout << this->neuron_potentials->at(this->input_neuron_indices->at(i)) << ", ";
+        }
+        std::cout << std::endl;
+
+
+
+        /* we iterate through all the feed-forward layers and transfer the signals */
+        for (auto layer: *this->neuron_layers_feedforward) {
+            /* we iterate through all neurons in this layer and propagate the signal to the neighboring neurons */
+
+            for (auto si: *layer) {
+                bias = 0.0;
+                bias_idx = this->neuron_bias_indices->at(si);
+                if (bias_idx >= 0) {
+                    bias = this->neuron_biases->at(bias_idx);
+                }
+                potential = this->neurons->at(si)->activate(this->neuron_potentials->at(si), bias);
+                std::cout << "  applying bias: " << bias << " to neuron potential: " << this->neuron_potentials->at(si) << " -> " << potential << std::endl;
+
+                for (auto c: *this->outward_adjacency->at(si)) {
+                    size_t ti = c.first;
+                    size_t ci = c.second;
+
+                    this->neuron_potentials->at(ti) +=
+                            this->connection_list->at(ci)->eval(*this->connection_weights) * potential;
+
+                    std::cout << "  adding input to neuron " << ti << " += " << this->connection_list->at(ci)->eval(*this->connection_weights) << "*" << potential << std::endl;
+                }
+            }
+        }
+
+        unsigned int i = 0;
+        for (auto oi: *this->output_neuron_indices) {
+            bias = 0.0;
+            bias_idx = this->neuron_bias_indices->at(oi);
+            if (bias_idx >= 0) {
+                bias = this->neuron_biases->at(bias_idx);
+            }
+            output[i] = this->neurons->at(oi)->activate(this->neuron_potentials->at(oi), bias);
+            std::cout << "setting the output[" << i << "] = " << output[i] << "(bias = " << bias << ")" << std::endl;
+            ++i;
+        }
+    }
+
+
     size_t
     NeuralNetwork::add_connection_simple(size_t n1_idx, size_t n2_idx, SIMPLE_CONNECTION_TYPE sct,
                                          size_t weight_idx) {
@@ -540,82 +611,9 @@ namespace lib4neuro {
         }
     }
 
-    void NeuralNetwork::eval_single_debug(::std::vector<double> &input, ::std::vector<double> &output,
-                                    ::std::vector<double> *custom_weights_and_biases) {
-        if ((this->input_neuron_indices->size() * this->output_neuron_indices->size()) <= 0) {
-            THROW_INVALID_ARGUMENT_ERROR("Input and output neurons have not been specified!");
-        }
-
-        if (this->input_neuron_indices->size() != input.size()) {
-            THROW_INVALID_ARGUMENT_ERROR("Data input size != Network input size");
-        }
-
-        if (this->output_neuron_indices->size() != output.size()) {
-            THROW_INVALID_ARGUMENT_ERROR("Data output size != Network output size");
-        }
-
-        double potential, bias;
-        int bias_idx;
-
-        this->copy_parameter_space(custom_weights_and_biases);
-
-        this->analyze_layer_structure();
-
-        /* reset of the output and the neuron potentials */
-        ::std::fill(output.begin(), output.end(), 0.0);
-        ::std::fill(this->neuron_potentials->begin(), this->neuron_potentials->end(), 0.0);
-
-        /* set the potentials of the input neurons */
-        for (size_t i = 0; i < this->input_neuron_indices->size(); ++i) {
-            this->neuron_potentials->at(this->input_neuron_indices->at(i)) = input[i];
-            std::cout << this->neuron_potentials->at(this->input_neuron_indices->at(i)) << ", ";
-        }
-        std::cout << std::endl;
-
-
-
-        /* we iterate through all the feed-forward layers and transfer the signals */
-        for (auto layer: *this->neuron_layers_feedforward) {
-            /* we iterate through all neurons in this layer and propagate the signal to the neighboring neurons */
-
-            for (auto si: *layer) {
-                bias = 0.0;
-                bias_idx = this->neuron_bias_indices->at(si);
-                if (bias_idx >= 0) {
-                    bias = this->neuron_biases->at(bias_idx);
-                }
-                potential = this->neurons->at(si)->activate(this->neuron_potentials->at(si), bias);
-                std::cout << "  applying bias: " << bias << " to neuron potential: " << this->neuron_potentials->at(si) << " -> " << potential << std::endl;
-
-                for (auto c: *this->outward_adjacency->at(si)) {
-                    size_t ti = c.first;
-                    size_t ci = c.second;
-
-                    this->neuron_potentials->at(ti) +=
-                            this->connection_list->at(ci)->eval(*this->connection_weights) * potential;
-
-                    std::cout << "  adding input to neuron " << ti << " += " << this->connection_list->at(ci)->eval(*this->connection_weights) << "*" << potential << std::endl;
-                }
-            }
-        }
-
-        unsigned int i = 0;
-        for (auto oi: *this->output_neuron_indices) {
-            bias = 0.0;
-            bias_idx = this->neuron_bias_indices->at(oi);
-            if (bias_idx >= 0) {
-                bias = this->neuron_biases->at(bias_idx);
-            }
-            output[i] = this->neurons->at(oi)->activate(this->neuron_potentials->at(oi), bias);
-            std::cout << "setting the output[" << i << "] = " << output[i] << "(bias = " << bias << ")" << std::endl;
-            ++i;
-        }
-    }
-
     void NeuralNetwork::add_to_gradient_single(std::vector<double> &input, ::std::vector<double> &error_derivative,
                                                double error_scaling, ::std::vector<double> &gradient) {
 
-
         ::std::vector<double> scaling_backprog(this->get_n_neurons());
         ::std::fill(scaling_backprog.begin(), scaling_backprog.end(), 0.0);
 
@@ -683,8 +681,7 @@ namespace lib4neuro {
     }
 
     void NeuralNetwork::add_to_gradient_single_debug(std::vector<double> &input, ::std::vector<double> &error_derivative,
-                                               double error_scaling, ::std::vector<double> &gradient) {
-
+                                                     double error_scaling, ::std::vector<double> &gradient) {
 
         ::std::vector<double> scaling_backprog(this->get_n_neurons());
         ::std::fill(scaling_backprog.begin(), scaling_backprog.end(), 0.0);
@@ -762,29 +759,30 @@ namespace lib4neuro {
         }
     }
 
+
+
     void NeuralNetwork::randomize_weights() {
 
+        boost::random::mt19937 gen(std::time(0));
+
         // Init weight guess ("optimal" for logistic activation functions)
-        double r = 1.0 / (this->neuron_biases->size() + this->connection_weights->size());
+        double r = 4 * sqrt(6. / (this->connection_weights->size()));
 
         boost::random::uniform_real_distribution<> dist(-r, r);
 
         for (size_t i = 0; i < this->connection_weights->size(); i++) {
             this->connection_weights->at(i) = dist(gen);
-//            std::cout << "weight[" << i <<"]" << this->connection_weights->at(i) << std::endl;
         }
     }
 
     void NeuralNetwork::randomize_biases() {
 
+        boost::random::mt19937 gen(std::time(0));
 
-
-        double r = 1.0 / (this->neuron_biases->size() + this->connection_weights->size());
         // Init weight guess ("optimal" for logistic activation functions)
-        boost::random::uniform_real_distribution<> dist(-r, r);
+        boost::random::uniform_real_distribution<> dist(-1, 1);
         for (size_t i = 0; i < this->neuron_biases->size(); i++) {
             this->neuron_biases->at(i) = dist(gen);
-//            std::cout << "bias[" << i <<"]" << this->neuron_biases->at(i) << std::endl;
         }
     }
 
@@ -1180,7 +1178,6 @@ namespace lib4neuro {
                                          NEURON_TYPE hidden_layer_neuron_type,
                                          std::ofstream* ofs) : NeuralNetwork() {
         std::vector<NEURON_TYPE> tmp;
-        this->gen = boost::random::mt19937(std::time(0));
 
         for(auto i = 0; i < neuron_numbers->size(); i++) {
             tmp.emplace_back(hidden_layer_neuron_type);
@@ -1202,7 +1199,6 @@ namespace lib4neuro {
             THROW_INVALID_ARGUMENT_ERROR("Parameter 'neuron_numbers' specifying numbers of neurons in network's layers "
                                          "doesn't specify input and output layers, which are compulsory!");
         }
-        this->gen = boost::random::mt19937(std::time(0));
 
         this->neurons = new ::std::vector<Neuron *>(0);
         this->neuron_biases = new ::std::vector<double>(0);
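
For reference, the new bound in randomize_weights() above is a Glorot-style uniform range scaled by 4 (the usual adjustment for logistic sigmoid units), applied here to the total weight count rather than a per-layer fan-in/fan-out: r = 4*sqrt(6/N) for N connection weights, replacing the former 1/(#biases + #weights). A minimal sketch of that sampling scheme in isolation (randomize_uniform is an illustrative name, not lib4neuro API; a non-empty weight vector is assumed):

    #include <boost/random/mersenne_twister.hpp>
    #include <boost/random/uniform_real_distribution.hpp>
    #include <cmath>
    #include <ctime>
    #include <vector>

    // Draw each weight uniformly from (-r, r) with r = 4*sqrt(6/N),
    // matching the bound introduced in randomize_weights().
    void randomize_uniform(std::vector<double> &weights) {
        boost::random::mt19937 gen(std::time(0));
        double r = 4 * std::sqrt(6.0 / weights.size());
        boost::random::uniform_real_distribution<> dist(-r, r);
        for (double &w : weights) {
            w = dist(gen);
        }
    }

Because the generator is now a local seeded with std::time(0), two calls within the same second draw identical sequences; the sketch keeps that behaviour to match the patch.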
diff --git a/src/Network/NeuralNetwork.h b/src/Network/NeuralNetwork.h
index 2e6218ab..8a1e7af4 100644
--- a/src/Network/NeuralNetwork.h
+++ b/src/Network/NeuralNetwork.h
@@ -13,13 +13,7 @@
 
 #include <iostream>
 #include <vector>
-#include <iostream>
-#include <cstdio>
-#include <fstream>
-#include <vector>
-#include <utility>
-#include <algorithm>
-#include <assert.h>
+
 #include <algorithm>
 #include <utility>
 #include <fstream>
@@ -34,10 +28,6 @@
 #include "../NetConnection/ConnectionFunctionIdentity.h"
 #include "../NormalizationStrategy/NormalizationStrategy.h"
 
-#include <boost/random/mersenne_twister.hpp>
-#include <boost/random/uniform_int_distribution.hpp>
-#include <boost/random/uniform_real_distribution.hpp>
-
 namespace lib4neuro {
 
     /**
@@ -60,7 +50,6 @@ namespace lib4neuro {
      */
     class NeuralNetwork {
     protected:
-        boost::random::mt19937 gen;
 
         /**
          *
@@ -173,6 +162,25 @@ namespace lib4neuro {
 
     public:
 
+        /**
+        *
+        * @param input
+        * @param output
+        * @param custom_weights_and_biases
+        */
+        LIB4NEURO_API virtual void eval_single_debug(std::vector<double> &input, std::vector<double> &output,
+                                                     std::vector<double> *custom_weights_and_biases = nullptr);
+
+
+        /**
+          *
+          * @param error_derivative
+          * @param gradient
+          */
+        LIB4NEURO_API virtual void
+        add_to_gradient_single_debug(std::vector<double> &input, std::vector<double> &error_derivative, double error_scaling,
+                                     std::vector<double> &gradient);
+
         /**
          * Struct used to access private properties from
          * the serialization function
@@ -227,15 +235,6 @@ namespace lib4neuro {
         LIB4NEURO_API virtual void eval_single(std::vector<double> &input, std::vector<double> &output,
                                                std::vector<double> *custom_weights_and_biases = nullptr);
 
-        /**
-         *
-         * @param input
-         * @param output
-         * @param custom_weights_and_biases
-         */
-        LIB4NEURO_API virtual void eval_single_debug(std::vector<double> &input, std::vector<double> &output,
-                                               std::vector<double> *custom_weights_and_biases = nullptr);
-
         /**
          *
          * @param error_derivative
@@ -245,15 +244,6 @@ namespace lib4neuro {
         add_to_gradient_single(std::vector<double> &input, std::vector<double> &error_derivative, double error_scaling,
                                std::vector<double> &gradient);
 
-        /**
-          *
-          * @param error_derivative
-          * @param gradient
-          */
-        LIB4NEURO_API virtual void
-        add_to_gradient_single_debug(std::vector<double> &input, std::vector<double> &error_derivative, double error_scaling,
-                               std::vector<double> &gradient);
-
         /**
          * Adds a new neuron to the list of neurons. Also assigns a valid bias value to its activation function
          * @param[in] n
@@ -461,6 +451,8 @@ namespace lib4neuro {
                                                  std::vector<NEURON_TYPE>* hidden_layer_neuron_types,
                                                  std::ofstream* ofs = nullptr);
 
+
+
     private:
         void init(std::vector<unsigned int>* neuron_numbers,
                   std::vector<NEURON_TYPE>* hidden_layer_neuron_types,
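
eval_single_debug keeps the signature declared above, so a caller only needs input and output buffers of matching sizes; the custom_weights_and_biases pointer defaults to nullptr. A hypothetical call site, assuming an already assembled network with two inputs and one output built elsewhere:

    #include <vector>
    #include <4neuro.h>

    // Sketch only: `net` is any assembled network; eval_single_debug
    // evaluates like eval_single but traces potentials to std::cout.
    void trace_once(l4n::NeuralNetwork &net) {
        std::vector<double> input = {0.5, -0.25};
        std::vector<double> output(1);
        net.eval_single_debug(input, output);
    }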
diff --git a/src/Network/NeuralNetworkSum.cpp b/src/Network/NeuralNetworkSum.cpp
index 7763b63d..16dafc1e 100644
--- a/src/Network/NeuralNetworkSum.cpp
+++ b/src/Network/NeuralNetworkSum.cpp
@@ -78,36 +78,6 @@ namespace lib4neuro {
 
     }
 
-    void NeuralNetworkSum::eval_single_debug(std::vector<double> &input, std::vector<double> &output,
-                                       std::vector<double> *custom_weights_and_biases) {
-        std::vector<double> mem_output(output.size());
-        std::fill(output.begin(), output.end(), 0.0);
-
-        NeuralNetwork *SUM;
-
-        for (size_t ni = 0; ni < this->summand->size(); ++ni) {
-            SUM = this->summand->at(ni);
-
-            if (SUM) {
-                this->summand->at(ni)->eval_single_debug(input, mem_output, custom_weights_and_biases);
-
-                double alpha = this->summand_coefficient->at(ni)->eval(input);
-
-                for (size_t j = 0; j < output.size(); ++j) {
-                    output[j] += mem_output[j] * alpha;
-                }
-            } else {
-                //TODO assume the result can be a vector of doubles
-                double alpha = this->summand_coefficient->at(ni)->eval(input);
-
-                for (size_t j = 0; j < output.size(); ++j) {
-                    output[j] += alpha;
-                }
-            }
-        }
-
-    }
-
     void NeuralNetworkSum::add_to_gradient_single(std::vector<double> &input, std::vector<double> &error_derivative,
                                                   double error_scaling, std::vector<double> &gradient) {
 
@@ -184,4 +154,35 @@ namespace lib4neuro {
         return nullptr;
     }
 
+    void NeuralNetworkSum::eval_single_debug(std::vector<double> &input, std::vector<double> &output,
+                                             std::vector<double> *custom_weights_and_biases) {
+        std::vector<double> mem_output(output.size());
+        std::fill(output.begin(), output.end(), 0.0);
+
+        NeuralNetwork *SUM;
+
+        for (size_t ni = 0; ni < this->summand->size(); ++ni) {
+            SUM = this->summand->at(ni);
+
+            if (SUM) {
+                this->summand->at(ni)->eval_single_debug(input, mem_output, custom_weights_and_biases);
+
+                double alpha = this->summand_coefficient->at(ni)->eval(input);
+
+                for (size_t j = 0; j < output.size(); ++j) {
+                    output[j] += mem_output[j] * alpha;
+                }
+            } else {
+                //TODO assume the result can be a vector of doubles
+                double alpha = this->summand_coefficient->at(ni)->eval(input);
+
+                for (size_t j = 0; j < output.size(); ++j) {
+                    output[j] += alpha;
+                }
+            }
+        }
+
+    }
+
+
 }
\ No newline at end of file
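
In the relocated NeuralNetworkSum::eval_single_debug, each summand network contributes alpha_i(input) * y_i[j] to output[j], where y_i is the summand's own eval_single_debug result and alpha_i is its coefficient, while a null summand (SUM == nullptr) contributes alpha_i(input) directly; that is, output[j] = sum_i alpha_i(input) * y_i[j], with y_i[j] taken as 1 for null summands.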
diff --git a/src/Network/NeuralNetworkSum.h b/src/Network/NeuralNetworkSum.h
index 42127a26..c20bb888 100644
--- a/src/Network/NeuralNetworkSum.h
+++ b/src/Network/NeuralNetworkSum.h
@@ -50,7 +50,8 @@ namespace lib4neuro {
          * @param custom_weights_and_biases
          */
         LIB4NEURO_API void eval_single_debug(std::vector<double> &input, std::vector<double> &output,
-                                       std::vector<double> *custom_weights_and_biases = nullptr) override;
+                                             std::vector<double> *custom_weights_and_biases = nullptr) override;
+
 
         /**
          *
diff --git a/src/examples/CMakeLists.txt b/src/examples/CMakeLists.txt
index 36be7dde..9382a79c 100644
--- a/src/examples/CMakeLists.txt
+++ b/src/examples/CMakeLists.txt
@@ -77,6 +77,7 @@ target_include_directories(
         net_test_3
         PRIVATE
         ${ROOT_DIR}/include
+        ${Boost_INCLUDE_DIRS}
 )
 
 target_include_directories(
diff --git a/src/examples/net_test_3.cpp b/src/examples/net_test_3.cpp
index da241cf7..942fedbe 100644
--- a/src/examples/net_test_3.cpp
+++ b/src/examples/net_test_3.cpp
@@ -9,6 +9,7 @@
 #include <utility>
 #include <algorithm>
 #include <assert.h>
+#include <ctime>
 
 #include <4neuro.h>
 
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt
index bbaefb54..97f08efe 100644
--- a/src/tests/CMakeLists.txt
+++ b/src/tests/CMakeLists.txt
@@ -54,6 +54,7 @@ add_executable(DESolver_test DESolver_test.cpp)
 target_link_libraries(DESolver_test lib4neuro boost_unit_test)
 target_include_directories(DESolver_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
 
+#TODO fix GradientDescent test
 #add_executable(GradientDescent_test GradientDescent_test.cpp)
 #target_link_libraries(GradientDescent_test lib4neuro boost_unit_test)
 #target_include_directories(GradientDescent_test PRIVATE ${Boost_INCLUDE_DIRS} ${TURTLE_INCLUDE_DIR})
-- 
GitLab