From 5d897b89daa71f204f60402adc586fd6297c1d07 Mon Sep 17 00:00:00 2001
From: Martin Beseda <martin.beseda@vsb.cz>
Date: Fri, 22 Mar 2019 11:40:30 +0100
Subject: [PATCH] [ENH] [RUNTIME ERROR]: Moved the 'optimal_parameters' member
 and its getter to the LearningMethod class.
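
Each learning method (GradientDescent, GradientDescentBB,
GradientDescentSingleItem, LevenbergMarquardt, ParticleSwarm,
RandomSolution, LearningSequence) previously kept its own copy of the
best-so-far parameters ('optimal_parameters', 'best_parameters' or
'p_min_glob'), in some classes as a raw heap-allocated pointer, together
with a per-class get_parameters() override returning the vector by
value. The vector is now a single protected std::vector<double> member
of LearningMethod, and the base class provides the only
get_parameters() implementation, which returns a pointer to that
member.

Call sites dereference the returned pointer instead of receiving a
copy, as in the updated DESolver::solve:

    std::vector<double> params = *learning_method.get_parameters();
    this->solution->copy_parameter_space(&params);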

---
 src/LearningMethods/GradientDescent.cpp       | 10 +----
 src/LearningMethods/GradientDescent.h         | 10 -----
 src/LearningMethods/GradientDescentBB.cpp     | 10 +----
 src/LearningMethods/GradientDescentBB.h       | 10 -----
 .../GradientDescentSingleItem.cpp             |  6 ---
 .../GradientDescentSingleItem.h               |  6 ---
 src/LearningMethods/LearningMethod.h          |  9 ++++-
 src/LearningMethods/LearningMethods.cpp       |  4 ++
 src/LearningMethods/LearningSequence.cpp      | 17 ++++-----
 src/LearningMethods/LearningSequence.h        | 11 ------
 src/LearningMethods/LevenbergMarquardt.cpp    | 15 +-------
 src/LearningMethods/LevenbergMarquardt.h      |  2 -
 src/LearningMethods/ParticleSwarm.cpp         | 38 ++++++++-----------
 src/LearningMethods/ParticleSwarm.h           | 10 -----
 src/LearningMethods/RandomSolution.cpp        |  8 ----
 src/LearningMethods/RandomSolution.h          |  6 ---
 src/Solvers/DESolver.cpp                      |  2 +-
 17 files changed, 42 insertions(+), 132 deletions(-)

diff --git a/src/LearningMethods/GradientDescent.cpp b/src/LearningMethods/GradientDescent.cpp
index c82db8d6..15cd6c30 100644
--- a/src/LearningMethods/GradientDescent.cpp
+++ b/src/LearningMethods/GradientDescent.cpp
@@ -295,20 +295,14 @@ namespace lib4neuro {
 #endif
         }
 
-        this->optimal_parameters = params_current;
+        this->optimal_parameters = *params_current;
 //        std::shared_ptr<std::vector<double>> params;
 //        params.reset(this->optimal_parameters);
-        ef.get_network_instance()->copy_parameter_space( this->optimal_parameters );
+        ef.get_network_instance()->copy_parameter_space( &this->optimal_parameters );
 
 //        delete gradient_current;
 //        delete gradient_prev;
 //        delete params_current;
 //        delete params_prev;
     }
-
-    std::vector<double> GradientDescent::get_parameters() {
-//        std::shared_ptr<std::vector<double>> ret;
-//        ret.reset(&this->optimal_parameters);
-        return std::vector<double>(*this->optimal_parameters);
-    }
 }
diff --git a/src/LearningMethods/GradientDescent.h b/src/LearningMethods/GradientDescent.h
index e8db77af..ff1a7b3f 100644
--- a/src/LearningMethods/GradientDescent.h
+++ b/src/LearningMethods/GradientDescent.h
@@ -41,11 +41,6 @@ namespace lib4neuro {
          */
         long long int maximum_niters;
 
-        /**
-         * Vector of minima coordinates
-         */
-        std::vector<double>* optimal_parameters = new std::vector<double>(5);
-
         /**
          * Adaptive calculation of the step-size based on several historical characteristics.
          * ----------------------------------------------------------------------------------
@@ -112,11 +107,6 @@ namespace lib4neuro {
          */
         LIB4NEURO_API void optimize(lib4neuro::ErrorFunction &ef, std::ofstream* ofs = nullptr) override;
 
-        /**
-         *
-         * @return
-         */
-        LIB4NEURO_API std::vector<double> get_parameters() override;
     };
 }
 
diff --git a/src/LearningMethods/GradientDescentBB.cpp b/src/LearningMethods/GradientDescentBB.cpp
index 55d5043c..823edc1f 100644
--- a/src/LearningMethods/GradientDescentBB.cpp
+++ b/src/LearningMethods/GradientDescentBB.cpp
@@ -209,12 +209,12 @@ namespace lib4neuro {
 #endif
         }
 
-        this->optimal_parameters = params_best;
+        this->optimal_parameters = *params_best;
 
 //        ef.analyze_error_gradient(*params_current, *gradient_current, 1.0, this->batch);
 //        std::vector<double>* params;
 //        params.reset(this->optimal_parameters);
-        ef.get_network_instance()->copy_parameter_space(this->optimal_parameters);
+        ef.get_network_instance()->copy_parameter_space(&this->optimal_parameters);
 //
 //        delete gradient_current;
 //        delete gradient_prev;
@@ -223,10 +223,4 @@ namespace lib4neuro {
 //        delete params_best;
     }
 
-    std::vector<double> GradientDescentBB::get_parameters() {
-//        std::vector<double>* ret;
-//        ret.reset(&this->optimal_parameters);
-        return std::vector<double>(*this->optimal_parameters);
-    }
-
 }
diff --git a/src/LearningMethods/GradientDescentBB.h b/src/LearningMethods/GradientDescentBB.h
index db5008ad..5284c281 100644
--- a/src/LearningMethods/GradientDescentBB.h
+++ b/src/LearningMethods/GradientDescentBB.h
@@ -52,11 +52,6 @@ namespace lib4neuro {
          */
         long long int maximum_niters;
 
-        /**
-         * Vector of minima coordinates
-         */
-        std::vector<double>* optimal_parameters = new std::vector<double>(5);
-
     public:
 
         /**
@@ -78,11 +73,6 @@ namespace lib4neuro {
          */
         LIB4NEURO_API void optimize(lib4neuro::ErrorFunction &ef, std::ofstream* ofs = nullptr) override;
 
-        /**
-         *
-         * @return
-         */
-        LIB4NEURO_API std::vector<double> get_parameters() override;
     };
 
 }
diff --git a/src/LearningMethods/GradientDescentSingleItem.cpp b/src/LearningMethods/GradientDescentSingleItem.cpp
index 6d33cbdd..58d473e6 100644
--- a/src/LearningMethods/GradientDescentSingleItem.cpp
+++ b/src/LearningMethods/GradientDescentSingleItem.cpp
@@ -105,10 +105,4 @@ namespace lib4neuro {
 
     }
 
-    std::vector<double> GradientDescentSingleItem::get_parameters() {
-//        std::shared_ptr<std::vector<double>> ret;
-//        ret.reset(&this->optimal_parameters);
-        return std::vector<double>(*this->optimal_parameters);
-    }
-
 }
diff --git a/src/LearningMethods/GradientDescentSingleItem.h b/src/LearningMethods/GradientDescentSingleItem.h
index 61084edc..fccca9f4 100644
--- a/src/LearningMethods/GradientDescentSingleItem.h
+++ b/src/LearningMethods/GradientDescentSingleItem.h
@@ -92,12 +92,6 @@ namespace lib4neuro {
          * @param ef
          */
         LIB4NEURO_API void optimize(lib4neuro::ErrorFunction &ef, std::ofstream* ofs = nullptr) override;
-
-        /**
-         *
-         * @return
-         */
-        LIB4NEURO_API std::vector<double> get_parameters() override;
     };
 
 }
diff --git a/src/LearningMethods/LearningMethod.h b/src/LearningMethods/LearningMethod.h
index 29b36233..aadc75a3 100644
--- a/src/LearningMethods/LearningMethod.h
+++ b/src/LearningMethods/LearningMethod.h
@@ -14,6 +14,13 @@
 
 namespace lib4neuro {
     class LearningMethod {
+    protected:
+
+        /**
+         * Coordinates of the minimum found by the learning method
+         */
+        std::vector<double> optimal_parameters;
+
     public:
         /**
          * Runs the method specific learning algorithm minimizing the given error function
@@ -24,7 +31,7 @@ namespace lib4neuro {
         /**
          * Updates the optimal weight&bias settings in the passed vector
          */
-        virtual std::vector<double> get_parameters() = 0;
+        virtual std::vector<double>* get_parameters();
     };
 
     class GradientLearningMethod : public LearningMethod {
diff --git a/src/LearningMethods/LearningMethods.cpp b/src/LearningMethods/LearningMethods.cpp
index 8c4b3b48..f1aba80e 100644
--- a/src/LearningMethods/LearningMethods.cpp
+++ b/src/LearningMethods/LearningMethods.cpp
@@ -8,6 +8,10 @@
 #include "LearningMethod.h"
 
 namespace lib4neuro {
+    std::vector<double>* LearningMethod::get_parameters() {
+        return &this->optimal_parameters;
+    }
+
     void GradientLearningMethod::optimize(ErrorFunction& ef,
                                           std::ofstream* ofs) {
         this->optimize(ef, ofs);
diff --git a/src/LearningMethods/LearningSequence.cpp b/src/LearningMethods/LearningSequence.cpp
index 33e943cd..92bf350d 100644
--- a/src/LearningMethods/LearningSequence.cpp
+++ b/src/LearningMethods/LearningSequence.cpp
@@ -13,15 +13,11 @@ namespace lib4neuro {
     LearningSequence::LearningSequence( double tolerance, int max_n_cycles ){
         this->tol = tolerance;
         this->max_number_of_cycles = max_n_cycles;
-//        this->best_parameters = new std::vector<double>();
+//        this->optimal_parameters = new std::vector<double>();
     }
 
     LearningSequence::~LearningSequence() = default;
 
-    std::vector<double> LearningSequence::get_parameters() {
-        return this->best_parameters;
-    }
-
     void LearningSequence::add_learning_method(std::shared_ptr<LearningMethod> method) {
         this->learning_sequence.push_back( method );
     }
@@ -37,7 +33,7 @@ namespace lib4neuro {
         double the_best_error = error;
         int mcycles = this->max_number_of_cycles, cycle_idx = 0;
 
-//        std::shared_ptr<std::vector<double>> best_params = std::make_shared<std::vector<double>>(this->best_parameters);
+//        std::shared_ptr<std::vector<double>> best_params = std::make_shared<std::vector<double>>(this->optimal_parameters);
         std::vector<double> params;
         while( error > this->tol && mcycles != 0){
             mcycles--;
@@ -52,7 +48,8 @@ namespace lib4neuro {
                 m->optimize( ef, ofs );
                 puts("*********************** 9");
 
-                params = m->get_parameters();
+                //TODO do NOT copy vectors if not needed
+                params = *m->get_parameters();
                 error = ef.eval(&params);
                 puts("*********************** 10");
 
@@ -64,18 +61,18 @@ namespace lib4neuro {
 
                 if( error < the_best_error ){
                     the_best_error = error;
-                    this->best_parameters = ef.get_parameters();
+                    this->optimal_parameters = ef.get_parameters();
 //                    best_params = ef.get_parameters();
 //                    best_params.reset(ef.get_parameters().get());
                 }
 
                 if( error <= this->tol ){
-                    ef.get_network_instance()->copy_parameter_space( &this->best_parameters );
+                    ef.get_network_instance()->copy_parameter_space( &this->optimal_parameters );
                     return;
                 }
             }
             COUT_DEBUG("Cycle: " << cycle_idx << ", the lowest error: " << the_best_error << std::endl );
         }
-        ef.get_network_instance()->copy_parameter_space( &this->best_parameters );
+        ef.get_network_instance()->copy_parameter_space( &this->optimal_parameters );
     }
 }
\ No newline at end of file
diff --git a/src/LearningMethods/LearningSequence.h b/src/LearningMethods/LearningSequence.h
index 1399081a..2e2b9511 100644
--- a/src/LearningMethods/LearningSequence.h
+++ b/src/LearningMethods/LearningSequence.h
@@ -31,11 +31,6 @@ namespace lib4neuro {
          */
         double tol;
 
-        /**
-         *
-         */
-        std::vector<double> best_parameters; // = nullptr;
-
         /**
          *
          */
@@ -61,12 +56,6 @@ namespace lib4neuro {
          */
         LIB4NEURO_API void optimize(lib4neuro::ErrorFunction &ef, std::ofstream* ofs = nullptr) override;
 
-        /**
-         *
-         * @return
-         */
-        LIB4NEURO_API std::vector<double> get_parameters() override;
-
         /**
          *
          * @param method
diff --git a/src/LearningMethods/LevenbergMarquardt.cpp b/src/LearningMethods/LevenbergMarquardt.cpp
index 8320487a..71bc9019 100644
--- a/src/LearningMethods/LevenbergMarquardt.cpp
+++ b/src/LearningMethods/LevenbergMarquardt.cpp
@@ -28,11 +28,6 @@ struct lib4neuro::LevenbergMarquardt::LevenbergMarquardtImpl {
 
     unsigned long batch_size;
 
-    /**
-     * Vector of minimum coordinates
-     */
-    std::vector<double> optimal_parameters;
-
     /**
      * Returns Jacobian matrix of the residual function using the backpropagation algorithm
      * Returns the right hand side of the resulting system of equations related to data errors in @data and approximating function @f
@@ -232,7 +227,7 @@ namespace lib4neuro {
         COUT_DEBUG("Iteration: " << iter_counter << " Current error: " << current_err << ", Current gradient norm: " << gradient_norm << ", Direction norm: " << update_norm << std::endl);
 
         /* Store the optimized parameters */
-        this->p_impl->optimal_parameters = *params_current;
+        this->optimal_parameters = *params_current;
 
         /* Dealloc vector of parameters */
         if(params_current) {
@@ -241,17 +236,11 @@ namespace lib4neuro {
         }
 
 //        std::shared_ptr<std::vector<double>> params = std::make_shared<std::vector<double>>(this->p_impl->optimal_parameters);
-        ef.get_network_instance()->copy_parameter_space(&this->p_impl->optimal_parameters);
+        ef.get_network_instance()->copy_parameter_space(&this->optimal_parameters);
 
 //        delete params_tmp;
 
     }
 
-    std::vector<double> LevenbergMarquardt::get_parameters() {
-//        std::shared_ptr<std::vector<double>> ret;
-//        ret.reset(&this->p_impl->optimal_parameters);
-        return this->p_impl->optimal_parameters;
-    }
-
     LevenbergMarquardt::~LevenbergMarquardt() = default;
 }
\ No newline at end of file
diff --git a/src/LearningMethods/LevenbergMarquardt.h b/src/LearningMethods/LevenbergMarquardt.h
index 0ae18154..1622b88f 100644
--- a/src/LearningMethods/LevenbergMarquardt.h
+++ b/src/LearningMethods/LevenbergMarquardt.h
@@ -47,8 +47,6 @@ namespace lib4neuro {
                       LM_UPDATE_TYPE update_type,
                       std::ofstream* ofs = nullptr);
 
-        std::vector<double> get_parameters() override;
-
         ~LevenbergMarquardt();
     };
 
diff --git a/src/LearningMethods/ParticleSwarm.cpp b/src/LearningMethods/ParticleSwarm.cpp
index eadc193f..af97eb13 100644
--- a/src/LearningMethods/ParticleSwarm.cpp
+++ b/src/LearningMethods/ParticleSwarm.cpp
@@ -271,9 +271,9 @@ namespace lib4neuro {
 //            delete this->domain_bounds;
 //        }
 //
-//        if (this->p_min_glob) {
-//            delete this->p_min_glob;
-//            this->p_min_glob = nullptr;
+//        if (this->optimal_parameters) {
+//            delete this->optimal_parameters;
+//            this->optimal_parameters = nullptr;
 //        }
 
     }
@@ -310,10 +310,10 @@ namespace lib4neuro {
         }
         this->radius_factor *= 1.25;
 
-//        if (!this->p_min_glob) {
-//            this->p_min_glob = new std::vector<double>(this->func_dim);
+//        if (!this->optimal_parameters) {
+//            this->optimal_parameters = new std::vector<double>(this->func_dim);
 //        } else {
-            this->p_min_glob.resize(this->func_dim);
+            this->optimal_parameters.resize(this->func_dim);
 //        }
 
         size_t outer_it = 0;
@@ -333,7 +333,7 @@ namespace lib4neuro {
         double euclidean_dist;
         double current_err = -1;
 
-        this->determine_optimal_coordinate_and_value(this->p_min_glob, optimal_value);
+        this->determine_optimal_coordinate_and_value(this->optimal_parameters, optimal_value);
 //    for(unsigned int i = 0; i < this->n_particles; ++i){
 //        this->particle_swarm[i]->print_coordinate();
 //    }
@@ -346,10 +346,10 @@ namespace lib4neuro {
             //////////////////////////////////////////////////
             // Clustering algorithm - termination condition //
             //////////////////////////////////////////////////
-            particle = this->determine_optimal_coordinate_and_value(this->p_min_glob, optimal_value);
+            particle = this->determine_optimal_coordinate_and_value(this->optimal_parameters, optimal_value);
 
-            if (std::find(global_best_vec.begin(), global_best_vec.end(), this->p_min_glob) == global_best_vec.end()) {
-                global_best_vec.emplace_back(this->p_min_glob); // TODO rewrite as std::set
+            if (std::find(global_best_vec.begin(), global_best_vec.end(), this->optimal_parameters) == global_best_vec.end()) {
+                global_best_vec.emplace_back(this->optimal_parameters); // TODO rewrite as std::set
             }
 
             cluster.insert(particle);
@@ -373,7 +373,7 @@ namespace lib4neuro {
 
             for (size_t pi = 0; pi < this->n_particles; pi++) {
                 particle = this->particle_swarm.at(pi);
-                tmp_velocity = particle->change_coordinate(this->w, this->c1, this->c2, this->p_min_glob,
+                tmp_velocity = particle->change_coordinate(this->w, this->c1, this->c2, this->optimal_parameters,
                                                            global_best_vec);
 //                particle->print_coordinate();
 
@@ -413,8 +413,8 @@ namespace lib4neuro {
 //            }
 //        }
 
-//            std::shared_ptr<std::vector<double>> coord = std::make_shared<std::vector<double>>(this->p_min_glob);
-            current_err = ef.eval(&this->p_min_glob);
+//            std::shared_ptr<std::vector<double>> coord = std::make_shared<std::vector<double>>(this->optimal_parameters);
+            current_err = ef.eval(&this->optimal_parameters);
 
             COUT_DEBUG(std::string("Iteration: ") << (unsigned int)(outer_it)
                                                   << ". Total error: " << current_err
@@ -447,7 +447,7 @@ namespace lib4neuro {
                                               << ". Objective function value: " << optimal_value
                                               << "." << std::endl );
 
-        this->determine_optimal_coordinate_and_value(this->p_min_glob, optimal_value);
+        this->determine_optimal_coordinate_and_value(this->optimal_parameters, optimal_value);
         //TODO rewrite following output using COUT_INFO
         if (outer_it < this->iter_max) {
             /* Convergence reached */
@@ -458,8 +458,8 @@ namespace lib4neuro {
         }
 
 //        std::shared_ptr<std::vector<double>> coord;
-//        coord.reset(this->p_min_glob);
-        ef.get_network_instance()->copy_parameter_space(&this->p_min_glob);
+//        coord.reset(this->optimal_parameters);
+        ef.get_network_instance()->copy_parameter_space(&this->optimal_parameters);
 
         delete centroid;
     }
@@ -534,12 +534,6 @@ namespace lib4neuro {
         return std::sqrt(dist);
     }
 
-    std::vector<double> ParticleSwarm::get_parameters() {
-//        std::shared_ptr<std::vector<double>> ret;
-//        ret.reset(&this->p_min_glob);
-        return this->p_min_glob;
-    }
-
     void ParticleSwarm::init_constructor(std::vector<double>* domain_bounds,
                                          double c1,
                                          double c2,
diff --git a/src/LearningMethods/ParticleSwarm.h b/src/LearningMethods/ParticleSwarm.h
index ac6d6c9d..0a80ed70 100644
--- a/src/LearningMethods/ParticleSwarm.h
+++ b/src/LearningMethods/ParticleSwarm.h
@@ -197,11 +197,6 @@ namespace lib4neuro {
          */
         std::vector<double> domain_bounds; // = nullptr;
 
-        /**
-         * Coordinates of the found global minima
-         */
-        std::vector<double> p_min_glob;
-
     protected:
         /**
          *
@@ -305,11 +300,6 @@ namespace lib4neuro {
          */
         LIB4NEURO_API void optimize(lib4neuro::ErrorFunction &ef, std::ofstream* ofs = nullptr) override;
 
-        /**
-         *
-         * @return
-         */
-        LIB4NEURO_API std::vector<double> get_parameters() override;
     };
 
 }
diff --git a/src/LearningMethods/RandomSolution.cpp b/src/LearningMethods/RandomSolution.cpp
index f5a187de..8372d058 100644
--- a/src/LearningMethods/RandomSolution.cpp
+++ b/src/LearningMethods/RandomSolution.cpp
@@ -16,14 +16,6 @@ namespace lib4neuro {
 
     RandomSolution::~RandomSolution() {}
 
-    std::vector<double>  RandomSolution::get_parameters() {
-//        std::shared_ptr<std::vector<double>> ret;
-//        ret.reset(&this->optimal_parameters);
-//        return ret;
-
-        return this->optimal_parameters;
-    }
-
     void RandomSolution::optimize(lib4neuro::ErrorFunction &ef, std::ofstream *ofs) {
         ef.get_network_instance()->randomize_parameters();
 
diff --git a/src/LearningMethods/RandomSolution.h b/src/LearningMethods/RandomSolution.h
index cdcda7df..cb61a566 100644
--- a/src/LearningMethods/RandomSolution.h
+++ b/src/LearningMethods/RandomSolution.h
@@ -16,9 +16,6 @@ namespace lib4neuro {
 
     class RandomSolution : public lib4neuro::LearningMethod {
 
-    private:
-        std::vector<double> optimal_parameters;
-
     protected:
     public:
 
@@ -28,9 +25,6 @@ namespace lib4neuro {
 
         void optimize(lib4neuro::ErrorFunction &ef,
                       std::ofstream *ofs = nullptr) override;
-
-        std::vector<double> get_parameters() override;
-
     };
 
 }
diff --git a/src/Solvers/DESolver.cpp b/src/Solvers/DESolver.cpp
index 50fcb254..4b9df8c6 100644
--- a/src/Solvers/DESolver.cpp
+++ b/src/Solvers/DESolver.cpp
@@ -410,7 +410,7 @@ namespace lib4neuro {
         learning_method.optimize(total_error);
 //        std::shared_ptr<std::vector<double>> params;
 //        params.reset(learning_method.get_parameters());
-        std::vector<double> params = learning_method.get_parameters();
+        std::vector<double> params = *learning_method.get_parameters();
         this->solution->copy_parameter_space(&params);
 
         printf("error after optimization: %f\n", total_error.eval(nullptr));
-- 
GitLab