From 8a8549f1793c1f35a3f15e90d28f477124e7ccfe Mon Sep 17 00:00:00 2001
From: Martin Beseda <martin.beseda@vsb.cz>
Date: Wed, 20 Mar 2019 19:17:37 +0100
Subject: [PATCH] WIP: Fixed memory errors...

---
 src/LearningMethods/GradientDescent.cpp    | 16 ++++++++--------
 src/LearningMethods/GradientDescent.h      |  2 +-
 src/LearningMethods/LearningSequence.cpp   | 15 ++++++---------
 src/LearningMethods/LearningSequence.h     |  6 +++---
 src/LearningMethods/LevenbergMarquardt.cpp | 10 +++++-----
 src/LearningMethods/RandomSolution.cpp     | 10 +++++-----
 src/LearningMethods/RandomSolution.h       |  2 +-
 7 files changed, 29 insertions(+), 32 deletions(-)

diff --git a/src/LearningMethods/GradientDescent.cpp b/src/LearningMethods/GradientDescent.cpp
index 5727490f..3832a232 100644
--- a/src/LearningMethods/GradientDescent.cpp
+++ b/src/LearningMethods/GradientDescent.cpp
@@ -13,16 +13,16 @@ namespace lib4neuro {
     GradientDescent::GradientDescent(double epsilon, size_t n_to_restart, int max_iters, size_t batch) {
         this->tolerance = epsilon;
         this->restart_frequency = n_to_restart;
-        this->optimal_parameters = new std::vector<double>(0);
+//        this->optimal_parameters = new std::vector<double>(0);
         this->maximum_niters = max_iters;
         this->batch = batch;
     }
 
     GradientDescent::~GradientDescent() {
-        if (this->optimal_parameters) {
-            delete this->optimal_parameters;
-            this->optimal_parameters = nullptr;
-        }
+//        if (this->optimal_parameters) {
+//            delete this->optimal_parameters;
+//            this->optimal_parameters = nullptr;
+//        }
     }
 
     void GradientDescent::eval_step_size_mk(double &gamma,
@@ -295,8 +295,8 @@ namespace lib4neuro {
 #endif
         }
 
-        *this->optimal_parameters = *params_current;
-        ef.get_network_instance()->copy_parameter_space( this->optimal_parameters );
+        this->optimal_parameters = *params_current;
+        ef.get_network_instance()->copy_parameter_space( &this->optimal_parameters );
 
         delete gradient_current;
         delete gradient_prev;
@@ -305,6 +305,6 @@ namespace lib4neuro {
     }
 
     std::vector<double> *GradientDescent::get_parameters() {
-        return this->optimal_parameters;
+        return &this->optimal_parameters;
     }
 }
diff --git a/src/LearningMethods/GradientDescent.h b/src/LearningMethods/GradientDescent.h
index 352def27..325078b0 100644
--- a/src/LearningMethods/GradientDescent.h
+++ b/src/LearningMethods/GradientDescent.h
@@ -44,7 +44,7 @@ namespace lib4neuro {
         /**
          * Vector of minima coordinates
          */
-        std::vector<double> *optimal_parameters;
+        std::vector<double> optimal_parameters;
 
         /**
          * Adaptive calculation of the step-size based on several historical characteristics.
diff --git a/src/LearningMethods/LearningSequence.cpp b/src/LearningMethods/LearningSequence.cpp
index 8d5f9ce7..05569b85 100644
--- a/src/LearningMethods/LearningSequence.cpp
+++ b/src/LearningMethods/LearningSequence.cpp
@@ -13,13 +13,10 @@ namespace lib4neuro {
     LearningSequence::LearningSequence( double tolerance, int max_n_cycles ){
         this->tol = tolerance;
         this->max_number_of_cycles = max_n_cycles;
-        this->best_parameters = new std::vector<double>();
+//        this->best_parameters = new std::vector<double>();
     }
 
-    LearningSequence::~LearningSequence() {
-
-
-    }
+    LearningSequence::~LearningSequence() = default;
 
     std::vector<double>* LearningSequence::get_parameters() {
         if( this->learning_sequence.size() > 0 ){
@@ -28,7 +25,7 @@ namespace lib4neuro {
         return nullptr;
     }
 
-    void LearningSequence::add_learning_method(LearningMethod *method) {
+    void LearningSequence::add_learning_method(std::shared_ptr<LearningMethod> method) {
         this->learning_sequence.push_back( method );
     }
 
@@ -49,16 +46,16 @@ namespace lib4neuro {
 
             if( error < the_best_error ){
                 the_best_error = error;
-                *this->best_parameters = *ef.get_parameters();
+                this->best_parameters = *ef.get_parameters();
             }
 
             if( error <= this->tol ){
-                ef.get_network_instance()->copy_parameter_space( this->best_parameters );
+                ef.get_network_instance()->copy_parameter_space( &this->best_parameters );
                 return;
             }
         }
         COUT_DEBUG("Cycle: " << cycle_idx << ", the lowest error: " << the_best_error << std::endl );
     }
-        ef.get_network_instance()->copy_parameter_space( this->best_parameters );
+        ef.get_network_instance()->copy_parameter_space( &this->best_parameters );
     }
 }
\ No newline at end of file
diff --git a/src/LearningMethods/LearningSequence.h b/src/LearningMethods/LearningSequence.h
index bb975910..3f3f4b5d 100644
--- a/src/LearningMethods/LearningSequence.h
+++ b/src/LearningMethods/LearningSequence.h
@@ -24,7 +24,7 @@ namespace lib4neuro {
         /**
          *
         */
-        std::vector<LearningMethod*> learning_sequence;
+        std::vector<std::shared_ptr<LearningMethod>> learning_sequence;
 
         /**
          *
@@ -34,7 +34,7 @@ namespace lib4neuro {
         /**
         *
        */
-        std::vector<double> *best_parameters = nullptr;
+        std::vector<double> best_parameters; // = nullptr;
 
         /**
         *
@@ -71,7 +71,7 @@ namespace lib4neuro {
         *
        * @param method
         */
-        LIB4NEURO_API void add_learning_method( LearningMethod * method );
+        LIB4NEURO_API void add_learning_method( std::shared_ptr<LearningMethod> method );
 
    };
 }
diff --git a/src/LearningMethods/LevenbergMarquardt.cpp b/src/LearningMethods/LevenbergMarquardt.cpp
index b9db3539..d10eab7b 100644
--- a/src/LearningMethods/LevenbergMarquardt.cpp
+++ b/src/LearningMethods/LevenbergMarquardt.cpp
@@ -31,7 +31,7 @@ struct lib4neuro::LevenbergMarquardt::LevenbergMarquardtImpl {
     /**
      * Vector of minimum coordinates
      */
-    std::vector<double> *optimal_parameters;
+    std::vector<double> optimal_parameters;
 
     /**
      * Returns Jacobian matrix of the residual function using the backpropagation algorithm
@@ -92,7 +92,7 @@ namespace lib4neuro {
         this->p_impl->lambda_increase = lambda_increase;
         this->p_impl->lambda_decrease = lambda_decrease;
         this->p_impl->maximum_niters = max_iters;
-        this->p_impl->optimal_parameters = new std::vector<double>();
+//        this->p_impl->optimal_parameters = new std::vector<double>();
     }
 
     void LevenbergMarquardt::optimize(lib4neuro::ErrorFunction& ef,
@@ -229,16 +229,16 @@ namespace lib4neuro {
 
             COUT_DEBUG("Iteration: " << iter_counter << " Current error: " << current_err << ", Current gradient norm: " << gradient_norm << ", Direction norm: " << update_norm << std::endl);
 
         /* Store the optimized parameters */
-        *this->p_impl->optimal_parameters = *params_current;
+        this->p_impl->optimal_parameters = *params_current;
 
-        ef.get_network_instance()->copy_parameter_space(this->p_impl->optimal_parameters);
+        ef.get_network_instance()->copy_parameter_space(&this->p_impl->optimal_parameters);
 
         delete params_tmp;
     }
 
     std::vector<double>* LevenbergMarquardt::get_parameters() {
-        return this->p_impl->optimal_parameters;
+        return &this->p_impl->optimal_parameters;
     }
 
     LevenbergMarquardt::~LevenbergMarquardt() = default;
diff --git a/src/LearningMethods/RandomSolution.cpp b/src/LearningMethods/RandomSolution.cpp
index 84cabc9e..f5362566 100644
--- a/src/LearningMethods/RandomSolution.cpp
+++ b/src/LearningMethods/RandomSolution.cpp
@@ -11,21 +11,21 @@
 namespace lib4neuro {
 
     RandomSolution::RandomSolution() {
-        this->optimal_parameters = new std::vector<double>();
+//        this->optimal_parameters = new std::vector<double>();
     }
 
     RandomSolution::~RandomSolution() {
 
     }
 
-    std::vector<double> *RandomSolution::get_parameters() {
-        return this->optimal_parameters;
+    std::vector<double>* RandomSolution::get_parameters() {
+        return &this->optimal_parameters;
     }
 
     void RandomSolution::optimize(lib4neuro::ErrorFunction &ef,
                                   std::ofstream *ofs) {
         ef.get_network_instance()->randomize_parameters();
-        *this->optimal_parameters = *ef.get_parameters();
-        COUT_INFO("Producing a random solution... error: " << ef.eval(this->optimal_parameters) << std::endl);
+        this->optimal_parameters = *ef.get_parameters();
+        COUT_INFO("Producing a random solution... error: " << ef.eval(&this->optimal_parameters) << std::endl);
     }
 }
\ No newline at end of file
diff --git a/src/LearningMethods/RandomSolution.h b/src/LearningMethods/RandomSolution.h
index 5a18b9dc..38fbdfdf 100644
--- a/src/LearningMethods/RandomSolution.h
+++ b/src/LearningMethods/RandomSolution.h
@@ -17,7 +17,7 @@ namespace lib4neuro {
 
     class RandomSolution : public lib4neuro::LearningMethod {
     private:
-        std::vector<double> *optimal_parameters;
+        std::vector<double> optimal_parameters;
 
     protected:
     public:
-- 
GitLab
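
The whole patch applies one ownership pattern: every heap-allocated `std::vector<double>*` member becomes a value member, so the matching `new`/`delete` pairs (the source of the memory errors) disappear, `get_parameters()` returns the address of the member, and raw `LearningMethod*` elements in the sequence become `std::shared_ptr<LearningMethod>`. Below is a minimal standalone sketch of that before/after pattern; `Method` and `Sequence` are hypothetical stand-ins for lib4neuro's `LearningMethod` and `LearningSequence`, with the rest of the library's interface omitted.

#include <memory>
#include <utility>
#include <vector>

// Stand-in for a learning method after the patch: the parameter vector is
// a value member, so allocation, copying and destruction are handled by
// std::vector itself -- no new/delete, no null checks, no leaks.
class Method {
public:
    virtual ~Method() = default;
    std::vector<double>* get_parameters() { return &optimal_parameters; }
protected:
    std::vector<double> optimal_parameters;  // was: std::vector<double>*
};

// Stand-in for the learning sequence: shared_ptr replaces the raw Method*
// elements, so the sequence shares ownership with the caller instead of
// borrowing pointers of unclear lifetime.
class Sequence {
public:
    void add_learning_method(std::shared_ptr<Method> method) {
        learning_sequence.push_back(std::move(method));
    }
private:
    std::vector<std::shared_ptr<Method>> learning_sequence;
    std::vector<double> best_parameters;  // was: std::vector<double>* = nullptr
};

int main() {
    Sequence seq;
    seq.add_learning_method(std::make_shared<Method>());
    // Everything is released automatically when seq goes out of scope.
    return 0;
}

One caller-visible consequence of this change: `get_parameters()` now returns a pointer into the optimizer object itself, so the returned pointer must not outlive the optimizer that produced it.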