Commit 8a8549f1 authored by Martin Beseda

WIP: Fixed memory errors...

parent 6d896ab3
@@ -13,16 +13,16 @@ namespace lib4neuro {

     GradientDescent::GradientDescent(double epsilon, size_t n_to_restart, int max_iters, size_t batch) {
         this->tolerance = epsilon;
         this->restart_frequency = n_to_restart;
-        this->optimal_parameters = new std::vector<double>(0);
+        // this->optimal_parameters = new std::vector<double>(0);
         this->maximum_niters = max_iters;
         this->batch = batch;
     }

     GradientDescent::~GradientDescent() {
-        if (this->optimal_parameters) {
-            delete this->optimal_parameters;
-            this->optimal_parameters = nullptr;
-        }
+        // if (this->optimal_parameters) {
+        //     delete this->optimal_parameters;
+        //     this->optimal_parameters = nullptr;
+        // }
     }

     void GradientDescent::eval_step_size_mk(double &gamma,
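The hunk above retires the manual new/delete pair by turning optimal_parameters into a value member (see the header hunk further down). A minimal sketch of the resulting rule-of-zero pattern, with a hypothetical Optimizer class standing in for GradientDescent:

#include <vector>

// Hypothetical stand-in for GradientDescent: once the parameter storage is a
// value member, the compiler-generated special members manage it correctly.
class Optimizer {
public:
    Optimizer() = default;   // no 'new std::vector<double>(0)' in the constructor
    ~Optimizer() = default;  // no 'delete' in the destructor; the vector frees itself

private:
    std::vector<double> optimal_parameters;  // value member instead of a raw pointer
};

int main() {
    Optimizer o;  // constructed and destroyed without manual memory management
    return 0;
}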
@@ -295,8 +295,8 @@ namespace lib4neuro {
 #endif
         }

-        *this->optimal_parameters = *params_current;
-        ef.get_network_instance()->copy_parameter_space( this->optimal_parameters );
+        this->optimal_parameters = *params_current;
+        ef.get_network_instance()->copy_parameter_space( &this->optimal_parameters );

         delete gradient_current;
         delete gradient_prev;

@@ -305,6 +305,6 @@ namespace lib4neuro {
     }

     std::vector<double> *GradientDescent::get_parameters() {
-        return this->optimal_parameters;
+        return &this->optimal_parameters;
     }
 }
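Taking the address of the value member keeps the pointer-returning signature intact for existing callers. A hedged sketch of that getter pattern (the Solver class is illustrative, not lib4neuro's API); note the returned pointer is only valid while the owning object is alive:

#include <cassert>
#include <vector>

// Illustrative class: a value member exposed through the old pointer interface.
class Solver {
public:
    std::vector<double>* get_parameters() {
        return &this->optimal_parameters;  // points into the object itself
    }

private:
    std::vector<double> optimal_parameters{1.0, 2.0, 3.0};
};

int main() {
    Solver s;
    std::vector<double>* p = s.get_parameters();
    assert(p->size() == 3);  // callers observe the member directly, no copy
    return 0;  // caution: 'p' would dangle if used after 's' is destroyed
}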
@@ -44,7 +44,7 @@ namespace lib4neuro {
         /**
          * Vector of minima coordinates
          */
-        std::vector<double> *optimal_parameters;
+        std::vector<double> optimal_parameters;

         /**
          * Adaptive calculation of the step-size based on several historical characteristics.
@@ -13,13 +13,10 @@ namespace lib4neuro {

     LearningSequence::LearningSequence( double tolerance, int max_n_cycles ){
         this->tol = tolerance;
         this->max_number_of_cycles = max_n_cycles;
-        this->best_parameters = new std::vector<double>();
+        // this->best_parameters = new std::vector<double>();
     }

-    LearningSequence::~LearningSequence() {
-    }
+    LearningSequence::~LearningSequence() = default;

     std::vector<double>* LearningSequence::get_parameters() {
         if( this->learning_sequence.size() > 0 ){

@@ -28,7 +25,7 @@ namespace lib4neuro {
         return nullptr;
     }

-    void LearningSequence::add_learning_method(LearningMethod *method) {
+    void LearningSequence::add_learning_method(std::shared_ptr<LearningMethod> method) {
         this->learning_sequence.push_back( method );
     }
@@ -49,16 +46,16 @@ namespace lib4neuro {

                 if( error < the_best_error ){
                     the_best_error = error;
-                    *this->best_parameters = *ef.get_parameters();
+                    this->best_parameters = *ef.get_parameters();
                 }

                 if( error <= this->tol ){
-                    ef.get_network_instance()->copy_parameter_space( this->best_parameters );
+                    ef.get_network_instance()->copy_parameter_space( &this->best_parameters );
                     return;
                 }
             }
             COUT_DEBUG("Cycle: " << cycle_idx << ", the lowest error: " << the_best_error << std::endl );
         }
-        ef.get_network_instance()->copy_parameter_space( this->best_parameters );
+        ef.get_network_instance()->copy_parameter_space( &this->best_parameters );
     }
 }
\ No newline at end of file
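The call sites change in lock-step: functions such as copy_parameter_space() still take a std::vector<double>*, so callers now pass the address of the member instead of the member itself. A tiny sketch, with a free function standing in for the NeuralNetwork method:

#include <vector>

// Stand-in for NeuralNetwork::copy_parameter_space(std::vector<double>*).
static void copy_parameter_space(std::vector<double>* params) {
    (void)params;  // a real implementation would copy *params into the network
}

int main() {
    std::vector<double> best_parameters{0.5, 0.25};  // value member in the sketch
    copy_parameter_space(&best_parameters);          // was: copy_parameter_space(ptr_member)
    return 0;
}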
@@ -24,7 +24,7 @@ namespace lib4neuro {
         /**
          *
          */
-        std::vector<LearningMethod*> learning_sequence;
+        std::vector<std::shared_ptr<LearningMethod>> learning_sequence;

         /**
          *

@@ -34,7 +34,7 @@ namespace lib4neuro {
         /**
          *
          */
-        std::vector<double> *best_parameters = nullptr;
+        std::vector<double> best_parameters; // = nullptr;
         /**
          *

@@ -71,7 +71,7 @@ namespace lib4neuro {
          *
          * @param method
          */
-        LIB4NEURO_API void add_learning_method( LearningMethod * method );
+        LIB4NEURO_API void add_learning_method( std::shared_ptr<LearningMethod> method );
     };
 }
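Switching the container to std::shared_ptr<LearningMethod> means the sequence shares ownership of each method with its creator: the method is destroyed exactly once, when the last owner releases it. A self-contained sketch of that ownership model (both structs are minimal stand-ins for the lib4neuro interfaces):

#include <memory>
#include <vector>

struct LearningMethod {
    virtual ~LearningMethod() = default;  // virtual dtor: deletion via base pointer is safe
};

struct RandomSolution : LearningMethod {};

class LearningSequence {
public:
    void add_learning_method(std::shared_ptr<LearningMethod> method) {
        this->learning_sequence.push_back(method);  // reference count increases
    }

private:
    std::vector<std::shared_ptr<LearningMethod>> learning_sequence;
};

int main() {
    LearningSequence seq;
    auto rs = std::make_shared<RandomSolution>();
    seq.add_learning_method(rs);  // 'rs' and 'seq' now co-own the method
    return 0;
}   // last owner going out of scope deletes the RandomSolution exactly once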
@@ -31,7 +31,7 @@ struct lib4neuro::LevenbergMarquardt::LevenbergMarquardtImpl {
     /**
      * Vector of minimum coordinates
      */
-    std::vector<double> *optimal_parameters;
+    std::vector<double> optimal_parameters;

     /**
      * Returns Jacobian matrix of the residual function using the backpropagation algorithm

@@ -92,7 +92,7 @@ namespace lib4neuro {
         this->p_impl->lambda_increase = lambda_increase;
         this->p_impl->lambda_decrease = lambda_decrease;
         this->p_impl->maximum_niters = max_iters;
-        this->p_impl->optimal_parameters = new std::vector<double>();
+        // this->p_impl->optimal_parameters = new std::vector<double>();
     }

     void LevenbergMarquardt::optimize(lib4neuro::ErrorFunction& ef,

@@ -229,16 +229,16 @@ namespace lib4neuro {
         COUT_DEBUG("Iteration: " << iter_counter << " Current error: " << current_err << ", Current gradient norm: " << gradient_norm << ", Direction norm: " << update_norm << std::endl);

         /* Store the optimized parameters */
-        *this->p_impl->optimal_parameters = *params_current;
-        ef.get_network_instance()->copy_parameter_space(this->p_impl->optimal_parameters);
+        this->p_impl->optimal_parameters = *params_current;
+        ef.get_network_instance()->copy_parameter_space(&this->p_impl->optimal_parameters);

         delete params_tmp;
     }

     std::vector<double>* LevenbergMarquardt::get_parameters() {
-        return this->p_impl->optimal_parameters;
+        return &this->p_impl->optimal_parameters;
     }

     LevenbergMarquardt::~LevenbergMarquardt() = default;
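Inside the pimpl the conversion works the same way: the impl struct holds the vector by value, so the constructor no longer allocates it and the getter returns the member's address. An illustrative sketch of that layout, assuming a minimal pimpl (not the class's real contents):

#include <memory>
#include <vector>

class LevenbergMarquardtLike {
    struct Impl {
        std::vector<double> optimal_parameters;  // default-constructed, initially empty
    };
    std::unique_ptr<Impl> p_impl;

public:
    LevenbergMarquardtLike() : p_impl(new Impl()) {}  // no separate vector allocation
    ~LevenbergMarquardtLike() = default;              // unique_ptr destroys the impl

    std::vector<double>* get_parameters() {
        return &this->p_impl->optimal_parameters;     // address of the impl's value member
    }
};

int main() {
    LevenbergMarquardtLike lm;
    lm.get_parameters()->push_back(0.5);  // writes straight into the impl's storage
    return 0;
}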
@@ -11,21 +11,21 @@
 namespace lib4neuro {

     RandomSolution::RandomSolution() {
-        this->optimal_parameters = new std::vector<double>();
+        // this->optimal_parameters = new std::vector<double>();
     }

     RandomSolution::~RandomSolution() {
     }

-    std::vector<double> *RandomSolution::get_parameters() {
-        return this->optimal_parameters;
+    std::vector<double>* RandomSolution::get_parameters() {
+        return &this->optimal_parameters;
     }

     void RandomSolution::optimize(lib4neuro::ErrorFunction &ef, std::ofstream *ofs) {
         ef.get_network_instance()->randomize_parameters();
-        *this->optimal_parameters = *ef.get_parameters();
-        COUT_INFO("Producing a random solution... error: " << ef.eval(this->optimal_parameters) << std::endl);
+        this->optimal_parameters = *ef.get_parameters();
+        COUT_INFO("Producing a random solution... error: " << ef.eval(&this->optimal_parameters) << std::endl);
     }
 }
\ No newline at end of file
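In optimize(), this->optimal_parameters = *ef.get_parameters(); is now a plain copy assignment into the value member rather than a write through a raw pointer, so the stored optimum is an independent copy. A tiny sketch of that semantics, where source stands in for *ef.get_parameters():

#include <vector>

int main() {
    std::vector<double> source{0.1, 0.2};    // stand-in for *ef.get_parameters()
    std::vector<double> optimal_parameters;  // value member in the sketch

    optimal_parameters = source;  // element-wise deep copy, no aliasing
    source[0] = 9.9;              // later changes to the source do not affect the copy

    return optimal_parameters[0] == 0.1 ? 0 : 1;  // exits 0: the stored copy is preserved
}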
@@ -17,7 +17,7 @@ namespace lib4neuro {
     class RandomSolution : public lib4neuro::LearningMethod {

     private:

-        std::vector<double> *optimal_parameters;
+        std::vector<double> optimal_parameters;

     protected:

     public: