diff --git a/src/ErrorFunction/ErrorFunctions.cpp b/src/ErrorFunction/ErrorFunctions.cpp index 01d9bc34adc84a988348bfc20b67368757c8436a..0d490a06482ef2d35a796d556a6d09eb4ba39859 100644 --- a/src/ErrorFunction/ErrorFunctions.cpp +++ b/src/ErrorFunction/ErrorFunctions.cpp @@ -75,20 +75,19 @@ namespace lib4neuro { return this->ds_test; } - std::vector<double>* ErrorFunction::get_parameters() { - std::vector<double>* output; - output = new std::vector<double>(this->net->get_n_weights() + this->net->get_n_biases()); + std::vector<double> ErrorFunction::get_parameters() { + std::vector<double> output(this->net->get_n_weights() + this->net->get_n_biases()); // std::vector<double>* output = new std::vector<double>(this->net->get_n_weights() + this->net->get_n_biases()); size_t i = 0; for (auto el: *this->net->get_parameter_ptr_weights()) { - output->at(i) = el; + output[i] = el; ++i; } for (auto el: *this->net->get_parameter_ptr_biases()) { - output->at(i) = el; + output[i] = el; ++i; } @@ -380,7 +379,7 @@ namespace lib4neuro { //TODO check input vector sizes - they HAVE TO be allocated before calling this function size_t n_parameters = this->get_dimension(); - std::vector<double>* parameters = this->get_parameters(); + std::vector<double> parameters = this->get_parameters(); double delta; // Complete step size double former_parameter_value; @@ -388,23 +387,23 @@ namespace lib4neuro { double f_val2; // f(x - delta) for (size_t i = 0; i < n_parameters; i++) { - delta = h * (1 + std::abs(parameters->at(i))); - former_parameter_value = parameters->at(i); + delta = h * (1 + std::abs(parameters[i])); + former_parameter_value = parameters[i]; if(delta != 0) { /* Computation of f_val1 = f(x + delta) */ - parameters->at(i) = former_parameter_value + delta; - f_val1 = this->calculate_single_residual(input, output, parameters); + parameters[i] = former_parameter_value + delta; + f_val1 = this->calculate_single_residual(input, output, &parameters); /* Computation of f_val2 = f(x - delta) 
*/ - parameters->at(i) = former_parameter_value - delta; - f_val2 = this->calculate_single_residual(input, output, parameters); + parameters[i] = former_parameter_value - delta; + f_val2 = this->calculate_single_residual(input, output, &parameters); gradient->at(i) = (f_val1 - f_val2) / (2*delta); } /* Restore parameter to the former value */ - parameters->at(i) = former_parameter_value; + parameters[i] = former_parameter_value; } } @@ -679,7 +678,7 @@ namespace lib4neuro { return this->dimension; } - std::vector<double>* ErrorSum::get_parameters() { + std::vector<double> ErrorSum::get_parameters() { return this->summand->at(0)->get_parameters(); } diff --git a/src/ErrorFunction/ErrorFunctions.h b/src/ErrorFunction/ErrorFunctions.h index fb1f78814e7f170b415a643266b6403aa18a8a88..edc8d6bc99af3231b594c94cb0139bb7693bbb72 100644 --- a/src/ErrorFunction/ErrorFunctions.h +++ b/src/ErrorFunction/ErrorFunctions.h @@ -68,7 +68,7 @@ namespace lib4neuro { * * @return */ - virtual std::vector<double>* get_parameters(); + virtual std::vector<double> get_parameters(); /** * @return */ @@ -538,7 +538,7 @@ namespace lib4neuro { * * @return */ - LIB4NEURO_API std::vector<double>* get_parameters() override; + LIB4NEURO_API std::vector<double> get_parameters() override; /** * diff --git a/src/LearningMethods/GradientDescent.cpp b/src/LearningMethods/GradientDescent.cpp index d74410235856521cb8b5ecbaf1989c95548dbc8a..c82db8d6a016a53c578be5f290c6358ddacda928 100644 --- a/src/LearningMethods/GradientDescent.cpp +++ b/src/LearningMethods/GradientDescent.cpp @@ -113,7 +113,7 @@ namespace lib4neuro { std::vector<double>* gradient_current(new std::vector<double>(n_parameters)); std::vector<double>* gradient_prev(new std::vector<double>(n_parameters)); - std::vector<double>* params_current = ef.get_parameters(); + std::vector<double>* params_current = new std::vector<double>(ef.get_parameters()); std::vector<double>* params_prev(new std::vector<double>(n_parameters)); std::vector<double>* ptr_mem; 
@@ -306,9 +306,9 @@ namespace lib4neuro { // delete params_prev; } - std::vector<double>* GradientDescent::get_parameters() { + std::vector<double> GradientDescent::get_parameters() { // std::shared_ptr<std::vector<double>> ret; // ret.reset(&this->optimal_parameters); - return this->optimal_parameters; + return std::vector<double>(*this->optimal_parameters); } } diff --git a/src/LearningMethods/GradientDescent.h b/src/LearningMethods/GradientDescent.h index 3560c5123c21471d0c6d63ab1f3dbb3d137402fb..e8db77af5fe75927fe1ca77c987bd326626d8d9a 100644 --- a/src/LearningMethods/GradientDescent.h +++ b/src/LearningMethods/GradientDescent.h @@ -116,7 +116,7 @@ namespace lib4neuro { * * @return */ - LIB4NEURO_API std::vector<double>* get_parameters() override; + LIB4NEURO_API std::vector<double> get_parameters() override; }; } diff --git a/src/LearningMethods/GradientDescentBB.cpp b/src/LearningMethods/GradientDescentBB.cpp index 3d8626d0d0c1b85064cd69a9db84eaa2827f8ab9..55d5043c64079bb30cb335c254be4c245444d9e8 100644 --- a/src/LearningMethods/GradientDescentBB.cpp +++ b/src/LearningMethods/GradientDescentBB.cpp @@ -55,7 +55,7 @@ namespace lib4neuro { std::vector<double>* gradient_current(new std::vector<double>(n_parameters)); std::vector<double>* gradient_prev(new std::vector<double>(n_parameters)); - std::vector<double>* params_current = ef.get_parameters(); + std::vector<double>* params_current = new std::vector<double>(ef.get_parameters()); std::vector<double>* params_prev(new std::vector<double>(n_parameters)); std::vector<double>* params_best(new std::vector<double>(*params_current)); @@ -223,10 +223,10 @@ namespace lib4neuro { // delete params_best; } - std::vector<double>* GradientDescentBB::get_parameters() { + std::vector<double> GradientDescentBB::get_parameters() { // std::vector<double>* ret; // ret.reset(&this->optimal_parameters); - return this->optimal_parameters; + return std::vector<double>(*this->optimal_parameters); } } diff --git 
a/src/LearningMethods/GradientDescentBB.h b/src/LearningMethods/GradientDescentBB.h index 5b376f0728e4343123efb0ce5c6cbf3c3698712e..db5008ad86ca3ebda1a2a7c25f967e32edab0687 100644 --- a/src/LearningMethods/GradientDescentBB.h +++ b/src/LearningMethods/GradientDescentBB.h @@ -82,7 +82,7 @@ namespace lib4neuro { * * @return */ - LIB4NEURO_API std::vector<double>* get_parameters() override; + LIB4NEURO_API std::vector<double> get_parameters() override; }; } diff --git a/src/LearningMethods/GradientDescentSingleItem.cpp b/src/LearningMethods/GradientDescentSingleItem.cpp index 1340f33e4a9fda0977fd111e8e2af9174217099a..6d33cbdd51c5789c876df067bfac4055d1ecf948 100644 --- a/src/LearningMethods/GradientDescentSingleItem.cpp +++ b/src/LearningMethods/GradientDescentSingleItem.cpp @@ -61,7 +61,7 @@ namespace lib4neuro { size_t iter_idx = this->maximum_niters; size_t dim = ef.get_network_instance()->get_n_biases() + ef.get_network_instance()->get_n_weights(); - std::vector<double> parameter_vector = *ef.get_parameters(); + std::vector<double> parameter_vector = ef.get_parameters(); std::vector<double> gradient_vector(dim); std::vector<double> search_direction(dim); std::vector<double> error_vector(ef.get_network_instance()->get_n_outputs()); @@ -73,7 +73,7 @@ namespace lib4neuro { updated_elements = 0; std::fill(search_direction.begin(), search_direction.end(), 0); for( size_t i = 0; i < ef.get_dataset()->get_n_elements(); ++i){ - error = ef.eval_single_item_by_idx( i, ef.get_parameters(), error_vector ); + error = ef.eval_single_item_by_idx( i, &parameter_vector, error_vector ); if( error > max_error ){ max_error = error; @@ -105,10 +105,10 @@ namespace lib4neuro { } - std::vector<double>* GradientDescentSingleItem::get_parameters() { + std::vector<double> GradientDescentSingleItem::get_parameters() { // std::shared_ptr<std::vector<double>> ret; // ret.reset(&this->optimal_parameters); - return this->optimal_parameters; + return std::vector<double>(*this->optimal_parameters); } 
} diff --git a/src/LearningMethods/GradientDescentSingleItem.h b/src/LearningMethods/GradientDescentSingleItem.h index 49e8d20d8707f34f6be1b204d92b403632e6743e..61084edcd1cdfb3b8feb83c3f336dc0602b7cad0 100644 --- a/src/LearningMethods/GradientDescentSingleItem.h +++ b/src/LearningMethods/GradientDescentSingleItem.h @@ -97,7 +97,7 @@ namespace lib4neuro { * * @return */ - LIB4NEURO_API std::vector<double>* get_parameters() override; + LIB4NEURO_API std::vector<double> get_parameters() override; }; } diff --git a/src/LearningMethods/LearningMethod.h b/src/LearningMethods/LearningMethod.h index 7173ce75870c1df8b58dd4dd2d0a31ec96de8507..29b36233447bb4d2765e1cc1ff451845a83f1132 100644 --- a/src/LearningMethods/LearningMethod.h +++ b/src/LearningMethods/LearningMethod.h @@ -24,7 +24,7 @@ namespace lib4neuro { /** * Updates the optimal weight&bias settings in the passed vector */ - virtual std::vector<double>* get_parameters() = 0; + virtual std::vector<double> get_parameters() = 0; }; class GradientLearningMethod : public LearningMethod { diff --git a/src/LearningMethods/LearningSequence.cpp b/src/LearningMethods/LearningSequence.cpp index 39502e6f983e398e8addcdc49b0358243eeff2c9..33e943cd915bb9a8a6664ab2c70e446990d87bda 100644 --- a/src/LearningMethods/LearningSequence.cpp +++ b/src/LearningMethods/LearningSequence.cpp @@ -18,14 +18,8 @@ namespace lib4neuro { LearningSequence::~LearningSequence() = default; - std::vector<double>* LearningSequence::get_parameters() { - if( this->learning_sequence.size() > 0 ){ - return this->learning_sequence.at(0).get()->get_parameters( ); - } - -// std::shared_ptr<std::vector<double>> ret; -// ret.reset(); - return nullptr; + std::vector<double> LearningSequence::get_parameters() { + return this->best_parameters; } void LearningSequence::add_learning_method(std::shared_ptr<LearningMethod> method) { @@ -44,6 +38,7 @@ namespace lib4neuro { int mcycles = this->max_number_of_cycles, cycle_idx = 0; // std::shared_ptr<std::vector<double>> 
best_params = std::make_shared<std::vector<double>>(this->best_parameters); + std::vector<double> params; while( error > this->tol && mcycles != 0){ mcycles--; cycle_idx++; @@ -57,18 +52,19 @@ namespace lib4neuro { m->optimize( ef, ofs ); puts("*********************** 9"); - error = ef.eval(m->get_parameters()); + params = m->get_parameters(); + error = ef.eval(&params); puts("*********************** 10"); // std::shared_ptr<std::vector<double>> params; // params.reset(m->get_parameters()); - ef.get_network_instance()->copy_parameter_space(m->get_parameters()); + ef.get_network_instance()->copy_parameter_space(&params); if( error < the_best_error ){ the_best_error = error; - this->best_parameters = *ef.get_parameters(); + this->best_parameters = ef.get_parameters(); // best_params = ef.get_parameters(); // best_params.reset(ef.get_parameters().get()); } diff --git a/src/LearningMethods/LearningSequence.h b/src/LearningMethods/LearningSequence.h index 7643a3449ecca15fd00a9169216c8b76bd9b726e..1399081a83bff035b8a7316c1e41effbf991cbe3 100644 --- a/src/LearningMethods/LearningSequence.h +++ b/src/LearningMethods/LearningSequence.h @@ -65,7 +65,7 @@ namespace lib4neuro { * * @return */ - LIB4NEURO_API std::vector<double>* get_parameters() override; + LIB4NEURO_API std::vector<double> get_parameters() override; /** * diff --git a/src/LearningMethods/LevenbergMarquardt.cpp b/src/LearningMethods/LevenbergMarquardt.cpp index 98d248af1f7366f09d9a7cecda95d4607d2f070c..8320487a2ee892274655da98cdafd5853d9163a0 100644 --- a/src/LearningMethods/LevenbergMarquardt.cpp +++ b/src/LearningMethods/LevenbergMarquardt.cpp @@ -122,7 +122,7 @@ namespace lib4neuro { if( this->p_impl->batch_size > 0 ){ n_data_points = this->p_impl->batch_size; } - std::vector<double>* params_current = ef.get_parameters(); + std::vector<double>* params_current = new std::vector<double>(ef.get_parameters()); std::shared_ptr<std::vector<double>> params_tmp; params_tmp.reset(new std::vector<double>(n_parameters)); @@ 
-247,10 +247,10 @@ namespace lib4neuro { } - std::vector<double>* LevenbergMarquardt::get_parameters() { + std::vector<double> LevenbergMarquardt::get_parameters() { // std::shared_ptr<std::vector<double>> ret; // ret.reset(&this->p_impl->optimal_parameters); - return &this->p_impl->optimal_parameters; + return this->p_impl->optimal_parameters; } LevenbergMarquardt::~LevenbergMarquardt() = default; diff --git a/src/LearningMethods/LevenbergMarquardt.h b/src/LearningMethods/LevenbergMarquardt.h index b831b62a178f53e3d3e1626e53ba6d08b504fc88..0ae18154a334d1f25e35a7c7eaf353434d296fa6 100644 --- a/src/LearningMethods/LevenbergMarquardt.h +++ b/src/LearningMethods/LevenbergMarquardt.h @@ -47,7 +47,7 @@ namespace lib4neuro { LM_UPDATE_TYPE update_type, std::ofstream* ofs = nullptr); - std::vector<double>* get_parameters() override; + std::vector<double> get_parameters() override; ~LevenbergMarquardt(); }; diff --git a/src/LearningMethods/ParticleSwarm.cpp b/src/LearningMethods/ParticleSwarm.cpp index 23dca22026dfa4996ab6e11d62146a16f95a7c90..eadc193f284512fcf0cae3a8573bdebf24addeaa 100644 --- a/src/LearningMethods/ParticleSwarm.cpp +++ b/src/LearningMethods/ParticleSwarm.cpp @@ -306,14 +306,14 @@ namespace lib4neuro { if (this->particle_swarm.at(pi)) { delete this->particle_swarm.at(pi); } - this->particle_swarm.at(pi) = new Particle(&ef, ef.get_parameters(), this->radius_factor); + this->particle_swarm.at(pi) = new Particle(&ef, new std::vector<double>(ef.get_parameters()), this->radius_factor); } this->radius_factor *= 1.25; // if (!this->p_min_glob) { // this->p_min_glob = new std::vector<double>(this->func_dim); // } else { - this->p_min_glob->resize(this->func_dim); + this->p_min_glob.resize(this->func_dim); // } size_t outer_it = 0; @@ -333,7 +333,7 @@ namespace lib4neuro { double euclidean_dist; double current_err = -1; - this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value); + this->determine_optimal_coordinate_and_value(this->p_min_glob, 
optimal_value); // for(unsigned int i = 0; i < this->n_particles; ++i){ // this->particle_swarm[i]->print_coordinate(); // } @@ -346,10 +346,10 @@ namespace lib4neuro { ////////////////////////////////////////////////// // Clustering algorithm - termination condition // ////////////////////////////////////////////////// - particle = this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value); + particle = this->determine_optimal_coordinate_and_value(this->p_min_glob, optimal_value); - if (std::find(global_best_vec.begin(), global_best_vec.end(), *this->p_min_glob) == global_best_vec.end()) { - global_best_vec.emplace_back(*this->p_min_glob); // TODO rewrite as std::set + if (std::find(global_best_vec.begin(), global_best_vec.end(), this->p_min_glob) == global_best_vec.end()) { + global_best_vec.emplace_back(this->p_min_glob); // TODO rewrite as std::set } cluster.insert(particle); @@ -373,7 +373,7 @@ namespace lib4neuro { for (size_t pi = 0; pi < this->n_particles; pi++) { particle = this->particle_swarm.at(pi); - tmp_velocity = particle->change_coordinate(this->w, this->c1, this->c2, *this->p_min_glob, + tmp_velocity = particle->change_coordinate(this->w, this->c1, this->c2, this->p_min_glob, global_best_vec); // particle->print_coordinate(); @@ -414,7 +414,7 @@ namespace lib4neuro { // } // std::shared_ptr<std::vector<double>> coord = std::make_shared<std::vector<double>>(this->p_min_glob); - current_err = ef.eval(this->p_min_glob); + current_err = ef.eval(&this->p_min_glob); COUT_DEBUG(std::string("Iteration: ") << (unsigned int)(outer_it) << ". Total error: " << current_err @@ -447,7 +447,7 @@ namespace lib4neuro { << ". Objective function value: " << optimal_value << "." 
<< std::endl ); - this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value); + this->determine_optimal_coordinate_and_value(this->p_min_glob, optimal_value); //TODO rewrite following output using COUT_INFO if (outer_it < this->iter_max) { /* Convergence reached */ @@ -459,7 +459,7 @@ namespace lib4neuro { // std::shared_ptr<std::vector<double>> coord; // coord.reset(this->p_min_glob); - ef.get_network_instance()->copy_parameter_space(this->p_min_glob); + ef.get_network_instance()->copy_parameter_space(&this->p_min_glob); delete centroid; } @@ -534,7 +534,7 @@ namespace lib4neuro { return std::sqrt(dist); } - std::vector<double>* ParticleSwarm::get_parameters() { + std::vector<double> ParticleSwarm::get_parameters() { // std::shared_ptr<std::vector<double>> ret; // ret.reset(&this->p_min_glob); return this->p_min_glob; diff --git a/src/LearningMethods/ParticleSwarm.h b/src/LearningMethods/ParticleSwarm.h index 6cd389f6bd1b97cfb5331d0e9d1bc701021d8219..ac6d6c9d8cd921015ae8e8f12009dff1a2fa0e75 100644 --- a/src/LearningMethods/ParticleSwarm.h +++ b/src/LearningMethods/ParticleSwarm.h @@ -200,7 +200,7 @@ namespace lib4neuro { /** * Coordinates of the found global minima */ - std::vector<double>* p_min_glob = new std::vector<double>; + std::vector<double> p_min_glob; protected: /** @@ -309,7 +309,7 @@ namespace lib4neuro { * * @return */ - LIB4NEURO_API std::vector<double>* get_parameters() override; + LIB4NEURO_API std::vector<double> get_parameters() override; }; } diff --git a/src/LearningMethods/RandomSolution.cpp b/src/LearningMethods/RandomSolution.cpp index d022e8e4d62f89fe0f9138a2a5cff2544a3ec1d3..f5a187de1b6c8c73e95832671d0d8f95a53d78cc 100644 --- a/src/LearningMethods/RandomSolution.cpp +++ b/src/LearningMethods/RandomSolution.cpp @@ -16,25 +16,18 @@ namespace lib4neuro { RandomSolution::~RandomSolution() {} - std::vector<double>* RandomSolution::get_parameters() { + std::vector<double> RandomSolution::get_parameters() { // 
std::shared_ptr<std::vector<double>> ret; // ret.reset(&this->optimal_parameters); // return ret; - return &this->optimal_parameters; + return this->optimal_parameters; } void RandomSolution::optimize(lib4neuro::ErrorFunction &ef, std::ofstream *ofs) { ef.get_network_instance()->randomize_parameters(); - auto tmp = ef.get_parameters(); - - this->optimal_parameters = *tmp; - - if(tmp) { - delete tmp; - tmp = nullptr; - } + this->optimal_parameters = ef.get_parameters(); COUT_INFO("Producing a random solution... error: " << ef.eval(&this->optimal_parameters) << std::endl); } diff --git a/src/LearningMethods/RandomSolution.h b/src/LearningMethods/RandomSolution.h index 4b66a060bf54fe0dbec4c3a9e095fd7879c1d8dc..cdcda7df9edc4f1b183e9df867c255ab865f801a 100644 --- a/src/LearningMethods/RandomSolution.h +++ b/src/LearningMethods/RandomSolution.h @@ -29,7 +29,7 @@ namespace lib4neuro { void optimize(lib4neuro::ErrorFunction &ef, std::ofstream *ofs = nullptr) override; - std::vector<double>* get_parameters() override; + std::vector<double> get_parameters() override; }; diff --git a/src/Solvers/DESolver.cpp b/src/Solvers/DESolver.cpp index b8f3caa6bfa6cf8b47420dc889a1a81dbfbb5c49..50fcb254972ef9f481da59ee9afa0d8d60c2e74c 100644 --- a/src/Solvers/DESolver.cpp +++ b/src/Solvers/DESolver.cpp @@ -410,7 +410,8 @@ namespace lib4neuro { learning_method.optimize(total_error); // std::shared_ptr<std::vector<double>> params; // params.reset(learning_method.get_parameters()); - this->solution->copy_parameter_space(learning_method.get_parameters()); + std::vector<double> params = learning_method.get_parameters(); + this->solution->copy_parameter_space(&params); printf("error after optimization: %f\n", total_error.eval(nullptr)); }