From 857513738829a044f4f736fc65b9000b0e9f473c Mon Sep 17 00:00:00 2001 From: Martin Beseda <martin.beseda@vsb.cz> Date: Thu, 21 Mar 2019 02:55:56 +0100 Subject: [PATCH] WIP: Trying to solve 'core dumped' error... --- src/ErrorFunction/ErrorFunctions.cpp | 103 +++++++++--------- src/ErrorFunction/ErrorFunctions.h | 92 ++++++++-------- src/LearningMethods/GradientDescent.cpp | 13 ++- src/LearningMethods/GradientDescent.h | 6 +- src/LearningMethods/GradientDescentBB.cpp | 7 +- .../GradientDescentSingleItem.cpp | 11 +- src/LearningMethods/LearningSequence.cpp | 14 ++- src/LearningMethods/LevenbergMarquardt.cpp | 5 +- src/LearningMethods/ParticleSwarm.cpp | 15 ++- src/LearningMethods/RandomSolution.cpp | 2 +- src/Network/NeuralNetwork.cpp | 6 +- src/Network/NeuralNetwork.h | 6 +- src/Network/NeuralNetworkSum.cpp | 4 +- src/Network/NeuralNetworkSum.h | 16 +-- src/Solvers/DESolver.cpp | 4 +- src/Solvers/DESolver.h | 2 +- src/examples/net_test_1.cpp | 4 +- src/examples/net_test_2.cpp | 4 +- src/examples/network_serialization.cpp | 4 +- src/examples/seminar.cpp | 2 +- 20 files changed, 166 insertions(+), 154 deletions(-) diff --git a/src/ErrorFunction/ErrorFunctions.cpp b/src/ErrorFunction/ErrorFunctions.cpp index 02744c67..3d53690b 100644 --- a/src/ErrorFunction/ErrorFunctions.cpp +++ b/src/ErrorFunction/ErrorFunctions.cpp @@ -78,7 +78,7 @@ namespace lib4neuro { std::shared_ptr<std::vector<double>> ErrorFunction::get_parameters() { std::shared_ptr<std::vector<double>> output; output.reset(new std::vector<double>(this->net->get_n_weights() + this->net->get_n_biases())); -// std::vector<double>* output = new std::vector<double>(this->net->get_n_weights() + this->net->get_n_biases()); +// std::shared_ptr<std::vector<double>>output = new std::vector<double>(this->net->get_n_weights() + this->net->get_n_biases()); size_t i = 0; @@ -101,9 +101,9 @@ namespace lib4neuro { this->dimension = net->get_n_weights() + net->get_n_biases(); } - double MSE::eval_on_single_input(std::vector<double>* input, - std::vector<double>* output, - std::vector<double>* weights) { + double MSE::eval_on_single_input(std::shared_ptr<std::vector<double>>input, + std::shared_ptr<std::vector<double>>output, + std::shared_ptr<std::vector<double>> weights) { std::vector<double> predicted_output(this->get_network_instance()->get_n_outputs()); this->net->eval_single(*input, predicted_output, weights); double result = 0; @@ -119,7 +119,7 @@ namespace lib4neuro { double MSE::eval_on_data_set(lib4neuro::DataSet* data_set, std::ofstream* results_file_path, - std::vector<double>* weights, + std::shared_ptr<std::vector<double>> weights, bool denormalize_data, bool verbose) { size_t dim_in = data_set->get_input_dim(); @@ -268,7 +268,7 @@ namespace lib4neuro { double MSE::eval_on_data_set(DataSet* data_set, std::string results_file_path, - std::vector<double>* weights, + std::shared_ptr<std::vector<double>> weights, bool verbose) { std::ofstream ofs(results_file_path); if (ofs.is_open()) { @@ -284,7 +284,7 @@ namespace lib4neuro { } double MSE::eval_on_data_set(DataSet* data_set, - std::vector<double>* weights, + std::shared_ptr<std::vector<double>> weights, bool verbose) { return this->eval_on_data_set(data_set, nullptr, @@ -293,7 +293,7 @@ namespace lib4neuro { verbose); } - double MSE::eval(std::vector<double>* weights, + double MSE::eval(std::shared_ptr<std::vector<double>> weights, bool denormalize_data, bool verbose) { return this->eval_on_data_set(this->ds, @@ -303,7 +303,7 @@ namespace lib4neuro { verbose); } - double 
MSE::eval_on_test_data(std::vector<double>* weights, + double MSE::eval_on_test_data(std::shared_ptr<std::vector<double>> weights, bool verbose) { return this->eval_on_data_set(this->ds_test, weights, @@ -311,7 +311,7 @@ namespace lib4neuro { } double MSE::eval_on_test_data(std::string results_file_path, - std::vector<double>* weights, + std::shared_ptr<std::vector<double>> weights, bool verbose) { return this->eval_on_data_set(this->ds_test, results_file_path, @@ -320,7 +320,7 @@ namespace lib4neuro { } double MSE::eval_on_test_data(std::ofstream* results_file_path, - std::vector<double>* weights, + std::shared_ptr<std::vector<double>> weights, bool verbose) { return this->eval_on_data_set(this->ds_test, results_file_path, @@ -345,11 +345,11 @@ namespace lib4neuro { } std::vector<double> error_derivative(dim_out); - + std::shared_ptr<std::vector<double>> params_tmp = std::make_shared<std::vector<double>>(params); for (auto el: *data) { // Iterate through every element in the test set this->net->eval_single(el.first, error_derivative, - &params); // Compute the net output and store it into 'output' variable + params_tmp); // Compute the net output and store it into 'output' variable for (size_t j = 0; j < dim_out; ++j) { error_derivative[j] = 2.0 * (error_derivative[j] - el.second[j]); //real - expected result @@ -362,9 +362,9 @@ namespace lib4neuro { } } - double MSE::calculate_single_residual(std::vector<double>* input, - std::vector<double>* output, - std::vector<double>* parameters) { + double MSE::calculate_single_residual(std::shared_ptr<std::vector<double>>input, + std::shared_ptr<std::vector<double>>output, + std::shared_ptr<std::vector<double>>parameters) { //TODO maybe move to the general ErrorFunction //TODO check input vector sizes - they HAVE TO be allocated before calling this function @@ -372,9 +372,9 @@ namespace lib4neuro { return -this->eval_on_single_input(input, output, parameters); } - void MSE::calculate_residual_gradient(std::vector<double>* input, - std::vector<double>* output, - std::vector<double>* gradient, + void MSE::calculate_residual_gradient(std::shared_ptr<std::vector<double>>input, + std::shared_ptr<std::vector<double>>output, + std::shared_ptr<std::vector<double>>gradient, double h) { //TODO check input vector sizes - they HAVE TO be allocated before calling this function @@ -394,11 +394,11 @@ namespace lib4neuro { if(delta != 0) { /* Computation of f_val1 = f(x + delta) */ parameters->at(i) = former_parameter_value + delta; - f_val1 = this->calculate_single_residual(input, output, parameters.get()); + f_val1 = this->calculate_single_residual(input, output, parameters); /* Computation of f_val2 = f(x - delta) */ parameters->at(i) = former_parameter_value - delta; - f_val2 = this->calculate_single_residual(input, output, parameters.get()); + f_val2 = this->calculate_single_residual(input, output, parameters); gradient->at(i) = (f_val1 - f_val2) / (2*delta); } @@ -432,10 +432,11 @@ namespace lib4neuro { std::fill(grad_sum.begin(), grad_sum.end(), 0.0); this->net->write_weights(); this->net->write_biases(); + std::shared_ptr<std::vector<double>> params_tmp = std::make_shared<std::vector<double>>(params); for (auto el: *data) { // Iterate through every element in the test set this->net->eval_single_debug(el.first, error_derivative, - &params); // Compute the net output and store it into 'output' variable + params_tmp); // Compute the net output and store it into 'output' variable std::cout << "Input["; for( auto v: el.first){ std::cout << v << ", "; } @@ -484,7 +485,7 
@@ namespace lib4neuro { std::cout << "]" << std::endl << std::endl; } - double MSE::eval_single_item_by_idx(size_t i, std::vector<double> *parameter_vector, + double MSE::eval_single_item_by_idx(size_t i, std::shared_ptr<std::vector<double>> parameter_vector, std::vector<double> &error_vector) { double output = 0, val; @@ -504,7 +505,7 @@ namespace lib4neuro { ErrorSum::ErrorSum() { this->summand = nullptr; - this->summand_coefficient = nullptr; +// this->summand_coefficient = nullptr; this->dimension = 0; } @@ -512,12 +513,12 @@ namespace lib4neuro { if (this->summand) { delete this->summand; } - if (this->summand_coefficient) { - delete this->summand_coefficient; - } +// if (this->summand_coefficient) { +// delete this->summand_coefficient; +// } } - double ErrorSum::eval_on_test_data(std::vector<double>* weights, + double ErrorSum::eval_on_test_data(std::shared_ptr<std::vector<double>> weights, bool verbose) { //TODO take care of the case, when there are no test data @@ -528,7 +529,7 @@ namespace lib4neuro { ef = this->summand->at(i); if (ef) { - output += ef->eval_on_test_data(weights) * this->summand_coefficient->at(i); + output += ef->eval_on_test_data(weights) * this->summand_coefficient.at(i); } } @@ -536,7 +537,7 @@ namespace lib4neuro { } double ErrorSum::eval_on_test_data(std::string results_file_path, - std::vector<double>* weights, + std::shared_ptr<std::vector<double>> weights, bool verbose) { THROW_NOT_IMPLEMENTED_ERROR(); @@ -544,14 +545,14 @@ namespace lib4neuro { } double ErrorSum::eval_on_test_data(std::ofstream* results_file_path, - std::vector<double>* weights, + std::shared_ptr<std::vector<double>> weights, bool verbose) { THROW_NOT_IMPLEMENTED_ERROR(); return -1; } double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set, - std::vector<double>* weights, + std::shared_ptr<std::vector<double>> weights, bool verbose) { THROW_NOT_IMPLEMENTED_ERROR(); @@ -560,7 +561,7 @@ namespace lib4neuro { double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set, std::string results_file_path, - std::vector<double>* weights, + std::shared_ptr<std::vector<double>> weights, bool verbose) { THROW_NOT_IMPLEMENTED_ERROR(); @@ -569,14 +570,14 @@ namespace lib4neuro { double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set, std::ofstream* results_file_path, - std::vector<double>* weights, + std::shared_ptr<std::vector<double>> weights, bool denormalize_data, bool verbose) { THROW_NOT_IMPLEMENTED_ERROR(); return -1; } - double ErrorSum::eval(std::vector<double>* weights, + double ErrorSum::eval(std::shared_ptr<std::vector<double>> weights, bool denormalize_data, bool verbose) { double output = 0.0; @@ -586,14 +587,14 @@ namespace lib4neuro { ef = this->summand->at(i); if (ef) { - output += ef->eval(weights) * this->summand_coefficient->at(i); + output += ef->eval(weights) * this->summand_coefficient.at(i); } } return output; } - double ErrorSum::eval_single_item_by_idx(size_t i, std::vector<double> *parameter_vector, + double ErrorSum::eval_single_item_by_idx(size_t i, std::shared_ptr<std::vector<double>>parameter_vector, std::vector<double> &error_vector) { double output = 0.0; ErrorFunction* ef = nullptr; @@ -604,10 +605,10 @@ namespace lib4neuro { ef = this->summand->at(i); if (ef) { - output += ef->eval_single_item_by_idx(i, parameter_vector, error_vector_mem) * this->summand_coefficient->at(j); + output += ef->eval_single_item_by_idx(i, parameter_vector, error_vector_mem) * this->summand_coefficient.at(j); for( size_t k = 0; k < error_vector_mem.size(); ++k){ - 
error_vector[k] += error_vector_mem[k] * this->summand_coefficient->at(j); + error_vector[k] += error_vector_mem[k] * this->summand_coefficient.at(j); } } } @@ -623,7 +624,7 @@ namespace lib4neuro { ef = this->summand->at(i); if (ef) { - ef->calculate_error_gradient(params, grad, this->summand_coefficient->at(i) * alpha, batch); + ef->calculate_error_gradient(params, grad, this->summand_coefficient.at(i) * alpha, batch); } } } @@ -641,7 +642,7 @@ namespace lib4neuro { ef = this->summand->at(i); if (ef) { - ef->calculate_error_gradient(params, grad, this->summand_coefficient->at(i) * alpha, batch); + ef->calculate_error_gradient(params, grad, this->summand_coefficient.at(i) * alpha, batch); } } } @@ -652,10 +653,10 @@ namespace lib4neuro { } this->summand->push_back(F); - if (!this->summand_coefficient) { - this->summand_coefficient = new std::vector<double>(0); - } - this->summand_coefficient->push_back(alpha); +// if (!this->summand_coefficient) { +// this->summand_coefficient = new std::vector<double>(0); +// } + this->summand_coefficient.push_back(alpha); if (F) { if (F->get_dimension() > this->dimension) { @@ -687,16 +688,16 @@ namespace lib4neuro { }; - void ErrorSum::calculate_residual_gradient(std::vector<double>* input, - std::vector<double>* output, - std::vector<double>* gradient, + void ErrorSum::calculate_residual_gradient(std::shared_ptr<std::vector<double>>input, + std::shared_ptr<std::vector<double>>output, + std::shared_ptr<std::vector<double>>gradient, double h) { THROW_NOT_IMPLEMENTED_ERROR(); } - double ErrorSum::calculate_single_residual(std::vector<double>* input, - std::vector<double>* output, - std::vector<double>* parameters) { + double ErrorSum::calculate_single_residual(std::shared_ptr<std::vector<double>>input, + std::shared_ptr<std::vector<double>>output, + std::shared_ptr<std::vector<double>>parameters) { THROW_NOT_IMPLEMENTED_ERROR(); return 0; diff --git a/src/ErrorFunction/ErrorFunctions.h b/src/ErrorFunction/ErrorFunctions.h index dc7c6a49..23c146c4 100644 --- a/src/ErrorFunction/ErrorFunctions.h +++ b/src/ErrorFunction/ErrorFunctions.h @@ -27,7 +27,7 @@ namespace lib4neuro { * @param weights * @return */ - virtual double eval(std::vector<double>* weights = nullptr, bool denormalize_data=false, + virtual double eval(std::shared_ptr<std::vector<double>> weights = nullptr, bool denormalize_data=false, bool verbose = false) = 0; /** @@ -100,7 +100,7 @@ namespace lib4neuro { /** * */ - virtual double eval_on_test_data(std::vector<double>* weights = nullptr, bool verbose = false) = 0; + virtual double eval_on_test_data(std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false) = 0; /** * @@ -108,7 +108,7 @@ namespace lib4neuro { * @param weights * @return */ - virtual double eval_on_test_data(std::string results_file_path, std::vector<double>* weights = nullptr, + virtual double eval_on_test_data(std::string results_file_path, std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false) = 0; /** @@ -117,7 +117,7 @@ namespace lib4neuro { * @param weights * @return */ - virtual double eval_on_test_data(std::ofstream* results_file_path, std::vector<double>* weights = nullptr, + virtual double eval_on_test_data(std::ofstream* results_file_path, std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false) = 0; /** @@ -126,7 +126,7 @@ namespace lib4neuro { * @param weights * @return */ - virtual double eval_on_data_set(DataSet* data_set, std::vector<double>* weights = nullptr, + virtual double 
eval_on_data_set(DataSet* data_set, std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false) = 0; /** @@ -137,7 +137,7 @@ namespace lib4neuro { * @return */ virtual double - eval_on_data_set(DataSet* data_set, std::string results_file_path, std::vector<double>* weights = nullptr, + eval_on_data_set(DataSet* data_set, std::string results_file_path, std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false) = 0; /** @@ -149,7 +149,7 @@ namespace lib4neuro { */ virtual double eval_on_data_set(DataSet* data_set, std::ofstream* results_file_path = nullptr, - std::vector<double>* weights = nullptr, + std::shared_ptr<std::vector<double>> weights = nullptr, bool denormalize_data = true, bool verbose = false) = 0; @@ -160,7 +160,7 @@ namespace lib4neuro { * @param error_vector * @return */ - virtual double eval_single_item_by_idx(size_t i, std::vector<double> *parameter_vector, std::vector<double> &error_vector) = 0; + virtual double eval_single_item_by_idx(size_t i, std::shared_ptr<std::vector<double>> parameter_vector, std::vector<double> &error_vector) = 0; /** * @@ -177,9 +177,9 @@ namespace lib4neuro { * @param h */ virtual void - calculate_residual_gradient(std::vector<double>* input, - std::vector<double>* output, - std::vector<double>* gradient, + calculate_residual_gradient(std::shared_ptr<std::vector<double>> input, + std::shared_ptr<std::vector<double>> output, + std::shared_ptr<std::vector<double>> gradient, double h = 1e-3) = 0; /** @@ -190,9 +190,9 @@ namespace lib4neuro { * @return */ virtual double - calculate_single_residual(std::vector<double>* input, - std::vector<double>* output, - std::vector<double>* parameters = nullptr) = 0; + calculate_single_residual(std::shared_ptr<std::vector<double>> input, + std::shared_ptr<std::vector<double>> output, + std::shared_ptr<std::vector<double>> parameters = nullptr) = 0; protected: @@ -237,7 +237,7 @@ namespace lib4neuro { * @param weights * @return */ - LIB4NEURO_API double eval(std::vector<double>* weights = nullptr, + LIB4NEURO_API double eval(std::shared_ptr<std::vector<double>> weights = nullptr, bool denormalize_data = false, bool verbose = false) override; @@ -275,9 +275,9 @@ namespace lib4neuro { * @return */ LIB4NEURO_API - virtual double calculate_single_residual(std::vector<double>* input, - std::vector<double>* output, - std::vector<double>* parameters) override; + virtual double calculate_single_residual(std::shared_ptr<std::vector<double>> input, + std::shared_ptr<std::vector<double>> output, + std::shared_ptr<std::vector<double>> parameters) override; /** * Compute gradient of the residual function f(x) = 0 - MSE(x) for a specific input x. 
@@ -288,9 +288,9 @@ namespace lib4neuro { * @param[in] h Step used in the central difference */ LIB4NEURO_API void - calculate_residual_gradient(std::vector<double>* input, - std::vector<double>* output, - std::vector<double>* gradient, + calculate_residual_gradient(std::shared_ptr<std::vector<double>> input, + std::shared_ptr<std::vector<double>> output, + std::shared_ptr<std::vector<double>> gradient, double h=1e-3) override; /** @@ -298,16 +298,16 @@ namespace lib4neuro { * @param input * @return */ - LIB4NEURO_API double eval_on_single_input(std::vector<double>* input, - std::vector<double>* output, - std::vector<double>* weights = nullptr); + LIB4NEURO_API double eval_on_single_input(std::shared_ptr<std::vector<double>> input, + std::shared_ptr<std::vector<double>> output, + std::shared_ptr<std::vector<double>> weights = nullptr); /** * * @param weights * @return */ - LIB4NEURO_API double eval_on_test_data(std::vector<double>* weights = nullptr, bool verbose = false) override; + LIB4NEURO_API double eval_on_test_data(std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false) override; /** * @@ -316,7 +316,7 @@ namespace lib4neuro { * @return */ LIB4NEURO_API double eval_on_test_data(std::string results_file_path = nullptr, - std::vector<double>* weights = nullptr, + std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false); /** @@ -326,7 +326,7 @@ namespace lib4neuro { * @return */ LIB4NEURO_API double eval_on_test_data(std::ofstream* results_file_path, - std::vector<double>* weights = nullptr, + std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false) override; /** @@ -338,7 +338,7 @@ namespace lib4neuro { */ LIB4NEURO_API double eval_on_data_set(DataSet* data_set, std::ofstream* results_file_path, - std::vector<double>* weights = nullptr, + std::shared_ptr<std::vector<double>> weights = nullptr, bool denormalize_data = false, bool verbose = false) override; @@ -349,7 +349,7 @@ namespace lib4neuro { * @return */ LIB4NEURO_API double eval_on_data_set(DataSet* data_set, - std::vector<double>* weights = nullptr, + std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false) override; /** @@ -361,7 +361,7 @@ namespace lib4neuro { */ LIB4NEURO_API double eval_on_data_set(DataSet* data_set, std::string results_file_path, - std::vector<double>* weights = nullptr, + std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false) override; /** @@ -371,7 +371,7 @@ namespace lib4neuro { * @param error_vector * @return */ - LIB4NEURO_API virtual double eval_single_item_by_idx(size_t i, std::vector<double> *parameter_vector, std::vector<double> &error_vector) override; + LIB4NEURO_API virtual double eval_single_item_by_idx(size_t i, std::shared_ptr<std::vector<double>> parameter_vector, std::vector<double> &error_vector) override; /** * @@ -398,7 +398,7 @@ namespace lib4neuro { * @param weights * @return */ - LIB4NEURO_API double eval(std::vector<double>* weights = nullptr, + LIB4NEURO_API double eval(std::shared_ptr<std::vector<double>> weights = nullptr, bool denormalize_data = false, bool verbose = false); @@ -407,7 +407,7 @@ namespace lib4neuro { * @param weights * @return */ - LIB4NEURO_API double eval_on_test_data(std::vector<double>* weights = nullptr, bool verbose = false) override; + LIB4NEURO_API double eval_on_test_data(std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false) override; /** * @@ -416,7 +416,7 @@ namespace lib4neuro { * @return */ LIB4NEURO_API double 
eval_on_test_data(std::string results_file_path, - std::vector<double>* weights = nullptr, + std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false) override; /** @@ -426,7 +426,7 @@ namespace lib4neuro { * @return */ LIB4NEURO_API double eval_on_test_data(std::ofstream* results_file_path, - std::vector<double>* weights = nullptr, + std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false) override; /** @@ -436,7 +436,7 @@ namespace lib4neuro { * @return */ LIB4NEURO_API double eval_on_data_set(DataSet* data_set, - std::vector<double>* weights = nullptr, + std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false) override; /** @@ -448,7 +448,7 @@ namespace lib4neuro { */ LIB4NEURO_API double eval_on_data_set(DataSet* data_set, std::string results_file_path, - std::vector<double>* weights = nullptr, + std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false) override; /** @@ -460,7 +460,7 @@ namespace lib4neuro { */ LIB4NEURO_API double eval_on_data_set(DataSet* data_set, std::ofstream* results_file_path, - std::vector<double>* weights = nullptr, + std::shared_ptr<std::vector<double>> weights = nullptr, bool denormalize_data = true, bool verbose = false) override; @@ -471,7 +471,7 @@ namespace lib4neuro { * @param error_vector * @return */ - LIB4NEURO_API virtual double eval_single_item_by_idx(size_t i, std::vector<double> *parameter_vector, std::vector<double> &error_vector) override; + LIB4NEURO_API virtual double eval_single_item_by_idx(size_t i, std::shared_ptr<std::vector<double>> parameter_vector, std::vector<double> &error_vector) override; /** * @@ -518,15 +518,15 @@ namespace lib4neuro { size_t batch = 0) override; LIB4NEURO_API void - calculate_residual_gradient(std::vector<double>* input, - std::vector<double>* output, - std::vector<double>* gradient, + calculate_residual_gradient(std::shared_ptr<std::vector<double>> input, + std::shared_ptr<std::vector<double>> output, + std::shared_ptr<std::vector<double>> gradient, double h = 1e-3) override; LIB4NEURO_API double - calculate_single_residual(std::vector<double>* input, - std::vector<double>* output, - std::vector<double>* parameters = nullptr) override; + calculate_single_residual(std::shared_ptr<std::vector<double>> input, + std::shared_ptr<std::vector<double>> output, + std::shared_ptr<std::vector<double>> parameters = nullptr) override; /** @@ -543,7 +543,7 @@ namespace lib4neuro { protected: std::vector<ErrorFunction*>* summand; - std::vector<double>* summand_coefficient; + std::vector<double> summand_coefficient; }; } diff --git a/src/LearningMethods/GradientDescent.cpp b/src/LearningMethods/GradientDescent.cpp index e041bcd3..390ca504 100644 --- a/src/LearningMethods/GradientDescent.cpp +++ b/src/LearningMethods/GradientDescent.cpp @@ -47,9 +47,9 @@ namespace lib4neuro { lib4neuro::ErrorFunction &ef, double error_previous, double step_coefficient, - std::vector<double> *direction, - std::vector<double> *parameters_before, - std::vector<double> *parameters_after + std::shared_ptr<std::vector<double>> direction, + std::shared_ptr<std::vector<double>> parameters_before, + std::shared_ptr<std::vector<double>> parameters_after ) { size_t i; @@ -124,7 +124,7 @@ namespace lib4neuro { std::fill(gradient_current->begin(), gradient_current->end(), 0.0); std::fill(gradient_prev->begin(), gradient_prev->end(), 0.0); - val = ef.eval(params_current.get()); + val = ef.eval(params_current); double coeff = 1; bool it_analyzed = false; size_t 
counter_good_guesses = 0, counter_bad_guesses = 0, counter_simplified_direction_good = 0, counter_simplified_direction_bad = 0; @@ -181,7 +181,7 @@ namespace lib4neuro { for (i = 0; i < gradient_current->size(); ++i) { (*params_prev)[i] = (*params_current)[i] - cooling * gamma * (*gradient_current)[i]; } - val = ef.eval(params_prev.get()); + val = ef.eval(params_prev); // val = prev_val + 1.0; // coeff = 1; @@ -296,7 +296,8 @@ namespace lib4neuro { } this->optimal_parameters = *params_current; - ef.get_network_instance()->copy_parameter_space( &this->optimal_parameters ); + std::shared_ptr<std::vector<double>> params = std::make_shared<std::vector<double>>(this->optimal_parameters); + ef.get_network_instance()->copy_parameter_space( params ); // delete gradient_current; // delete gradient_prev; diff --git a/src/LearningMethods/GradientDescent.h b/src/LearningMethods/GradientDescent.h index 96b6429b..ab507466 100644 --- a/src/LearningMethods/GradientDescent.h +++ b/src/LearningMethods/GradientDescent.h @@ -86,9 +86,9 @@ namespace lib4neuro { lib4neuro::ErrorFunction &ef, double error_previous, double step_coefficient, - std::vector<double> * direction, - std::vector<double> *parameters_before, - std::vector<double> *parameters_after + std::shared_ptr<std::vector<double>> direction, + std::shared_ptr<std::vector<double>> parameters_before, + std::shared_ptr<std::vector<double>> parameters_after ); public: diff --git a/src/LearningMethods/GradientDescentBB.cpp b/src/LearningMethods/GradientDescentBB.cpp index 86d67b6d..5577777f 100644 --- a/src/LearningMethods/GradientDescentBB.cpp +++ b/src/LearningMethods/GradientDescentBB.cpp @@ -68,7 +68,7 @@ namespace lib4neuro { std::fill(gradient_current->begin(), gradient_current->end(), 0.0); std::fill(gradient_prev->begin(), gradient_prev->end(), 0.0); - val = ef.eval(params_current.get()); + val = ef.eval(params_current); val_best = val; // this-> batch = 0; @@ -143,7 +143,7 @@ namespace lib4neuro { params_prev = params_current; params_current = ptr_mem; - val = ef.eval(params_current.get()); + val = ef.eval(params_current); if( val < val_best ){ val_best = val; @@ -212,7 +212,8 @@ namespace lib4neuro { this->optimal_parameters = *params_best; // ef.analyze_error_gradient(*params_current, *gradient_current, 1.0, this->batch); - ef.get_network_instance()->copy_parameter_space(&this->optimal_parameters); + std::shared_ptr<std::vector<double>> params = std::make_shared<std::vector<double>>(this->optimal_parameters); + ef.get_network_instance()->copy_parameter_space(params); // // delete gradient_current; // delete gradient_prev; diff --git a/src/LearningMethods/GradientDescentSingleItem.cpp b/src/LearningMethods/GradientDescentSingleItem.cpp index ea6b41a4..953396e6 100644 --- a/src/LearningMethods/GradientDescentSingleItem.cpp +++ b/src/LearningMethods/GradientDescentSingleItem.cpp @@ -36,15 +36,15 @@ namespace lib4neuro { double value_shifted = value + 1.0; - std::vector<double> shifted_x(x); + std::shared_ptr<std::vector<double>> shifted_x = std::make_shared<std::vector<double>>(std::vector<double>(x)); while( value_shifted > value ){ alpha *= 0.5; for( size_t i = 0; i < x.size(); ++i ){ - shifted_x[ i ] = x [ i ] - alpha * d[ i ]; + (*shifted_x).at(i) = x.at(i) - alpha * d.at(i); } - value_shifted = f.eval( &shifted_x ); + value_shifted = f.eval( shifted_x ); } // std::cout << "Error reduction: " << value - value_shifted << std::endl; return alpha; @@ -73,7 +73,7 @@ namespace lib4neuro { updated_elements = 0; std::fill(search_direction.begin(), 
search_direction.end(), 0); for( size_t i = 0; i < ef.get_dataset()->get_n_elements(); ++i){ - error = ef.eval_single_item_by_idx( i, &parameter_vector, error_vector ); + error = ef.eval_single_item_by_idx( i, ef.get_parameters(), error_vector ); if( error > max_error ){ max_error = error; @@ -99,7 +99,8 @@ namespace lib4neuro { COUT_DEBUG("Iteration: " << iter << ", Total elements in train set: " << total_elements << ", # of elements with high error: " << updated_elements << ", max. error: " << max_error << std::endl); this->optimal_parameters = parameter_vector; - ef.get_network_instance()->copy_parameter_space( &this->optimal_parameters ); + std::shared_ptr<std::vector<double>> opt_params = std::make_shared<std::vector<double>>(this->optimal_parameters); + ef.get_network_instance()->copy_parameter_space( opt_params ); } diff --git a/src/LearningMethods/LearningSequence.cpp b/src/LearningMethods/LearningSequence.cpp index 6bd31a51..31e05b6a 100644 --- a/src/LearningMethods/LearningSequence.cpp +++ b/src/LearningMethods/LearningSequence.cpp @@ -43,6 +43,7 @@ namespace lib4neuro { double the_best_error = error; int mcycles = this->max_number_of_cycles, cycle_idx = 0; + std::shared_ptr<std::vector<double>> best_params = std::make_shared<std::vector<double>>(this->best_parameters); while( error > this->tol && mcycles != 0){ mcycles--; cycle_idx++; @@ -54,26 +55,27 @@ namespace lib4neuro { puts("*********************** 8"); m->optimize( ef, ofs ); - error = ef.eval(m->get_parameters().get()); - puts("*********************** 9"); - ef.get_network_instance()->copy_parameter_space(m->get_parameters().get()); - + error = ef.eval(m->get_parameters()); puts("*********************** 10"); + ef.get_network_instance()->copy_parameter_space(m->get_parameters()); + + + if( error < the_best_error ){ the_best_error = error; this->best_parameters = *ef.get_parameters(); } if( error <= this->tol ){ - ef.get_network_instance()->copy_parameter_space( &this->best_parameters ); + ef.get_network_instance()->copy_parameter_space( best_params ); return; } } COUT_DEBUG("Cycle: " << cycle_idx << ", the lowest error: " << the_best_error << std::endl ); } - ef.get_network_instance()->copy_parameter_space( &this->best_parameters ); + ef.get_network_instance()->copy_parameter_space( best_params ); } } \ No newline at end of file diff --git a/src/LearningMethods/LevenbergMarquardt.cpp b/src/LearningMethods/LevenbergMarquardt.cpp index 29873b0a..295f1a99 100644 --- a/src/LearningMethods/LevenbergMarquardt.cpp +++ b/src/LearningMethods/LevenbergMarquardt.cpp @@ -194,7 +194,7 @@ namespace lib4neuro { update_norm += update.at(i) * update.at(i); } update_norm = std::sqrt(update_norm); - current_err = ef.eval(params_tmp.get()); + current_err = ef.eval(params_tmp); /* Check, if the parameter update improved the function */ if(current_err < prev_err) { @@ -234,7 +234,8 @@ namespace lib4neuro { /* Store the optimized parameters */ this->p_impl->optimal_parameters = *params_current; - ef.get_network_instance()->copy_parameter_space(&this->p_impl->optimal_parameters); + std::shared_ptr<std::vector<double>> params = std::make_shared<std::vector<double>>(this->p_impl->optimal_parameters); + ef.get_network_instance()->copy_parameter_space(params); // delete params_tmp; diff --git a/src/LearningMethods/ParticleSwarm.cpp b/src/LearningMethods/ParticleSwarm.cpp index d1306543..ed4590e2 100644 --- a/src/LearningMethods/ParticleSwarm.cpp +++ b/src/LearningMethods/ParticleSwarm.cpp @@ -81,7 +81,8 @@ Particle::Particle(lib4neuro::ErrorFunction 
*ef, std::vector<double> *domain_bou (*this->optimal_coordinate)[i] = (*this->coordinate)[i]; } - this->optimal_value = this->ef->eval(this->coordinate); + std::shared_ptr<std::vector<double>> coord(this->coordinate); + this->optimal_value = this->ef->eval(coord); } @@ -118,7 +119,8 @@ Particle::Particle(lib4neuro::ErrorFunction *ef, std::vector<double> *central_sy (*this->optimal_coordinate)[i] = (*this->coordinate)[i]; } - this->optimal_value = this->ef->eval(this->coordinate); + std::shared_ptr<std::vector<double>> coord(this->coordinate); + this->optimal_value = this->ef->eval(coord); } @@ -208,7 +210,8 @@ double Particle::change_coordinate(double w, double c1, double c2, std::vector<d output += std::abs(vel_mem); } - vel_mem = this->ef->eval(this->coordinate); + std::shared_ptr<std::vector<double>> coord(this->coordinate); + vel_mem = this->ef->eval(coord); this->current_val = vel_mem; if(vel_mem < this->optimal_value){ @@ -410,7 +413,8 @@ namespace lib4neuro { // } // } - current_err = ef.eval(&this->p_min_glob); + std::shared_ptr<std::vector<double>> coord = std::make_shared<std::vector<double>>(this->p_min_glob); + current_err = ef.eval(coord); COUT_DEBUG(std::string("Iteration: ") << (unsigned int)(outer_it) << ". Total error: " << current_err @@ -453,7 +457,8 @@ namespace lib4neuro { COUT_INFO( std::endl << "Max number of iterations reached (" << outer_it << ")! Objective function value: " << optimal_value <<std:: endl); } - ef.get_network_instance()->copy_parameter_space(&this->p_min_glob); + std::shared_ptr<std::vector<double>> coord = std::make_shared<std::vector<double>>(this->p_min_glob); + ef.get_network_instance()->copy_parameter_space(coord); delete centroid; } diff --git a/src/LearningMethods/RandomSolution.cpp b/src/LearningMethods/RandomSolution.cpp index 01e7c013..f7dd5031 100644 --- a/src/LearningMethods/RandomSolution.cpp +++ b/src/LearningMethods/RandomSolution.cpp @@ -27,7 +27,7 @@ namespace lib4neuro { void RandomSolution::optimize(lib4neuro::ErrorFunction &ef, std::ofstream *ofs) { ef.get_network_instance()->randomize_parameters(); this->optimal_parameters = *ef.get_parameters(); - COUT_INFO("Producing a random solution... error: " << ef.eval(&this->optimal_parameters) << std::endl); + COUT_INFO("Producing a random solution... 
error: " << ef.eval(ef.get_parameters()) << std::endl); } } \ No newline at end of file diff --git a/src/Network/NeuralNetwork.cpp b/src/Network/NeuralNetwork.cpp index e13646bb..0edfb96d 100644 --- a/src/Network/NeuralNetwork.cpp +++ b/src/Network/NeuralNetwork.cpp @@ -411,7 +411,7 @@ namespace lib4neuro { } void NeuralNetwork::eval_single_debug(::std::vector<double> &input, ::std::vector<double> &output, - ::std::vector<double> *custom_weights_and_biases) { + ::std::shared_ptr<std::vector<double>> custom_weights_and_biases) { if ((this->input_neuron_indices.size() * this->output_neuron_indices.size()) <= 0) { THROW_INVALID_ARGUMENT_ERROR("Input and output neurons have not been specified!"); } @@ -542,7 +542,7 @@ namespace lib4neuro { this->layers_analyzed = false; } - void NeuralNetwork::copy_parameter_space(std::vector<double> *parameters) { + void NeuralNetwork::copy_parameter_space(std::shared_ptr<std::vector<double>> parameters) { if (parameters != nullptr) { for (unsigned int i = 0; i < this->connection_weights.size(); ++i) { this->connection_weights.at(i) = (*parameters).at(i); @@ -575,7 +575,7 @@ namespace lib4neuro { void NeuralNetwork::eval_single(::std::vector<double>& input, ::std::vector<double>& output, - ::std::vector<double>* custom_weights_and_biases) { + ::std::shared_ptr<std::vector<double>> custom_weights_and_biases) { if ((this->input_neuron_indices.size() * this->output_neuron_indices.size()) <= 0) { THROW_INVALID_ARGUMENT_ERROR("Input and output neurons have not been specified!"); diff --git a/src/Network/NeuralNetwork.h b/src/Network/NeuralNetwork.h index cc18cd56..54d8b563 100644 --- a/src/Network/NeuralNetwork.h +++ b/src/Network/NeuralNetwork.h @@ -184,7 +184,7 @@ namespace lib4neuro { * @param custom_weights_and_biases */ LIB4NEURO_API virtual void eval_single_debug(std::vector<double> &input, std::vector<double> &output, - std::vector<double> *custom_weights_and_biases = nullptr); + std::shared_ptr<std::vector<double>> custom_weights_and_biases = nullptr); /** @@ -232,7 +232,7 @@ namespace lib4neuro { * Replaces the values in @{this->connection_weights} and @{this->neuron_biases} by the provided values * @param parameters */ - LIB4NEURO_API virtual void copy_parameter_space(std::vector<double> *parameters); + LIB4NEURO_API virtual void copy_parameter_space(std::shared_ptr<std::vector<double>> parameters); /** * Copies the pointers @{this->connection_weights} and @{this->neuron_biases} from the parental network, sets @@ -248,7 +248,7 @@ namespace lib4neuro { * @param custom_weights_and_biases */ LIB4NEURO_API virtual void eval_single(std::vector<double> &input, std::vector<double> &output, - std::vector<double> *custom_weights_and_biases = nullptr); + std::shared_ptr<std::vector<double>> custom_weights_and_biases = nullptr); /** * diff --git a/src/Network/NeuralNetworkSum.cpp b/src/Network/NeuralNetworkSum.cpp index 16dafc1e..d4a66ab3 100644 --- a/src/Network/NeuralNetworkSum.cpp +++ b/src/Network/NeuralNetworkSum.cpp @@ -49,7 +49,7 @@ namespace lib4neuro { } void NeuralNetworkSum::eval_single(std::vector<double> &input, std::vector<double> &output, - std::vector<double> *custom_weights_and_biases) { + std::shared_ptr<std::vector<double>> custom_weights_and_biases) { std::vector<double> mem_output(output.size()); std::fill(output.begin(), output.end(), 0.0); @@ -155,7 +155,7 @@ namespace lib4neuro { } void NeuralNetworkSum::eval_single_debug(std::vector<double> &input, std::vector<double> &output, - std::vector<double> *custom_weights_and_biases) { + 
std::shared_ptr<std::vector<double>> custom_weights_and_biases) { std::vector<double> mem_output(output.size()); std::fill(output.begin(), output.end(), 0.0); diff --git a/src/Network/NeuralNetworkSum.h b/src/Network/NeuralNetworkSum.h index c20bb888..0fcfc284 100644 --- a/src/Network/NeuralNetworkSum.h +++ b/src/Network/NeuralNetworkSum.h @@ -41,7 +41,7 @@ namespace lib4neuro { * @param custom_weights_and_biases */ LIB4NEURO_API void eval_single(std::vector<double> &input, std::vector<double> &output, - std::vector<double> *custom_weights_and_biases = nullptr) override; + std::shared_ptr<std::vector<double>> custom_weights_and_biases = nullptr) override; /** * @@ -50,7 +50,7 @@ namespace lib4neuro { * @param custom_weights_and_biases */ LIB4NEURO_API void eval_single_debug(std::vector<double> &input, std::vector<double> &output, - std::vector<double> *custom_weights_and_biases = nullptr) override; + std::shared_ptr<std::vector<double>> custom_weights_and_biases = nullptr) override; /** @@ -66,37 +66,37 @@ namespace lib4neuro { * * @return */ - LIB4NEURO_API virtual size_t get_n_inputs() override; + LIB4NEURO_API size_t get_n_inputs() override; /** * * @return */ - LIB4NEURO_API virtual size_t get_n_outputs() override; + LIB4NEURO_API size_t get_n_outputs() override; /** * * @return */ - LIB4NEURO_API virtual size_t get_n_weights() override; + LIB4NEURO_API size_t get_n_weights() override; /** * * @return */ - LIB4NEURO_API virtual size_t get_n_biases() override; + LIB4NEURO_API size_t get_n_biases() override; /** * * @return */ - LIB4NEURO_API virtual size_t get_n_neurons() override; + LIB4NEURO_API size_t get_n_neurons() override; /** * * @return */ //TODO only works if all the networks share the same parameters - LIB4NEURO_API virtual std::vector<double> *get_parameter_ptr_weights() override; + LIB4NEURO_API std::vector<double> *get_parameter_ptr_weights() override; /** * diff --git a/src/Solvers/DESolver.cpp b/src/Solvers/DESolver.cpp index 262bb667..b3aa6697 100644 --- a/src/Solvers/DESolver.cpp +++ b/src/Solvers/DESolver.cpp @@ -408,7 +408,7 @@ namespace lib4neuro { printf("error before optimization: %f\n", total_error.eval(nullptr)); learning_method.optimize(total_error); - this->solution->copy_parameter_space(learning_method.get_parameters().get()); + this->solution->copy_parameter_space(learning_method.get_parameters()); printf("error after optimization: %f\n", total_error.eval(nullptr)); } @@ -425,7 +425,7 @@ namespace lib4neuro { } double - DESolver::eval_equation(size_t equation_idx, std::vector<double> *weight_and_biases, std::vector<double> &input) { + DESolver::eval_equation(size_t equation_idx, std::shared_ptr<std::vector<double>>weight_and_biases, std::vector<double> &input) { std::vector<double> output(1); this->differential_equations.at(equation_idx)->eval_single(input, output, weight_and_biases); diff --git a/src/Solvers/DESolver.h b/src/Solvers/DESolver.h index dba092c8..c0b10618 100644 --- a/src/Solvers/DESolver.h +++ b/src/Solvers/DESolver.h @@ -166,7 +166,7 @@ namespace lib4neuro { * For testing purposes only */ LIB4NEURO_API double - eval_equation(size_t equation_idx, std::vector<double> *weights_and_biases, std::vector<double> &input); + eval_equation(size_t equation_idx, std::shared_ptr<std::vector<double>>weights_and_biases, std::vector<double> &input); /** * For testing purposes only diff --git a/src/examples/net_test_1.cpp b/src/examples/net_test_1.cpp index fa98dc05..141de453 100644 --- a/src/examples/net_test_1.cpp +++ b/src/examples/net_test_1.cpp @@ -49,7 
+49,7 @@ void optimize_via_particle_swarm( l4n::NeuralNetwork &net, l4n::ErrorFunction &e ); swarm_01.optimize( ef ); - std::vector<double> *parameters = swarm_01.get_parameters().get(); + std::shared_ptr<std::vector<double>> parameters = swarm_01.get_parameters(); net.copy_parameter_space(parameters); /* ERROR CALCULATION */ @@ -64,7 +64,7 @@ void optimize_via_gradient_descent( l4n::NeuralNetwork &net, l4n::ErrorFunction gd.optimize( ef ); - std::vector<double> *parameters = gd.get_parameters().get(); + std::shared_ptr<std::vector<double>> parameters = gd.get_parameters(); net.copy_parameter_space(parameters); /* ERROR CALCULATION */ diff --git a/src/examples/net_test_2.cpp b/src/examples/net_test_2.cpp index 54c356e6..09920239 100644 --- a/src/examples/net_test_2.cpp +++ b/src/examples/net_test_2.cpp @@ -48,7 +48,7 @@ void optimize_via_particle_swarm( l4n::NeuralNetwork &net, l4n::ErrorFunction &e ); swarm_01.optimize( ef ); - std::vector<double> *parameters = swarm_01.get_parameters().get(); + std::shared_ptr<std::vector<double>> parameters = swarm_01.get_parameters(); net.copy_parameter_space(parameters); std::cout << "Run finished! Error of the network[Particle swarm]: " << ef.eval( nullptr ) << std::endl; @@ -61,7 +61,7 @@ void optimize_via_gradient_descent( l4n::NeuralNetwork &net, l4n::ErrorFunction gd.optimize( ef ); - std::vector<double> *parameters = gd.get_parameters().get(); + std::shared_ptr<std::vector<double>> parameters = gd.get_parameters(); net.copy_parameter_space(parameters); /* ERROR CALCULATION */ diff --git a/src/examples/network_serialization.cpp b/src/examples/network_serialization.cpp index 75890c6a..ab2acef2 100644 --- a/src/examples/network_serialization.cpp +++ b/src/examples/network_serialization.cpp @@ -55,7 +55,7 @@ int main() { size_t idx2 = net.add_neuron(i2, l4n::BIAS_TYPE::NO_BIAS); size_t idx3 = net.add_neuron(o1, l4n::BIAS_TYPE::NEXT_BIAS); - std::vector<double> *bv = net.get_parameter_ptr_biases(); + std::vector<double>* bv = net.get_parameter_ptr_biases(); for(size_t i = 0; i < 1; ++i){ bv->at(i) = 1.0; } @@ -116,7 +116,7 @@ int main() { ); swarm_01.optimize( mse ); - std::vector<double> *parameters = swarm_01.get_parameters().get(); + std::shared_ptr<std::vector<double>>parameters = swarm_01.get_parameters(); net.copy_parameter_space(parameters); printf("w1 = %10.7f\n", parameters->at( 0 )); diff --git a/src/examples/seminar.cpp b/src/examples/seminar.cpp index ff36d496..2d68db5a 100644 --- a/src/examples/seminar.cpp +++ b/src/examples/seminar.cpp @@ -116,7 +116,7 @@ int main() { ); swarm_01.optimize( mse ); - std::vector<double> *parameters = swarm_01.get_parameters( ).get(); + std::shared_ptr<std::vector<double>> parameters = swarm_01.get_parameters( ); XOR.copy_parameter_space(parameters); /* ERROR CALCULATION */ -- GitLab
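Editor's note (not part of the patch): the commit above consistently replaces raw std::vector<double>* parameter arguments with std::shared_ptr<std::vector<double>> and wraps locally owned vectors in std::make_shared at the call sites. The following minimal, self-contained C++ sketch illustrates that calling convention only; DemoNetwork, its members, and the values used are illustrative stand-ins, not lib4neuro code.

#include <iostream>
#include <memory>
#include <vector>

class DemoNetwork {
public:
    // Mirrors the post-patch copy_parameter_space signature: the callee receives
    // shared ownership of the parameter vector instead of a borrowed raw pointer.
    void copy_parameter_space(std::shared_ptr<std::vector<double>> parameters) {
        if (parameters) {
            this->weights = *parameters;  // copy the values in, element for element
        }
    }

    // Mirrors the post-patch eval_single style: optional custom parameters via shared_ptr.
    double eval_single(const std::vector<double>& input,
                       std::shared_ptr<std::vector<double>> custom_weights = nullptr) const {
        const std::vector<double>& w = custom_weights ? *custom_weights : this->weights;
        double out = 0.0;
        for (size_t i = 0; i < input.size() && i < w.size(); ++i) {
            out += w[i] * input[i];  // toy weighted sum standing in for a network evaluation
        }
        return out;
    }

private:
    std::vector<double> weights{0.5, -0.25};
};

int main() {
    DemoNetwork net;

    // Pre-patch style would have been: net.copy_parameter_space(&optimal_parameters);
    // Post-patch style: wrap the locally owned vector first. Note that
    // std::make_shared<std::vector<double>>(optimal_parameters) copies the vector,
    // so the shared_ptr owns an independent copy, as at the converted call sites above.
    std::vector<double> optimal_parameters{1.0, 2.0};
    std::shared_ptr<std::vector<double>> params =
        std::make_shared<std::vector<double>>(optimal_parameters);
    net.copy_parameter_space(params);

    std::cout << net.eval_single({3.0, 4.0}) << std::endl;          // uses the copied-in weights
    std::cout << net.eval_single({3.0, 4.0}, params) << std::endl;  // or pass the parameters explicitly
    return 0;
}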