diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5a4c228a96743d23bca4269718252c1a5dfc0ff9..21386f030f2dae28fe04d13be1946ecfd9746c9b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -59,7 +59,7 @@ endif()
 set(CMAKE_CXX_STANDARD 17)

 add_compile_options(-fsanitize=address)
-add_link_options(-fsanitize=address)
+add_link_options(-fsanitize=address -static-libasan)

 if( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel" )
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -xHost" )
diff --git a/src/ErrorFunction/ErrorFunctions.cpp b/src/ErrorFunction/ErrorFunctions.cpp
index 3d53690b2f913ef0ca7e5aeea7f2b57fd0679483..01d9bc34adc84a988348bfc20b67368757c8436a 100644
--- a/src/ErrorFunction/ErrorFunctions.cpp
+++ b/src/ErrorFunction/ErrorFunctions.cpp
@@ -75,10 +75,10 @@ namespace lib4neuro {
         return this->ds_test;
     }

-    std::shared_ptr<std::vector<double>> ErrorFunction::get_parameters() {
-        std::shared_ptr<std::vector<double>> output;
-        output.reset(new std::vector<double>(this->net->get_n_weights() + this->net->get_n_biases()));
-//        std::shared_ptr<std::vector<double>>output = new std::vector<double>(this->net->get_n_weights() + this->net->get_n_biases());
+    std::vector<double>* ErrorFunction::get_parameters() {
+        std::vector<double>* output;
+        output = new std::vector<double>(this->net->get_n_weights() + this->net->get_n_biases());
+//        std::vector<double>* output = new std::vector<double>(this->net->get_n_weights() + this->net->get_n_biases());

         size_t i = 0;
@@ -101,9 +101,9 @@ namespace lib4neuro {
         this->dimension = net->get_n_weights() + net->get_n_biases();
     }

-    double MSE::eval_on_single_input(std::shared_ptr<std::vector<double>>input,
-                                     std::shared_ptr<std::vector<double>>output,
-                                     std::shared_ptr<std::vector<double>> weights) {
+    double MSE::eval_on_single_input(std::vector<double>* input,
+                                     std::vector<double>* output,
+                                     std::vector<double>* weights) {
         std::vector<double> predicted_output(this->get_network_instance()->get_n_outputs());
         this->net->eval_single(*input, predicted_output, weights);
         double result = 0;
@@ -119,7 +119,7 @@ namespace lib4neuro {

     double MSE::eval_on_data_set(lib4neuro::DataSet* data_set,
                                  std::ofstream* results_file_path,
-                                 std::shared_ptr<std::vector<double>> weights,
+                                 std::vector<double>* weights,
                                  bool denormalize_data,
                                  bool verbose) {
         size_t dim_in = data_set->get_input_dim();
@@ -268,7 +268,7 @@ namespace lib4neuro {

     double MSE::eval_on_data_set(DataSet* data_set,
                                  std::string results_file_path,
-                                 std::shared_ptr<std::vector<double>> weights,
+                                 std::vector<double>* weights,
                                  bool verbose) {
         std::ofstream ofs(results_file_path);
         if (ofs.is_open()) {
@@ -284,7 +284,7 @@ namespace lib4neuro {
     }

     double MSE::eval_on_data_set(DataSet* data_set,
-                                 std::shared_ptr<std::vector<double>> weights,
+                                 std::vector<double>* weights,
                                  bool verbose) {
         return this->eval_on_data_set(data_set,
                                       nullptr,
@@ -293,7 +293,7 @@ namespace lib4neuro {
                                       verbose);
     }

-    double MSE::eval(std::shared_ptr<std::vector<double>> weights,
+    double MSE::eval(std::vector<double>* weights,
                      bool denormalize_data,
                      bool verbose) {
         return this->eval_on_data_set(this->ds,
@@ -303,7 +303,7 @@ namespace lib4neuro {
                                       verbose);
     }

-    double MSE::eval_on_test_data(std::shared_ptr<std::vector<double>> weights,
+    double MSE::eval_on_test_data(std::vector<double>* weights,
                                   bool verbose) {
         return this->eval_on_data_set(this->ds_test,
                                       weights,
@@ -311,7 +311,7 @@ namespace lib4neuro {
     }

     double MSE::eval_on_test_data(std::string results_file_path,
-                                  std::shared_ptr<std::vector<double>> weights,
+                                  std::vector<double>* weights,
                                   bool verbose) {
         return this->eval_on_data_set(this->ds_test,
                                       results_file_path,
@@ -320,7 +320,7 @@ namespace lib4neuro {
     }

     double MSE::eval_on_test_data(std::ofstream* results_file_path,
-                                  std::shared_ptr<std::vector<double>> weights,
+                                  std::vector<double>* weights,
                                   bool verbose) {
         return this->eval_on_data_set(this->ds_test,
                                       results_file_path,
@@ -345,11 +345,11 @@ namespace lib4neuro {
         }

         std::vector<double> error_derivative(dim_out);
-        std::shared_ptr<std::vector<double>> params_tmp = std::make_shared<std::vector<double>>(params);
+//        std::vector<double>* params_tmp = std::make_shared<std::vector<double>>(params);

         for (auto el: *data) {  // Iterate through every element in the test set
             this->net->eval_single(el.first,
                                    error_derivative,
-                                   params_tmp);  // Compute the net output and store it into 'output' variable
+                                   &params);  // Compute the net output and store it into 'output' variable

             for (size_t j = 0; j < dim_out; ++j) {
                 error_derivative[j] = 2.0 * (error_derivative[j] - el.second[j]); //real - expected result
@@ -362,9 +362,9 @@ namespace lib4neuro {
         }
     }

-    double MSE::calculate_single_residual(std::shared_ptr<std::vector<double>>input,
-                                          std::shared_ptr<std::vector<double>>output,
-                                          std::shared_ptr<std::vector<double>>parameters) {
+    double MSE::calculate_single_residual(std::vector<double>* input,
+                                          std::vector<double>* output,
+                                          std::vector<double>* parameters) {
         //TODO maybe move to the general ErrorFunction
         //TODO check input vector sizes - they HAVE TO be allocated before calling this function

@@ -372,15 +372,15 @@ namespace lib4neuro {
         return -this->eval_on_single_input(input, output, parameters);
     }

-    void MSE::calculate_residual_gradient(std::shared_ptr<std::vector<double>>input,
-                                          std::shared_ptr<std::vector<double>>output,
-                                          std::shared_ptr<std::vector<double>>gradient,
+    void MSE::calculate_residual_gradient(std::vector<double>* input,
+                                          std::vector<double>* output,
+                                          std::vector<double>* gradient,
                                           double h) {
         //TODO check input vector sizes - they HAVE TO be allocated before calling this function

         size_t n_parameters = this->get_dimension();
-        std::shared_ptr<std::vector<double>> parameters = this->get_parameters();
+        std::vector<double>* parameters = this->get_parameters();

         double delta;  // Complete step size
         double former_parameter_value;
@@ -432,11 +432,11 @@ namespace lib4neuro {
         std::fill(grad_sum.begin(), grad_sum.end(), 0.0);
         this->net->write_weights();
         this->net->write_biases();
-        std::shared_ptr<std::vector<double>> params_tmp = std::make_shared<std::vector<double>>(params);
+//        std::vector<double>* params_tmp = std::make_shared<std::vector<double>>(params);

         for (auto el: *data) {  // Iterate through every element in the test set
             this->net->eval_single_debug(el.first,
                                          error_derivative,
-                                         params_tmp);  // Compute the net output and store it into 'output' variable
+                                         &params);  // Compute the net output and store it into 'output' variable
             std::cout << "Input[";
             for( auto v: el.first){
                 std::cout << v << ", ";
@@ -485,7 +485,7 @@ namespace lib4neuro {
         std::cout << "]" << std::endl << std::endl;
     }

-    double MSE::eval_single_item_by_idx(size_t i, std::shared_ptr<std::vector<double>> parameter_vector,
+    double MSE::eval_single_item_by_idx(size_t i, std::vector<double>* parameter_vector,
                                         std::vector<double> &error_vector) {
         double output = 0, val;
@@ -518,7 +518,7 @@ namespace lib4neuro {
//        }
     }

-    double ErrorSum::eval_on_test_data(std::shared_ptr<std::vector<double>> weights,
+    double ErrorSum::eval_on_test_data(std::vector<double>* weights,
                                        bool verbose) {
         //TODO take care of the case, when there are no test data
@@ -537,7 +537,7 @@ namespace lib4neuro {
     }

     double ErrorSum::eval_on_test_data(std::string results_file_path,
-                                       std::shared_ptr<std::vector<double>> weights,
+                                       std::vector<double>* weights,
                                        bool verbose) {
         THROW_NOT_IMPLEMENTED_ERROR();
@@ -545,14 +545,14 @@ namespace lib4neuro {
     }

     double ErrorSum::eval_on_test_data(std::ofstream* results_file_path,
-                                       std::shared_ptr<std::vector<double>> weights,
+                                       std::vector<double>* weights,
                                        bool verbose) {
         THROW_NOT_IMPLEMENTED_ERROR();
         return -1;
     }

     double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set,
-                                      std::shared_ptr<std::vector<double>> weights,
+                                      std::vector<double>* weights,
                                       bool verbose) {
         THROW_NOT_IMPLEMENTED_ERROR();
@@ -561,7 +561,7 @@ namespace lib4neuro {

     double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set,
                                       std::string results_file_path,
-                                      std::shared_ptr<std::vector<double>> weights,
+                                      std::vector<double>* weights,
                                       bool verbose) {
         THROW_NOT_IMPLEMENTED_ERROR();
@@ -570,14 +570,14 @@ namespace lib4neuro {

     double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set,
                                       std::ofstream* results_file_path,
-                                      std::shared_ptr<std::vector<double>> weights,
+                                      std::vector<double>* weights,
                                       bool denormalize_data,
                                       bool verbose) {
         THROW_NOT_IMPLEMENTED_ERROR();
         return -1;
     }

-    double ErrorSum::eval(std::shared_ptr<std::vector<double>> weights,
+    double ErrorSum::eval(std::vector<double>* weights,
                           bool denormalize_data,
                           bool verbose) {
         double output = 0.0;
@@ -594,7 +594,7 @@ namespace lib4neuro {
         return output;
     }

-    double ErrorSum::eval_single_item_by_idx(size_t i, std::shared_ptr<std::vector<double>>parameter_vector,
+    double ErrorSum::eval_single_item_by_idx(size_t i, std::vector<double>* parameter_vector,
                                              std::vector<double> &error_vector) {
         double output = 0.0;
         ErrorFunction* ef = nullptr;
@@ -679,7 +679,7 @@ namespace lib4neuro {
         return this->dimension;
     }

-    std::shared_ptr<std::vector<double>> ErrorSum::get_parameters() {
+    std::vector<double>* ErrorSum::get_parameters() {
         return this->summand->at(0)->get_parameters();
     }
@@ -688,16 +688,16 @@ namespace lib4neuro {
     };

-    void ErrorSum::calculate_residual_gradient(std::shared_ptr<std::vector<double>>input,
-                                               std::shared_ptr<std::vector<double>>output,
-                                               std::shared_ptr<std::vector<double>>gradient,
+    void ErrorSum::calculate_residual_gradient(std::vector<double>* input,
+                                               std::vector<double>* output,
+                                               std::vector<double>* gradient,
                                                double h) {
         THROW_NOT_IMPLEMENTED_ERROR();
     }

-    double ErrorSum::calculate_single_residual(std::shared_ptr<std::vector<double>>input,
-                                               std::shared_ptr<std::vector<double>>output,
-                                               std::shared_ptr<std::vector<double>>parameters) {
+    double ErrorSum::calculate_single_residual(std::vector<double>* input,
+                                               std::vector<double>* output,
+                                               std::vector<double>* parameters) {
         THROW_NOT_IMPLEMENTED_ERROR();
         return 0;
diff --git a/src/ErrorFunction/ErrorFunctions.h b/src/ErrorFunction/ErrorFunctions.h
index 23c146c4ec64810e52e686b1546a2a04950c237a..fb1f78814e7f170b415a643266b6403aa18a8a88 100644
--- a/src/ErrorFunction/ErrorFunctions.h
+++ b/src/ErrorFunction/ErrorFunctions.h
@@ -27,8 +27,10 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        virtual double eval(std::shared_ptr<std::vector<double>> weights = nullptr, bool denormalize_data=false,
-                            bool verbose = false) = 0;
+//        virtual double eval(std::vector<double>* weights = nullptr, bool denormalize_data=false,
+//                            bool verbose = false) = 0;
+        virtual double eval(std::vector<double>* weights = nullptr, bool denormalize_data=false,
+                            bool verbose = false) = 0;

         /**
          *
@@ -66,7 +68,7 @@ namespace lib4neuro {
          *
          * @return
          */
-        virtual std::shared_ptr<std::vector<double>> get_parameters();
+        virtual std::vector<double>* get_parameters();

         /**
          * @return
@@ -100,7 +102,7 @@ namespace lib4neuro {
         /**
          *
         */
-        virtual double eval_on_test_data(std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false) = 0;
+        virtual double eval_on_test_data(std::vector<double>* weights = nullptr, bool verbose = false) = 0;

        /**
         *
@@ -108,7 +110,7 @@ namespace lib4neuro {
         * @param weights
         * @return
         */
-        virtual double eval_on_test_data(std::string results_file_path, std::shared_ptr<std::vector<double>> weights = nullptr,
+        virtual double eval_on_test_data(std::string results_file_path, std::vector<double>* weights = nullptr,
                                          bool verbose = false) = 0;

        /**
@@ -117,7 +119,7 @@ namespace lib4neuro {
         * @param weights
         * @return
         */
-        virtual double eval_on_test_data(std::ofstream* results_file_path, std::shared_ptr<std::vector<double>> weights = nullptr,
+        virtual double eval_on_test_data(std::ofstream* results_file_path, std::vector<double>* weights = nullptr,
                                          bool verbose = false) = 0;

        /**
@@ -126,7 +128,7 @@ namespace lib4neuro {
         * @param weights
         * @return
         */
-        virtual double eval_on_data_set(DataSet* data_set, std::shared_ptr<std::vector<double>> weights = nullptr,
+        virtual double eval_on_data_set(DataSet* data_set, std::vector<double>* weights = nullptr,
                                         bool verbose = false) = 0;

        /**
@@ -137,7 +139,7 @@ namespace lib4neuro {
         * @return
         */
        virtual double
-        eval_on_data_set(DataSet* data_set, std::string results_file_path, std::shared_ptr<std::vector<double>> weights = nullptr,
+        eval_on_data_set(DataSet* data_set, std::string results_file_path, std::vector<double>* weights = nullptr,
                          bool verbose = false) = 0;

        /**
@@ -149,7 +151,7 @@ namespace lib4neuro {
         */
        virtual double eval_on_data_set(DataSet* data_set,
                                        std::ofstream* results_file_path = nullptr,
-                                        std::shared_ptr<std::vector<double>> weights = nullptr,
+                                        std::vector<double>* weights = nullptr,
                                        bool denormalize_data = true,
                                        bool verbose = false) = 0;

@@ -160,7 +162,7 @@ namespace lib4neuro {
         * @param error_vector
         * @return
         */
-        virtual double eval_single_item_by_idx(size_t i, std::shared_ptr<std::vector<double>> parameter_vector, std::vector<double> &error_vector) = 0;
+        virtual double eval_single_item_by_idx(size_t i, std::vector<double>* parameter_vector, std::vector<double> &error_vector) = 0;

        /**
         *
@@ -177,9 +179,9 @@ namespace lib4neuro {
         * @param h
         */
        virtual void
-        calculate_residual_gradient(std::shared_ptr<std::vector<double>> input,
-                                    std::shared_ptr<std::vector<double>> output,
-                                    std::shared_ptr<std::vector<double>> gradient,
+        calculate_residual_gradient(std::vector<double>* input,
+                                    std::vector<double>* output,
+                                    std::vector<double>* gradient,
                                     double h = 1e-3) = 0;

        /**
@@ -190,9 +192,9 @@ namespace lib4neuro {
         * @return
         */
        virtual double
-        calculate_single_residual(std::shared_ptr<std::vector<double>> input,
-                                  std::shared_ptr<std::vector<double>> output,
-                                  std::shared_ptr<std::vector<double>> parameters = nullptr) = 0;
+        calculate_single_residual(std::vector<double>* input,
+                                  std::vector<double>* output,
+                                  std::vector<double>* parameters = nullptr) = 0;

    protected:

@@ -237,10 +239,13 @@ namespace lib4neuro {
         * @param weights
         * @return
         */
-        LIB4NEURO_API double eval(std::shared_ptr<std::vector<double>> weights = nullptr,
+//        LIB4NEURO_API double eval(std::vector<double>* weights = nullptr,
+//                                  bool denormalize_data = false,
+//                                  bool verbose = false) override;
+        LIB4NEURO_API double eval(std::vector<double>* weights = nullptr,
                                   bool denormalize_data = false,
                                   bool verbose = false) override;
-
+
        /**
         *
         * @param params
@@ -275,9 +280,9 @@ namespace lib4neuro {
         * @return
         */
        LIB4NEURO_API
-        virtual double calculate_single_residual(std::shared_ptr<std::vector<double>> input,
-                                                 std::shared_ptr<std::vector<double>> output,
-                                                 std::shared_ptr<std::vector<double>> parameters) override;
+        double calculate_single_residual(std::vector<double>* input,
+                                         std::vector<double>* output,
+                                         std::vector<double>* parameters) override;

        /**
         * Compute gradient of the residual function f(x) = 0 - MSE(x) for a specific input x.
@@ -288,9 +293,9 @@ namespace lib4neuro {
         * @param[in] h Step used in the central difference
         */
        LIB4NEURO_API void
-        calculate_residual_gradient(std::shared_ptr<std::vector<double>> input,
-                                    std::shared_ptr<std::vector<double>> output,
-                                    std::shared_ptr<std::vector<double>> gradient,
+        calculate_residual_gradient(std::vector<double>* input,
+                                    std::vector<double>* output,
+                                    std::vector<double>* gradient,
                                     double h=1e-3) override;

        /**
@@ -298,16 +303,16 @@ namespace lib4neuro {
         * @param input
         * @return
         */
-        LIB4NEURO_API double eval_on_single_input(std::shared_ptr<std::vector<double>> input,
-                                                  std::shared_ptr<std::vector<double>> output,
-                                                  std::shared_ptr<std::vector<double>> weights = nullptr);
+        LIB4NEURO_API double eval_on_single_input(std::vector<double>* input,
+                                                  std::vector<double>* output,
+                                                  std::vector<double>* weights = nullptr);

        /**
         *
         * @param weights
         * @return
         */
-        LIB4NEURO_API double eval_on_test_data(std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false) override;
+        LIB4NEURO_API double eval_on_test_data(std::vector<double>* weights = nullptr, bool verbose = false) override;

        /**
         *
@@ -316,7 +321,7 @@ namespace lib4neuro {
         * @return
         */
        LIB4NEURO_API double eval_on_test_data(std::string results_file_path = nullptr,
-                                               std::shared_ptr<std::vector<double>> weights = nullptr,
+                                               std::vector<double>* weights = nullptr,
                                               bool verbose = false);

        /**
@@ -326,7 +331,7 @@ namespace lib4neuro {
         * @return
         */
        LIB4NEURO_API double eval_on_test_data(std::ofstream* results_file_path,
-                                               std::shared_ptr<std::vector<double>> weights = nullptr,
+                                               std::vector<double>* weights = nullptr,
                                               bool verbose = false) override;

        /**
@@ -338,7 +343,7 @@ namespace lib4neuro {
         */
        LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
                                              std::ofstream* results_file_path,
-                                              std::shared_ptr<std::vector<double>> weights = nullptr,
+                                              std::vector<double>* weights = nullptr,
                                              bool denormalize_data = false,
                                              bool verbose = false) override;

@@ -349,7 +354,7 @@ namespace lib4neuro {
         * @return
         */
        LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
-                                              std::shared_ptr<std::vector<double>> weights = nullptr,
+                                              std::vector<double>* weights = nullptr,
                                              bool verbose = false) override;

        /**
@@ -361,7 +366,7 @@ namespace lib4neuro {
         */
        LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
                                              std::string results_file_path,
-                                              std::shared_ptr<std::vector<double>> weights = nullptr,
+                                              std::vector<double>* weights = nullptr,
                                              bool verbose = false) override;

        /**
@@ -371,14 +376,14 @@ namespace lib4neuro {
         * @param error_vector
         * @return
         */
-        LIB4NEURO_API virtual double eval_single_item_by_idx(size_t i, std::shared_ptr<std::vector<double>> parameter_vector, std::vector<double> &error_vector) override;
+        LIB4NEURO_API double eval_single_item_by_idx(size_t i, std::vector<double>* parameter_vector, std::vector<double> &error_vector) override;

        /**
         *
         * @param error_vector
         * @param gradient_vector
         */
-        LIB4NEURO_API virtual void calculate_error_gradient_single(std::vector<double> &error_vector, std::vector<double> &gradient_vector) override;
+        LIB4NEURO_API void calculate_error_gradient_single(std::vector<double> &error_vector, std::vector<double> &gradient_vector) override;
    };

    class ErrorSum : public ErrorFunction {
@@ -398,7 +403,7 @@ namespace lib4neuro {
         * @param weights
         * @return
         */
-        LIB4NEURO_API double eval(std::shared_ptr<std::vector<double>> weights = nullptr,
+        LIB4NEURO_API double eval(std::vector<double>* weights = nullptr,
                                  bool denormalize_data = false,
                                  bool verbose = false);

        /**
@@ -407,7 +412,7 @@ namespace lib4neuro {
         * @param weights
         * @return
         */
-        LIB4NEURO_API double eval_on_test_data(std::shared_ptr<std::vector<double>> weights = nullptr, bool verbose = false) override;
+        LIB4NEURO_API double eval_on_test_data(std::vector<double>* weights = nullptr, bool verbose = false) override;

        /**
         *
@@ -416,7 +421,7 @@ namespace lib4neuro {
         * @return
         */
        LIB4NEURO_API double eval_on_test_data(std::string results_file_path,
-                                               std::shared_ptr<std::vector<double>> weights = nullptr,
+                                               std::vector<double>* weights = nullptr,
                                               bool verbose = false) override;

        /**
@@ -426,7 +431,7 @@ namespace lib4neuro {
         * @return
         */
        LIB4NEURO_API double eval_on_test_data(std::ofstream* results_file_path,
-                                               std::shared_ptr<std::vector<double>> weights = nullptr,
+                                               std::vector<double>* weights = nullptr,
                                               bool verbose = false) override;

        /**
@@ -436,7 +441,7 @@ namespace lib4neuro {
         * @return
         */
        LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
-                                              std::shared_ptr<std::vector<double>> weights = nullptr,
+                                              std::vector<double>* weights = nullptr,
                                              bool verbose = false) override;

        /**
@@ -448,7 +453,7 @@ namespace lib4neuro {
         */
        LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
                                              std::string results_file_path,
-                                              std::shared_ptr<std::vector<double>> weights = nullptr,
+                                              std::vector<double>* weights = nullptr,
                                              bool verbose = false) override;

        /**
@@ -460,7 +465,7 @@ namespace lib4neuro {
         */
        LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
                                              std::ofstream* results_file_path,
-                                              std::shared_ptr<std::vector<double>> weights = nullptr,
+                                              std::vector<double>* weights = nullptr,
                                              bool denormalize_data = true,
                                              bool verbose = false) override;

@@ -471,7 +476,7 @@ namespace lib4neuro {
         * @param error_vector
         * @return
         */
-        LIB4NEURO_API virtual double eval_single_item_by_idx(size_t i, std::shared_ptr<std::vector<double>> parameter_vector, std::vector<double> &error_vector) override;
+        LIB4NEURO_API virtual double eval_single_item_by_idx(size_t i, std::vector<double>* parameter_vector, std::vector<double> &error_vector) override;

        /**
         *
@@ -518,22 +523,22 @@ namespace lib4neuro {
                                      size_t batch = 0) override;

        LIB4NEURO_API void
-        calculate_residual_gradient(std::shared_ptr<std::vector<double>> input,
-                                    std::shared_ptr<std::vector<double>> output,
-                                    std::shared_ptr<std::vector<double>> gradient,
+        calculate_residual_gradient(std::vector<double>* input,
+                                    std::vector<double>* output,
+                                    std::vector<double>* gradient,
                                     double h = 1e-3) override;

        LIB4NEURO_API double
-        calculate_single_residual(std::shared_ptr<std::vector<double>> input,
-                                  std::shared_ptr<std::vector<double>> output,
-                                  std::shared_ptr<std::vector<double>> parameters = nullptr) override;
+        calculate_single_residual(std::vector<double>* input,
+                                  std::vector<double>* output,
+                                  std::vector<double>* parameters = nullptr) override;

        /**
         *
         * @return
         */
-        LIB4NEURO_API std::shared_ptr<std::vector<double>> get_parameters() override;
+        LIB4NEURO_API std::vector<double>* get_parameters() override;
        /**
         *
diff --git a/src/LearningMethods/GradientDescent.cpp b/src/LearningMethods/GradientDescent.cpp
index 390ca5040c4faf24cb2abe32c6bdd208ae99a0e4..d74410235856521cb8b5ecbaf1989c95548dbc8a 100644
--- a/src/LearningMethods/GradientDescent.cpp
+++ b/src/LearningMethods/GradientDescent.cpp
@@ -62,7 +62,7 @@ namespace lib4neuro {
         while( error_current >= error_previous ){
             (*parameters_after)[max_dir_idx] = (*parameters_before)[max_dir_idx] - step_coefficient * (*direction)[max_dir_idx];

-            error_current = ef.eval( parameters_after );
+            error_current = ef.eval( parameters_after.get() );

             if( step_coefficient < 1e-32){
//                COUT_DEBUG("  Attempting to find a feasible direction in one dimension was NOT SUCCESSFUL" << std::endl);
                 for (i = 0; i < direction->size(); ++i) {
@@ -111,11 +111,11 @@ namespace lib4neuro {

         size_t n_parameters = ef.get_dimension();

-        std::shared_ptr<std::vector<double>> gradient_current(new std::vector<double>(n_parameters));
-        std::shared_ptr<std::vector<double>> gradient_prev(new std::vector<double>(n_parameters));
-        std::shared_ptr<std::vector<double>> params_current = ef.get_parameters();
-        std::shared_ptr<std::vector<double>> params_prev(new std::vector<double>(n_parameters));
-        std::shared_ptr<std::vector<double>> ptr_mem;
+        std::vector<double>* gradient_current(new std::vector<double>(n_parameters));
+        std::vector<double>* gradient_prev(new std::vector<double>(n_parameters));
+        std::vector<double>* params_current = ef.get_parameters();
+        std::vector<double>* params_prev(new std::vector<double>(n_parameters));
+        std::vector<double>* ptr_mem;

//        std::vector<double> gradient_mem( n_parameters );
//        std::vector<double> parameters_analytical( n_parameters );
@@ -295,9 +295,10 @@ namespace lib4neuro {
 #endif
         }

-        this->optimal_parameters = *params_current;
-        std::shared_ptr<std::vector<double>> params = std::make_shared<std::vector<double>>(this->optimal_parameters);
-        ef.get_network_instance()->copy_parameter_space( params );
+        this->optimal_parameters = params_current;
+//        std::shared_ptr<std::vector<double>> params;
+//        params.reset(this->optimal_parameters);
+        ef.get_network_instance()->copy_parameter_space( this->optimal_parameters );

//        delete gradient_current;
//        delete gradient_prev;
@@ -305,9 +306,9 @@ namespace lib4neuro {
//        delete params_prev;
     }

-    std::shared_ptr<std::vector<double>> GradientDescent::get_parameters() {
-        std::shared_ptr<std::vector<double>> ret;
-        ret.reset(&this->optimal_parameters);
-        return ret;
+    std::vector<double>* GradientDescent::get_parameters() {
+//        std::shared_ptr<std::vector<double>> ret;
+//        ret.reset(&this->optimal_parameters);
+        return this->optimal_parameters;
     }
 }
diff --git a/src/LearningMethods/GradientDescent.h b/src/LearningMethods/GradientDescent.h
index ab507466a1073049d7f99e2af64a2a7d7ec52648..3560c5123c21471d0c6d63ab1f3dbb3d137402fb 100644
--- a/src/LearningMethods/GradientDescent.h
+++ b/src/LearningMethods/GradientDescent.h
@@ -44,7 +44,7 @@ namespace lib4neuro {
        /**
         * Vector of minima coordinates
         */
-        std::vector<double> optimal_parameters;
+        std::vector<double>* optimal_parameters = new std::vector<double>(5);

        /**
         * Adaptive calculation of the step-size based on several historical characteristics.
@@ -116,7 +116,7 @@ namespace lib4neuro {
         *
         * @return
         */
-        LIB4NEURO_API std::shared_ptr<std::vector<double>> get_parameters() override;
+        LIB4NEURO_API std::vector<double>* get_parameters() override;
    };
}
diff --git a/src/LearningMethods/GradientDescentBB.cpp b/src/LearningMethods/GradientDescentBB.cpp
index 5577777f6e4a594594ce1996f5c64b75d7b6f7a9..3d8626d0d0c1b85064cd69a9db84eaa2827f8ab9 100644
--- a/src/LearningMethods/GradientDescentBB.cpp
+++ b/src/LearningMethods/GradientDescentBB.cpp
@@ -53,13 +53,13 @@ namespace lib4neuro {

         size_t n_parameters = ef.get_dimension();

-        std::shared_ptr<std::vector<double>> gradient_current(new std::vector<double>(n_parameters));
-        std::shared_ptr<std::vector<double>> gradient_prev(new std::vector<double>(n_parameters));
-        std::shared_ptr<std::vector<double>> params_current = ef.get_parameters();
-        std::shared_ptr<std::vector<double>> params_prev(new std::vector<double>(n_parameters));
-        std::shared_ptr<std::vector<double>> params_best(new std::vector<double>(*params_current));
+        std::vector<double>* gradient_current(new std::vector<double>(n_parameters));
+        std::vector<double>* gradient_prev(new std::vector<double>(n_parameters));
+        std::vector<double>* params_current = ef.get_parameters();
+        std::vector<double>* params_prev(new std::vector<double>(n_parameters));
+        std::vector<double>* params_best(new std::vector<double>(*params_current));

-        std::shared_ptr<std::vector<double>> ptr_mem;
+        std::vector<double>* ptr_mem;

         double alpha = -1.0, cc, gg;
         std::vector<double> dot__( 3 );
@@ -209,11 +209,12 @@ namespace lib4neuro {
 #endif
         }

-        this->optimal_parameters = *params_best;
+        this->optimal_parameters = params_best;

//        ef.analyze_error_gradient(*params_current, *gradient_current, 1.0, this->batch);
-        std::shared_ptr<std::vector<double>> params = std::make_shared<std::vector<double>>(this->optimal_parameters);
-        ef.get_network_instance()->copy_parameter_space(params);
+//        std::vector<double>* params;
+//        params.reset(this->optimal_parameters);
+        ef.get_network_instance()->copy_parameter_space(this->optimal_parameters);
//
//        delete gradient_current;
//        delete gradient_prev;
@@ -222,10 +223,10 @@ namespace lib4neuro {
//        delete params_best;
     }

-    std::shared_ptr<std::vector<double>> GradientDescentBB::get_parameters() {
-        std::shared_ptr<std::vector<double>> ret;
-        ret.reset(&this->optimal_parameters);
-        return ret;
+    std::vector<double>* GradientDescentBB::get_parameters() {
+//        std::vector<double>* ret;
+//        ret.reset(&this->optimal_parameters);
+        return this->optimal_parameters;
     }
 }
diff --git a/src/LearningMethods/GradientDescentBB.h b/src/LearningMethods/GradientDescentBB.h
index 568c856525c4132b4c31eb4d3b3622502a8351b0..5b376f0728e4343123efb0ce5c6cbf3c3698712e 100644
--- a/src/LearningMethods/GradientDescentBB.h
+++ b/src/LearningMethods/GradientDescentBB.h
@@ -55,7 +55,7 @@ namespace lib4neuro {
        /**
         * Vector of minima coordinates
         */
-        std::vector<double> optimal_parameters;
+        std::vector<double>* optimal_parameters = new std::vector<double>(5);

    public:

@@ -82,7 +82,7 @@ namespace lib4neuro {
         *
         * @return
         */
-        LIB4NEURO_API std::shared_ptr<std::vector<double>> get_parameters() override;
+        LIB4NEURO_API std::vector<double>* get_parameters() override;
    };
}
diff --git a/src/LearningMethods/GradientDescentSingleItem.cpp b/src/LearningMethods/GradientDescentSingleItem.cpp
index 953396e6bffcfc68b20dc247e4d8ee9b8c141bb9..1340f33e4a9fda0977fd111e8e2af9174217099a 100644
--- a/src/LearningMethods/GradientDescentSingleItem.cpp
+++ b/src/LearningMethods/GradientDescentSingleItem.cpp
@@ -20,10 +20,10 @@ namespace lib4neuro {
     }

     GradientDescentSingleItem::~GradientDescentSingleItem() {
-//        if (this->optimal_parameters) {
-//            delete this->optimal_parameters;
-//            this->optimal_parameters = nullptr;
-//        }
+        if (this->optimal_parameters) {
+            delete this->optimal_parameters;
+            this->optimal_parameters = nullptr;
+        }
     }

@@ -44,7 +44,7 @@ namespace lib4neuro {
             (*shifted_x).at(i) = x.at(i) - alpha * d.at(i);
         }

-        value_shifted = f.eval( shifted_x );
+        value_shifted = f.eval( shifted_x.get() );
     }
//        std::cout << "Error reduction: " << value - value_shifted << std::endl;
         return alpha;
@@ -98,16 +98,17 @@ namespace lib4neuro {
         }
         COUT_DEBUG("Iteration: " << iter << ", Total elements in train set: " << total_elements << ", # of elements with high error: " << updated_elements << ", max. error: " << max_error << std::endl);

-        this->optimal_parameters = parameter_vector;
-        std::shared_ptr<std::vector<double>> opt_params = std::make_shared<std::vector<double>>(this->optimal_parameters);
-        ef.get_network_instance()->copy_parameter_space( opt_params );
+        this->optimal_parameters = &parameter_vector;
+//        std::shared_ptr<std::vector<double>> opt_params;
+//        opt_params.reset(this->optimal_parameters);
+        ef.get_network_instance()->copy_parameter_space( this->optimal_parameters );
     }

-    std::shared_ptr<std::vector<double>> GradientDescentSingleItem::get_parameters() {
-        std::shared_ptr<std::vector<double>> ret;
-        ret.reset(&this->optimal_parameters);
-        return ret;
+    std::vector<double>* GradientDescentSingleItem::get_parameters() {
+//        std::shared_ptr<std::vector<double>> ret;
+//        ret.reset(&this->optimal_parameters);
+        return this->optimal_parameters;
     }
 }
diff --git a/src/LearningMethods/GradientDescentSingleItem.h b/src/LearningMethods/GradientDescentSingleItem.h
index bc57967f08173ae3abb5dc9ebbf65aa11a4d61c6..49e8d20d8707f34f6be1b204d92b403632e6743e 100644
--- a/src/LearningMethods/GradientDescentSingleItem.h
+++ b/src/LearningMethods/GradientDescentSingleItem.h
@@ -56,7 +56,7 @@ namespace lib4neuro {
        /**
         * Vector of minima coordinates
         */
-        std::vector<double> optimal_parameters;
+        std::vector<double>* optimal_parameters = new std::vector<double>(5);

    protected:

@@ -97,7 +97,7 @@ namespace lib4neuro {
         *
         * @return
         */
-        LIB4NEURO_API std::shared_ptr<std::vector<double>> get_parameters() override;
+        LIB4NEURO_API std::vector<double>* get_parameters() override;
    };
}
diff --git a/src/LearningMethods/LearningMethod.h b/src/LearningMethods/LearningMethod.h
index 87c129ae2ff4a52ed0a04a7a55fcbf74d8920720..7173ce75870c1df8b58dd4dd2d0a31ec96de8507 100644
--- a/src/LearningMethods/LearningMethod.h
+++ b/src/LearningMethods/LearningMethod.h
@@ -24,7 +24,7 @@ namespace lib4neuro {
        /**
         * Updates the optimal weight&bias settings in the passed vector
         */
-        virtual std::shared_ptr<std::vector<double>> get_parameters() = 0;
+        virtual std::vector<double>* get_parameters() = 0;
    };

    class GradientLearningMethod : public LearningMethod {
diff --git a/src/LearningMethods/LearningSequence.cpp b/src/LearningMethods/LearningSequence.cpp
index 31e05b6ad197d78591391c2cceab80a8de205edf..39502e6f983e398e8addcdc49b0358243eeff2c9 100644
--- a/src/LearningMethods/LearningSequence.cpp
+++ b/src/LearningMethods/LearningSequence.cpp
@@ -18,14 +18,14 @@ namespace lib4neuro {

     LearningSequence::~LearningSequence() = default;

-    std::shared_ptr<std::vector<double>> LearningSequence::get_parameters() {
+    std::vector<double>* LearningSequence::get_parameters() {
         if( this->learning_sequence.size() > 0 ){
             return this->learning_sequence.at(0).get()->get_parameters( );
         }

-        std::shared_ptr<std::vector<double>> ret;
-        ret.reset();
-        return ret;
+//        std::shared_ptr<std::vector<double>> ret;
+//        ret.reset();
+        return nullptr;
     }

     void LearningSequence::add_learning_method(std::shared_ptr<LearningMethod> method) {
@@ -43,14 +43,14 @@ namespace lib4neuro {
         double the_best_error = error;
         int mcycles = this->max_number_of_cycles, cycle_idx = 0;

-        std::shared_ptr<std::vector<double>> best_params = std::make_shared<std::vector<double>>(this->best_parameters);
+//        std::shared_ptr<std::vector<double>> best_params = std::make_shared<std::vector<double>>(this->best_parameters);

         while( error > this->tol && mcycles != 0){
             mcycles--;
             cycle_idx++;

             puts("*********************** 7");
-            for( auto m: this->learning_sequence ){
+            for(auto m: this->learning_sequence ){

                 puts("*********************** 8");
@@ -60,6 +60,8 @@ namespace lib4neuro {
                 error = ef.eval(m->get_parameters());
                 puts("*********************** 10");

+//                std::shared_ptr<std::vector<double>> params;
+//                params.reset(m->get_parameters());
                 ef.get_network_instance()->copy_parameter_space(m->get_parameters());
@@ -67,15 +69,17 @@ namespace lib4neuro {
                 if( error < the_best_error ){
                     the_best_error = error;
                     this->best_parameters = *ef.get_parameters();
+//                    best_params = ef.get_parameters();
+//                    best_params.reset(ef.get_parameters().get());
                 }

                 if( error <= this->tol ){
-                    ef.get_network_instance()->copy_parameter_space( best_params );
+                    ef.get_network_instance()->copy_parameter_space( &this->best_parameters );
                     return;
                 }
             }
             COUT_DEBUG("Cycle: " << cycle_idx << ", the lowest error: " << the_best_error << std::endl );
         }
-        ef.get_network_instance()->copy_parameter_space( best_params );
+        ef.get_network_instance()->copy_parameter_space( &this->best_parameters );
     }
 }
\ No newline at end of file
diff --git a/src/LearningMethods/LearningSequence.h b/src/LearningMethods/LearningSequence.h
index ee102f36c293bcf77f32d433e1e2939357c15209..7643a3449ecca15fd00a9169216c8b76bd9b726e 100644
--- a/src/LearningMethods/LearningSequence.h
+++ b/src/LearningMethods/LearningSequence.h
@@ -65,7 +65,7 @@ namespace lib4neuro {
         *
         * @return
         */
-        LIB4NEURO_API std::shared_ptr<std::vector<double>> get_parameters() override;
+        LIB4NEURO_API std::vector<double>* get_parameters() override;

        /**
         *
diff --git a/src/LearningMethods/LevenbergMarquardt.cpp b/src/LearningMethods/LevenbergMarquardt.cpp
index 295f1a99f02155c445b997546bd03348a471a523..98d248af1f7366f09d9a7cecda95d4607d2f070c 100644
--- a/src/LearningMethods/LevenbergMarquardt.cpp
+++ b/src/LearningMethods/LevenbergMarquardt.cpp
@@ -92,7 +92,7 @@ namespace lib4neuro {
         this->p_impl->lambda_increase = lambda_increase;
         this->p_impl->lambda_decrease = lambda_decrease;
         this->p_impl->maximum_niters = max_iters;
-        this->p_impl->optimal_parameters; // = new std::vector<double>();
+//        this->p_impl->optimal_parameters = new std::vector<double>(5);
     }

     void LevenbergMarquardt::optimize(lib4neuro::ErrorFunction& ef,
@@ -122,7 +122,7 @@ namespace lib4neuro {
         if( this->p_impl->batch_size > 0 ){
             n_data_points = this->p_impl->batch_size;
         }
-        std::shared_ptr<std::vector<double>> params_current = ef.get_parameters();
+        std::vector<double>* params_current = ef.get_parameters();

         std::shared_ptr<std::vector<double>> params_tmp;
         params_tmp.reset(new std::vector<double>(n_parameters));
@@ -194,7 +194,7 @@ namespace lib4neuro {
                 update_norm += update.at(i) * update.at(i);
             }
             update_norm = std::sqrt(update_norm);
-            current_err = ef.eval(params_tmp);
+            current_err = ef.eval(params_tmp.get());

             /* Check, if the parameter update improved the function */
             if(current_err < prev_err) {
@@ -234,17 +234,23 @@ namespace lib4neuro {
         /* Store the optimized parameters */
         this->p_impl->optimal_parameters = *params_current;

-        std::shared_ptr<std::vector<double>> params = std::make_shared<std::vector<double>>(this->p_impl->optimal_parameters);
-        ef.get_network_instance()->copy_parameter_space(params);
+        /* Dealloc vector of parameters */
+        if(params_current) {
+            delete params_current;
+            params_current = nullptr;
+        }
+
+//        std::shared_ptr<std::vector<double>> params = std::make_shared<std::vector<double>>(this->p_impl->optimal_parameters);
+        ef.get_network_instance()->copy_parameter_space(&this->p_impl->optimal_parameters);

//        delete params_tmp;
     }

-    std::shared_ptr<std::vector<double>> LevenbergMarquardt::get_parameters() {
-        std::shared_ptr<std::vector<double>> ret;
-        ret.reset(&this->p_impl->optimal_parameters);
-        return ret;
+    std::vector<double>* LevenbergMarquardt::get_parameters() {
+//        std::shared_ptr<std::vector<double>> ret;
+//        ret.reset(&this->p_impl->optimal_parameters);
+        return &this->p_impl->optimal_parameters;
     }

     LevenbergMarquardt::~LevenbergMarquardt() = default;
diff --git a/src/LearningMethods/LevenbergMarquardt.h b/src/LearningMethods/LevenbergMarquardt.h
index 8f1906902a1cabddf5b724a867ca67e01045e200..b831b62a178f53e3d3e1626e53ba6d08b504fc88 100644
--- a/src/LearningMethods/LevenbergMarquardt.h
+++ b/src/LearningMethods/LevenbergMarquardt.h
@@ -47,7 +47,7 @@ namespace lib4neuro {
                      LM_UPDATE_TYPE update_type,
                      std::ofstream* ofs = nullptr);

-        std::shared_ptr<std::vector<double>> get_parameters() override;
+        std::vector<double>* get_parameters() override;

        ~LevenbergMarquardt();
    };
diff --git a/src/LearningMethods/ParticleSwarm.cpp b/src/LearningMethods/ParticleSwarm.cpp
index ed4590e2d96516685d6bf3bf38b9d683b9922f7c..23dca22026dfa4996ab6e11d62146a16f95a7c90 100644
--- a/src/LearningMethods/ParticleSwarm.cpp
+++ b/src/LearningMethods/ParticleSwarm.cpp
@@ -81,8 +81,8 @@ Particle::Particle(lib4neuro::ErrorFunction *ef, std::vector<double> *domain_bou
         (*this->optimal_coordinate)[i] = (*this->coordinate)[i];
     }

-    std::shared_ptr<std::vector<double>> coord(this->coordinate);
-    this->optimal_value = this->ef->eval(coord);
+//    std::shared_ptr<std::vector<double>> coord(this->coordinate);
+    this->optimal_value = this->ef->eval(this->coordinate);
 }
@@ -119,8 +119,8 @@ Particle::Particle(lib4neuro::ErrorFunction *ef, std::vector<double> *central_sy
         (*this->optimal_coordinate)[i] = (*this->coordinate)[i];
     }

-    std::shared_ptr<std::vector<double>> coord(this->coordinate);
-    this->optimal_value = this->ef->eval(coord);
+//    std::shared_ptr<std::vector<double>> coord(this->coordinate);
+    this->optimal_value = this->ef->eval(this->coordinate);
 }
@@ -210,8 +210,8 @@ double Particle::change_coordinate(double w, double c1, double c2, std::vector<d
         output += std::abs(vel_mem);
     }

-    std::shared_ptr<std::vector<double>> coord(this->coordinate);
-    vel_mem = this->ef->eval(coord);
+//    std::shared_ptr<std::vector<double>> coord(this->coordinate);
+    vel_mem = this->ef->eval(this->coordinate);
     this->current_val = vel_mem;

     if(vel_mem < this->optimal_value){
@@ -306,14 +306,14 @@ namespace lib4neuro {
             if (this->particle_swarm.at(pi)) {
                 delete this->particle_swarm.at(pi);
             }
-            this->particle_swarm.at(pi) = new Particle(&ef, ef.get_parameters().get(), this->radius_factor);
+            this->particle_swarm.at(pi) = new Particle(&ef, ef.get_parameters(), this->radius_factor);
         }
         this->radius_factor *= 1.25;

//        if (!this->p_min_glob) {
//            this->p_min_glob = new std::vector<double>(this->func_dim);
//        } else {
-        this->p_min_glob.resize(this->func_dim);
+        this->p_min_glob->resize(this->func_dim);
//        }

         size_t outer_it = 0;
@@ -333,7 +333,7 @@ namespace lib4neuro {
         double euclidean_dist;
         double current_err = -1;

-        this->determine_optimal_coordinate_and_value(this->p_min_glob, optimal_value);
+        this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value);
//        for(unsigned int i = 0; i < this->n_particles; ++i){
//            this->particle_swarm[i]->print_coordinate();
//        }
@@ -346,10 +346,10 @@ namespace lib4neuro {
             //////////////////////////////////////////////////
             // Clustering algorithm - termination condition //
             //////////////////////////////////////////////////
-            particle = this->determine_optimal_coordinate_and_value(this->p_min_glob, optimal_value);
+            particle = this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value);

-            if (std::find(global_best_vec.begin(), global_best_vec.end(), this->p_min_glob) == global_best_vec.end()) {
-                global_best_vec.emplace_back(this->p_min_glob); // TODO rewrite as std::set
+            if (std::find(global_best_vec.begin(), global_best_vec.end(), *this->p_min_glob) == global_best_vec.end()) {
+                global_best_vec.emplace_back(*this->p_min_glob); // TODO rewrite as std::set
             }
             cluster.insert(particle);
@@ -373,7 +373,7 @@ namespace lib4neuro {
             for (size_t pi = 0; pi < this->n_particles; pi++) {
                 particle = this->particle_swarm.at(pi);
-                tmp_velocity = particle->change_coordinate(this->w, this->c1, this->c2, this->p_min_glob,
+                tmp_velocity = particle->change_coordinate(this->w, this->c1, this->c2, *this->p_min_glob,
                                                            global_best_vec);

//                particle->print_coordinate();
@@ -413,8 +413,8 @@ namespace lib4neuro {
//                }
//            }

-            std::shared_ptr<std::vector<double>> coord = std::make_shared<std::vector<double>>(this->p_min_glob);
-            current_err = ef.eval(coord);
+//            std::shared_ptr<std::vector<double>> coord = std::make_shared<std::vector<double>>(this->p_min_glob);
+            current_err = ef.eval(this->p_min_glob);

             COUT_DEBUG(std::string("Iteration: ") << (unsigned int)(outer_it)
                                                   << ". Total error: " << current_err
@@ -447,7 +447,7 @@ namespace lib4neuro {
                   << ". Objective function value: " << optimal_value
                   << "." << std::endl );

-        this->determine_optimal_coordinate_and_value(this->p_min_glob, optimal_value);
+        this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value);
        //TODO rewrite following output using COUT_INFO
         if (outer_it < this->iter_max) {
            /* Convergence reached */
@@ -457,8 +457,9 @@ namespace lib4neuro {
            COUT_INFO( std::endl << "Max number of iterations reached (" << outer_it << ")! Objective function value: " << optimal_value <<std:: endl);
        }

-        std::shared_ptr<std::vector<double>> coord = std::make_shared<std::vector<double>>(this->p_min_glob);
-        ef.get_network_instance()->copy_parameter_space(coord);
+//        std::shared_ptr<std::vector<double>> coord;
+//        coord.reset(this->p_min_glob);
+        ef.get_network_instance()->copy_parameter_space(this->p_min_glob);

         delete centroid;
     }
@@ -533,10 +534,10 @@ namespace lib4neuro {
         return std::sqrt(dist);
     }

-    std::shared_ptr<std::vector<double>> ParticleSwarm::get_parameters() {
-        std::shared_ptr<std::vector<double>> ret;
-        ret.reset(&this->p_min_glob);
-        return ret;
+    std::vector<double>* ParticleSwarm::get_parameters() {
+//        std::shared_ptr<std::vector<double>> ret;
+//        ret.reset(&this->p_min_glob);
+        return this->p_min_glob;
     }

     void ParticleSwarm::init_constructor(std::vector<double>* domain_bounds,
diff --git a/src/LearningMethods/ParticleSwarm.h b/src/LearningMethods/ParticleSwarm.h
index 80da9d60bc3525491b4c6d98a41ed0f575da811d..6cd389f6bd1b97cfb5331d0e9d1bc701021d8219 100644
--- a/src/LearningMethods/ParticleSwarm.h
+++ b/src/LearningMethods/ParticleSwarm.h
@@ -200,7 +200,7 @@ namespace lib4neuro {
        /**
         * Coordinates of the found global minima
         */
-        std::vector<double> p_min_glob;
+        std::vector<double>* p_min_glob = new std::vector<double>;

    protected:
        /**
@@ -309,7 +309,7 @@ namespace lib4neuro {
         *
         * @return
         */
-        LIB4NEURO_API std::shared_ptr<std::vector<double>> get_parameters() override;
+        LIB4NEURO_API std::vector<double>* get_parameters() override;
    };
}
diff --git a/src/LearningMethods/RandomSolution.cpp b/src/LearningMethods/RandomSolution.cpp
index f7dd50310fa1bc9ac3af1d9de05861eefce91374..d022e8e4d62f89fe0f9138a2a5cff2544a3ec1d3 100644
--- a/src/LearningMethods/RandomSolution.cpp
+++ b/src/LearningMethods/RandomSolution.cpp
@@ -14,20 +14,29 @@ namespace lib4neuro {
//        this->optimal_parameters = new std::vector<double>();
     }

-    RandomSolution::~RandomSolution() {
+    RandomSolution::~RandomSolution() {}

-    }
+    std::vector<double>* RandomSolution::get_parameters() {
+//        std::shared_ptr<std::vector<double>> ret;
+//        ret.reset(&this->optimal_parameters);
+//        return ret;

-    std::shared_ptr<std::vector<double>> RandomSolution::get_parameters() {
-        std::shared_ptr<std::vector<double>> ret;
-        ret.reset(&this->optimal_parameters);
-        return ret;
+        return &this->optimal_parameters;
     }

     void RandomSolution::optimize(lib4neuro::ErrorFunction &ef,
                                   std::ofstream *ofs) {
         ef.get_network_instance()->randomize_parameters();
-        this->optimal_parameters = *ef.get_parameters();
-        COUT_INFO("Producing a random solution... error: " << ef.eval(ef.get_parameters()) << std::endl);
+
+        auto tmp = ef.get_parameters();
+
+        this->optimal_parameters = *tmp;
+
+        if(tmp) {
+            delete tmp;
+            tmp = nullptr;
+        }
+
+        COUT_INFO("Producing a random solution... error: " << ef.eval(&this->optimal_parameters) << std::endl);
     }
 }
\ No newline at end of file
diff --git a/src/LearningMethods/RandomSolution.h b/src/LearningMethods/RandomSolution.h
index e7088f55def238b60e019b98e169cda3511bddee..4b66a060bf54fe0dbec4c3a9e095fd7879c1d8dc 100644
--- a/src/LearningMethods/RandomSolution.h
+++ b/src/LearningMethods/RandomSolution.h
@@ -26,10 +26,10 @@ namespace lib4neuro {

        ~RandomSolution();

-        virtual void optimize(lib4neuro::ErrorFunction &ef,
-                              std::ofstream *ofs = nullptr) override;
+        void optimize(lib4neuro::ErrorFunction &ef,
+                      std::ofstream *ofs = nullptr) override;

-        virtual std::shared_ptr<std::vector<double>> get_parameters() override;
+        std::vector<double>* get_parameters() override;

    };
diff --git a/src/Network/NeuralNetwork.cpp b/src/Network/NeuralNetwork.cpp
index 0edfb96db6ff4aae2c5cb0b87fa1cbbd3356f2ab..25ff32608c1e1a3ecc64cafa32f4f736be7e1fdd 100644
--- a/src/Network/NeuralNetwork.cpp
+++ b/src/Network/NeuralNetwork.cpp
@@ -411,7 +411,7 @@ namespace lib4neuro {
     }

     void NeuralNetwork::eval_single_debug(::std::vector<double> &input, ::std::vector<double> &output,
-                                          ::std::shared_ptr<std::vector<double>> custom_weights_and_biases) {
+                                          std::vector<double>* custom_weights_and_biases) {
         if ((this->input_neuron_indices.size() * this->output_neuron_indices.size()) <= 0) {
             THROW_INVALID_ARGUMENT_ERROR("Input and output neurons have not been specified!");
         }
@@ -427,6 +427,8 @@ namespace lib4neuro {
         double potential, bias;
         int bias_idx;

+//        std::shared_ptr<std::vector<double>> params;
+//        params.reset(custom_weights_and_biases);
         this->copy_parameter_space(custom_weights_and_biases);

         this->analyze_layer_structure();
@@ -542,7 +544,7 @@ namespace lib4neuro {
         this->layers_analyzed = false;
     }

-    void NeuralNetwork::copy_parameter_space(std::shared_ptr<std::vector<double>> parameters) {
+    void NeuralNetwork::copy_parameter_space(std::vector<double>* parameters) {
         if (parameters != nullptr) {
             for (unsigned int i = 0; i < this->connection_weights.size(); ++i) {
                 this->connection_weights.at(i) = (*parameters).at(i);
@@ -575,7 +577,7 @@ namespace lib4neuro {

     void NeuralNetwork::eval_single(::std::vector<double>& input, ::std::vector<double>& output,
-                                    ::std::shared_ptr<std::vector<double>> custom_weights_and_biases) {
+                                    std::vector<double>* custom_weights_and_biases) {

         if ((this->input_neuron_indices.size() * this->output_neuron_indices.size()) <= 0) {
             THROW_INVALID_ARGUMENT_ERROR("Input and output neurons have not been specified!");
@@ -592,6 +594,8 @@ namespace lib4neuro {
         double potential, bias;
         int bias_idx;

+//        std::shared_ptr<std::vector<double>> params;
+//        params.reset(custom_weights_and_biases);
         this->copy_parameter_space(custom_weights_and_biases);

         this->analyze_layer_structure();
diff --git a/src/Network/NeuralNetwork.h b/src/Network/NeuralNetwork.h
index 54d8b5635ff780ead151dd68c0b7945869dd0a3e..5205b598fbd7d0f6ebdc4a5b50665ca6d7d13c28 100644
--- a/src/Network/NeuralNetwork.h
+++ b/src/Network/NeuralNetwork.h
@@ -184,7 +184,7 @@ namespace lib4neuro {
         * @param custom_weights_and_biases
         */
        LIB4NEURO_API virtual void eval_single_debug(std::vector<double> &input, std::vector<double> &output,
-                                                     std::shared_ptr<std::vector<double>> custom_weights_and_biases = nullptr);
+                                                     std::vector<double>* custom_weights_and_biases = nullptr);

        /**
@@ -232,7 +232,7 @@ namespace lib4neuro {
         * Replaces the values in @{this->connection_weights} and @{this->neuron_biases} by the provided values
         * @param parameters
         */
-        LIB4NEURO_API virtual void copy_parameter_space(std::shared_ptr<std::vector<double>> parameters);
+        LIB4NEURO_API virtual void copy_parameter_space(std::vector<double>* parameters);

        /**
         * Copies the pointers @{this->connection_weights} and @{this->neuron_biases} from the parental network, sets
@@ -248,7 +248,7 @@ namespace lib4neuro {
         * @param custom_weights_and_biases
         */
        LIB4NEURO_API virtual void eval_single(std::vector<double> &input, std::vector<double> &output,
-                                               std::shared_ptr<std::vector<double>> custom_weights_and_biases = nullptr);
+                                               std::vector<double>* custom_weights_and_biases = nullptr);

        /**
         *
diff --git a/src/Network/NeuralNetworkSum.cpp b/src/Network/NeuralNetworkSum.cpp
index d4a66ab33b4332a9b357b7bd8eff1b0532148514..2fc57ef9e7e23f7a2dee81708f4a7456d98bcb60 100644
--- a/src/Network/NeuralNetworkSum.cpp
+++ b/src/Network/NeuralNetworkSum.cpp
@@ -49,7 +49,7 @@ namespace lib4neuro {
     }

     void NeuralNetworkSum::eval_single(std::vector<double> &input, std::vector<double> &output,
-                                       std::shared_ptr<std::vector<double>> custom_weights_and_biases) {
+                                       std::vector<double>* custom_weights_and_biases) {
         std::vector<double> mem_output(output.size());
         std::fill(output.begin(), output.end(), 0.0);
@@ -155,7 +155,7 @@ namespace lib4neuro {
     }

     void NeuralNetworkSum::eval_single_debug(std::vector<double> &input, std::vector<double> &output,
-                                             std::shared_ptr<std::vector<double>> custom_weights_and_biases) {
+                                             std::vector<double>* custom_weights_and_biases) {
         std::vector<double> mem_output(output.size());
         std::fill(output.begin(), output.end(), 0.0);
diff --git a/src/Network/NeuralNetworkSum.h b/src/Network/NeuralNetworkSum.h
index 0fcfc284c776b1bbdf970191eeb013c68b5259b1..fc6736079f09a8885a1c2cc53e85d5df09e7f009 100644
--- a/src/Network/NeuralNetworkSum.h
+++ b/src/Network/NeuralNetworkSum.h
@@ -41,7 +41,7 @@ namespace lib4neuro {
         * @param custom_weights_and_biases
         */
        LIB4NEURO_API void eval_single(std::vector<double> &input, std::vector<double> &output,
-                                       std::shared_ptr<std::vector<double>> custom_weights_and_biases = nullptr) override;
+                                       std::vector<double>* custom_weights_and_biases = nullptr) override;

        /**
         *
@@ -50,7 +50,7 @@ namespace lib4neuro {
         * @param custom_weights_and_biases
         */
        LIB4NEURO_API void eval_single_debug(std::vector<double> &input, std::vector<double> &output,
-                                             std::shared_ptr<std::vector<double>> custom_weights_and_biases = nullptr) override;
+                                             std::vector<double>* custom_weights_and_biases = nullptr) override;

        /**
diff --git a/src/Solvers/DESolver.cpp b/src/Solvers/DESolver.cpp
index b3aa6697a27372fea46a0e6ef2bb30879c2384e6..b8f3caa6bfa6cf8b47420dc889a1a81dbfbb5c49 100644
--- a/src/Solvers/DESolver.cpp
+++ b/src/Solvers/DESolver.cpp
@@ -408,6 +408,8 @@ namespace lib4neuro {
         printf("error before optimization: %f\n", total_error.eval(nullptr));

         learning_method.optimize(total_error);
+//        std::shared_ptr<std::vector<double>> params;
+//        params.reset(learning_method.get_parameters());
         this->solution->copy_parameter_space(learning_method.get_parameters());

         printf("error after optimization: %f\n", total_error.eval(nullptr));
@@ -428,7 +430,7 @@ namespace lib4neuro {
     DESolver::eval_equation(size_t equation_idx, std::shared_ptr<std::vector<double>>weight_and_biases,
                             std::vector<double> &input) {
         std::vector<double> output(1);

-        this->differential_equations.at(equation_idx)->eval_single(input, output, weight_and_biases);
+        this->differential_equations.at(equation_idx)->eval_single(input, output, weight_and_biases.get());

//        printf("Input: ");
//        for( auto e: input ){
diff --git a/src/examples/net_test_1.cpp b/src/examples/net_test_1.cpp
index 141de4534f8a35e7dc1b34bf6cc3e2900532c58c..2514bf08823f864dfb5fea9d6134c153dad564f7 100644
--- a/src/examples/net_test_1.cpp
+++ b/src/examples/net_test_1.cpp
@@ -49,8 +49,9 @@ void optimize_via_particle_swarm( l4n::NeuralNetwork &net, l4n::ErrorFunction &e
     );
     swarm_01.optimize( ef );

-    std::shared_ptr<std::vector<double>> parameters = swarm_01.get_parameters();
-    net.copy_parameter_space(parameters);
+//    std::shared_ptr<std::vector<double>> parameters;
+//    parameters.reset(swarm_01.get_parameters());
+    net.copy_parameter_space(swarm_01.get_parameters());

     /* ERROR CALCULATION */
     std::cout << "Run finished! Error of the network[Particle swarm]: " << ef.eval( nullptr ) << std::endl;
@@ -64,8 +65,9 @@ void optimize_via_gradient_descent( l4n::NeuralNetwork &net, l4n::ErrorFunction

     gd.optimize( ef );

-    std::shared_ptr<std::vector<double>> parameters = gd.get_parameters();
-    net.copy_parameter_space(parameters);
+//    std::shared_ptr<std::vector<double>> parameters;
+//    parameters.reset(gd.get_parameters());
+    net.copy_parameter_space(gd.get_parameters());

     /* ERROR CALCULATION */
     std::cout << "Run finished! Error of the network[Gradient descent]: " << ef.eval( nullptr ) << std::endl;
diff --git a/src/examples/net_test_2.cpp b/src/examples/net_test_2.cpp
index 09920239a2e6ea7b384fc535d6b6d931bbf4837f..3d193053339dab85ed614145e80d4fbc7beb5472 100644
--- a/src/examples/net_test_2.cpp
+++ b/src/examples/net_test_2.cpp
@@ -48,8 +48,9 @@ void optimize_via_particle_swarm( l4n::NeuralNetwork &net, l4n::ErrorFunction &e
     );
     swarm_01.optimize( ef );

-    std::shared_ptr<std::vector<double>> parameters = swarm_01.get_parameters();
-    net.copy_parameter_space(parameters);
+//    std::shared_ptr<std::vector<double>> parameters;
+//    parameters.reset(swarm_01.get_parameters());
+    net.copy_parameter_space(swarm_01.get_parameters());

     std::cout << "Run finished! Error of the network[Particle swarm]: " << ef.eval( nullptr ) << std::endl;
     std::cout << "***********************************************************************************************************************" <<std::endl;
@@ -61,8 +62,9 @@ void optimize_via_gradient_descent( l4n::NeuralNetwork &net, l4n::ErrorFunction

     gd.optimize( ef );

-    std::shared_ptr<std::vector<double>> parameters = gd.get_parameters();
-    net.copy_parameter_space(parameters);
+//    std::shared_ptr<std::vector<double>> parameters;
+//    parameters.reset(gd.get_parameters());
+    net.copy_parameter_space(gd.get_parameters());

     /* ERROR CALCULATION */
     std::cout << "Run finished! Error of the network[Gradient descent]: " << ef.eval( nullptr )<< std::endl;
diff --git a/src/examples/network_serialization.cpp b/src/examples/network_serialization.cpp
index ab2acef2a87d09ed1c2e581e7d65be614c20506f..63b6b10d4fda167acaba2c6fc780123ada50b223 100644
--- a/src/examples/network_serialization.cpp
+++ b/src/examples/network_serialization.cpp
@@ -116,8 +116,8 @@ int main() {
     );
     swarm_01.optimize( mse );

-    std::shared_ptr<std::vector<double>>parameters = swarm_01.get_parameters();
-    net.copy_parameter_space(parameters);
+    std::vector<double>* parameters = swarm_01.get_parameters();
+    net.copy_parameter_space(swarm_01.get_parameters());

     printf("w1 = %10.7f\n", parameters->at( 0 ));
     printf("w2 = %10.7f\n", parameters->at( 1 ));
diff --git a/src/examples/seminar.cpp b/src/examples/seminar.cpp
index 2d68db5a803e74ae38d3371c5b6b1f54f133bd7e..dea822d988df489904328f8bbe4b02f0eca2d083 100644
--- a/src/examples/seminar.cpp
+++ b/src/examples/seminar.cpp
@@ -116,8 +116,9 @@ int main() {
     );
     swarm_01.optimize( mse );

-    std::shared_ptr<std::vector<double>> parameters = swarm_01.get_parameters( );
-    XOR.copy_parameter_space(parameters);
+//    std::shared_ptr<std::vector<double>> parameters;
+//    parameters.reset(swarm_01.get_parameters( ));
+    XOR.copy_parameter_space(swarm_01.get_parameters( ));

     /* ERROR CALCULATION */
     double error = 0.0;
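
The recurring change in this patch replaces std::shared_ptr<std::vector<double>> with a raw, non-owning std::vector<double>* throughout the parameter-passing API. The deleted pattern ret.reset(&this->optimal_parameters) made a shared_ptr adopt the address of a class member, so its eventual implicit delete would run on memory never allocated with new — exactly the kind of invalid free the AddressSanitizer flags enabled in CMakeLists.txt are built to catch. Below is a minimal, self-contained sketch of the member-backed, non-owning accessor contract the optimizers' get_parameters() now follows; the Optimizer class and member names are illustrative placeholders, not lib4neuro code. (Note that ErrorFunction::get_parameters() in this same patch instead returns a freshly allocated vector that the caller must delete, as RandomSolution::optimize does.)

#include <vector>

// Hypothetical stand-in for a lib4neuro LearningMethod; names are illustrative.
class Optimizer {
public:
    // Non-owning accessor mirroring the new get_parameters() contract:
    // callers may read or modify the vector, but must never delete it.
    std::vector<double>* get_parameters() { return &optimal_parameters_; }

private:
    std::vector<double> optimal_parameters_{0.0, 0.0}; // owned by Optimizer
};

int main() {
    Optimizer opt;
    std::vector<double>* p = opt.get_parameters(); // borrowed pointer
    p->push_back(1.0);                             // mutates the member in place
    return 0;                                      // no delete here: Optimizer owns the storage
}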