Skip to content
Snippets Groups Projects
Commit 54a9fca6 authored by Martin Beseda's avatar Martin Beseda
Browse files

[MERGE] merged with kra568/tmp

parents 8fe9f5b8 35ac7bb1
No related branches found
No related tags found
No related merge requests found
Showing
with 60 additions and 71 deletions
......@@ -75,20 +75,19 @@ namespace lib4neuro {
return this->ds_test;
}
std::vector<double>* ErrorFunction::get_parameters() {
std::vector<double>* output;
output = new std::vector<double>(this->net->get_n_weights() + this->net->get_n_biases());
std::vector<double> ErrorFunction::get_parameters() {
std::vector<double> output(this->net->get_n_weights() + this->net->get_n_biases());
// std::vector<double>* output = new std::vector<double>(this->net->get_n_weights() + this->net->get_n_biases());
size_t i = 0;
for (auto el: *this->net->get_parameter_ptr_weights()) {
output->at(i) = el;
output[i] = el;
++i;
}
for (auto el: *this->net->get_parameter_ptr_biases()) {
output->at(i) = el;
output[i] = el;
++i;
}
......@@ -380,7 +379,7 @@ namespace lib4neuro {
//TODO check input vector sizes - they HAVE TO be allocated before calling this function
size_t n_parameters = this->get_dimension();
std::vector<double>* parameters = this->get_parameters();
std::vector<double> parameters = this->get_parameters();
double delta; // Complete step size
double former_parameter_value;
......@@ -388,23 +387,23 @@ namespace lib4neuro {
double f_val2; // f(x - delta)
for (size_t i = 0; i < n_parameters; i++) {
delta = h * (1 + std::abs(parameters->at(i)));
former_parameter_value = parameters->at(i);
delta = h * (1 + std::abs(parameters[i]));
former_parameter_value = parameters[i];
if(delta != 0) {
/* Computation of f_val1 = f(x + delta) */
parameters->at(i) = former_parameter_value + delta;
f_val1 = this->calculate_single_residual(input, output, parameters);
parameters[i] = former_parameter_value + delta;
f_val1 = this->calculate_single_residual(input, output, &parameters);
/* Computation of f_val2 = f(x - delta) */
parameters->at(i) = former_parameter_value - delta;
f_val2 = this->calculate_single_residual(input, output, parameters);
parameters[i] = former_parameter_value - delta;
f_val2 = this->calculate_single_residual(input, output, &parameters);
gradient->at(i) = (f_val1 - f_val2) / (2*delta);
}
/* Restore parameter to the former value */
parameters->at(i) = former_parameter_value;
parameters[i] = former_parameter_value;
}
}
......@@ -679,7 +678,7 @@ namespace lib4neuro {
return this->dimension;
}
std::vector<double>* ErrorSum::get_parameters() {
std::vector<double> ErrorSum::get_parameters() {
return this->summand->at(0)->get_parameters();
}
......
......@@ -68,7 +68,7 @@ namespace lib4neuro {
*
* @return
*/
virtual std::vector<double>* get_parameters();
virtual std::vector<double> get_parameters();
/**
* @return
......@@ -538,7 +538,7 @@ namespace lib4neuro {
*
* @return
*/
LIB4NEURO_API std::vector<double>* get_parameters() override;
LIB4NEURO_API std::vector<double> get_parameters() override;
/**
*
......
......@@ -113,7 +113,7 @@ namespace lib4neuro {
std::vector<double>* gradient_current(new std::vector<double>(n_parameters));
std::vector<double>* gradient_prev(new std::vector<double>(n_parameters));
std::vector<double>* params_current = ef.get_parameters();
std::vector<double>* params_current = new std::vector<double>(ef.get_parameters());
std::vector<double>* params_prev(new std::vector<double>(n_parameters));
std::vector<double>* ptr_mem;
......@@ -306,9 +306,9 @@ namespace lib4neuro {
// delete params_prev;
}
std::vector<double>* GradientDescent::get_parameters() {
std::vector<double> GradientDescent::get_parameters() {
// std::shared_ptr<std::vector<double>> ret;
// ret.reset(&this->optimal_parameters);
return this->optimal_parameters;
return std::vector<double>(*this->optimal_parameters);
}
}
......@@ -116,7 +116,7 @@ namespace lib4neuro {
*
* @return
*/
LIB4NEURO_API std::vector<double>* get_parameters() override;
LIB4NEURO_API std::vector<double> get_parameters() override;
};
}
......
......@@ -55,7 +55,7 @@ namespace lib4neuro {
std::vector<double>* gradient_current(new std::vector<double>(n_parameters));
std::vector<double>* gradient_prev(new std::vector<double>(n_parameters));
std::vector<double>* params_current = ef.get_parameters();
std::vector<double>* params_current = new std::vector<double>(ef.get_parameters());
std::vector<double>* params_prev(new std::vector<double>(n_parameters));
std::vector<double>* params_best(new std::vector<double>(*params_current));
......@@ -223,10 +223,10 @@ namespace lib4neuro {
// delete params_best;
}
std::vector<double>* GradientDescentBB::get_parameters() {
std::vector<double> GradientDescentBB::get_parameters() {
// std::vector<double>* ret;
// ret.reset(&this->optimal_parameters);
return this->optimal_parameters;
return std::vector<double>(*this->optimal_parameters);
}
}
......@@ -82,7 +82,7 @@ namespace lib4neuro {
*
* @return
*/
LIB4NEURO_API std::vector<double>* get_parameters() override;
LIB4NEURO_API std::vector<double> get_parameters() override;
};
}
......
......@@ -61,7 +61,7 @@ namespace lib4neuro {
size_t iter_idx = this->maximum_niters;
size_t dim = ef.get_network_instance()->get_n_biases() + ef.get_network_instance()->get_n_weights();
std::vector<double> parameter_vector = *ef.get_parameters();
std::vector<double> parameter_vector = ef.get_parameters();
std::vector<double> gradient_vector(dim);
std::vector<double> search_direction(dim);
std::vector<double> error_vector(ef.get_network_instance()->get_n_outputs());
......@@ -73,7 +73,7 @@ namespace lib4neuro {
updated_elements = 0;
std::fill(search_direction.begin(), search_direction.end(), 0);
for( size_t i = 0; i < ef.get_dataset()->get_n_elements(); ++i){
error = ef.eval_single_item_by_idx( i, ef.get_parameters(), error_vector );
error = ef.eval_single_item_by_idx( i, &parameter_vector, error_vector );
if( error > max_error ){
max_error = error;
......@@ -105,10 +105,10 @@ namespace lib4neuro {
}
std::vector<double>* GradientDescentSingleItem::get_parameters() {
std::vector<double> GradientDescentSingleItem::get_parameters() {
// std::shared_ptr<std::vector<double>> ret;
// ret.reset(&this->optimal_parameters);
return this->optimal_parameters;
return std::vector<double>(*this->optimal_parameters);
}
}
......@@ -97,7 +97,7 @@ namespace lib4neuro {
*
* @return
*/
LIB4NEURO_API std::vector<double>* get_parameters() override;
LIB4NEURO_API std::vector<double> get_parameters() override;
};
}
......
......@@ -24,7 +24,7 @@ namespace lib4neuro {
/**
* Updates the optimal weight&bias settings in the passed vector
*/
virtual std::vector<double>* get_parameters() = 0;
virtual std::vector<double> get_parameters() = 0;
};
class GradientLearningMethod : public LearningMethod {
......
......@@ -18,14 +18,8 @@ namespace lib4neuro {
LearningSequence::~LearningSequence() = default;
std::vector<double>* LearningSequence::get_parameters() {
if( this->learning_sequence.size() > 0 ){
return this->learning_sequence.at(0).get()->get_parameters( );
}
// std::shared_ptr<std::vector<double>> ret;
// ret.reset();
return nullptr;
std::vector<double> LearningSequence::get_parameters() {
return this->best_parameters;
}
void LearningSequence::add_learning_method(std::shared_ptr<LearningMethod> method) {
......@@ -44,6 +38,7 @@ namespace lib4neuro {
int mcycles = this->max_number_of_cycles, cycle_idx = 0;
// std::shared_ptr<std::vector<double>> best_params = std::make_shared<std::vector<double>>(this->best_parameters);
std::vector<double> params;
while( error > this->tol && mcycles != 0){
mcycles--;
cycle_idx++;
......@@ -57,18 +52,19 @@ namespace lib4neuro {
m->optimize( ef, ofs );
puts("*********************** 9");
error = ef.eval(m->get_parameters());
params = m->get_parameters();
error = ef.eval(&params);
puts("*********************** 10");
// std::shared_ptr<std::vector<double>> params;
// params.reset(m->get_parameters());
ef.get_network_instance()->copy_parameter_space(m->get_parameters());
ef.get_network_instance()->copy_parameter_space(&params);
if( error < the_best_error ){
the_best_error = error;
this->best_parameters = *ef.get_parameters();
this->best_parameters = ef.get_parameters();
// best_params = ef.get_parameters();
// best_params.reset(ef.get_parameters().get());
}
......
......@@ -65,7 +65,7 @@ namespace lib4neuro {
*
* @return
*/
LIB4NEURO_API std::vector<double>* get_parameters() override;
LIB4NEURO_API std::vector<double> get_parameters() override;
/**
*
......
......@@ -122,7 +122,7 @@ namespace lib4neuro {
if( this->p_impl->batch_size > 0 ){
n_data_points = this->p_impl->batch_size;
}
std::vector<double>* params_current = ef.get_parameters();
std::vector<double>* params_current = new std::vector<double>(ef.get_parameters());
std::shared_ptr<std::vector<double>> params_tmp;
params_tmp.reset(new std::vector<double>(n_parameters));
......@@ -247,10 +247,10 @@ namespace lib4neuro {
}
std::vector<double>* LevenbergMarquardt::get_parameters() {
std::vector<double> LevenbergMarquardt::get_parameters() {
// std::shared_ptr<std::vector<double>> ret;
// ret.reset(&this->p_impl->optimal_parameters);
return &this->p_impl->optimal_parameters;
return this->p_impl->optimal_parameters;
}
LevenbergMarquardt::~LevenbergMarquardt() = default;
......
......@@ -47,7 +47,7 @@ namespace lib4neuro {
LM_UPDATE_TYPE update_type,
std::ofstream* ofs = nullptr);
std::vector<double>* get_parameters() override;
std::vector<double> get_parameters() override;
~LevenbergMarquardt();
};
......
......@@ -306,14 +306,14 @@ namespace lib4neuro {
if (this->particle_swarm.at(pi)) {
delete this->particle_swarm.at(pi);
}
this->particle_swarm.at(pi) = new Particle(&ef, ef.get_parameters(), this->radius_factor);
this->particle_swarm.at(pi) = new Particle(&ef, new std::vector<double>(ef.get_parameters()), this->radius_factor);
}
this->radius_factor *= 1.25;
// if (!this->p_min_glob) {
// this->p_min_glob = new std::vector<double>(this->func_dim);
// } else {
this->p_min_glob->resize(this->func_dim);
this->p_min_glob.resize(this->func_dim);
// }
size_t outer_it = 0;
......@@ -333,7 +333,7 @@ namespace lib4neuro {
double euclidean_dist;
double current_err = -1;
this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value);
this->determine_optimal_coordinate_and_value(this->p_min_glob, optimal_value);
// for(unsigned int i = 0; i < this->n_particles; ++i){
// this->particle_swarm[i]->print_coordinate();
// }
......@@ -346,10 +346,10 @@ namespace lib4neuro {
//////////////////////////////////////////////////
// Clustering algorithm - termination condition //
//////////////////////////////////////////////////
particle = this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value);
particle = this->determine_optimal_coordinate_and_value(this->p_min_glob, optimal_value);
if (std::find(global_best_vec.begin(), global_best_vec.end(), *this->p_min_glob) == global_best_vec.end()) {
global_best_vec.emplace_back(*this->p_min_glob); // TODO rewrite as std::set
if (std::find(global_best_vec.begin(), global_best_vec.end(), this->p_min_glob) == global_best_vec.end()) {
global_best_vec.emplace_back(this->p_min_glob); // TODO rewrite as std::set
}
cluster.insert(particle);
......@@ -373,7 +373,7 @@ namespace lib4neuro {
for (size_t pi = 0; pi < this->n_particles; pi++) {
particle = this->particle_swarm.at(pi);
tmp_velocity = particle->change_coordinate(this->w, this->c1, this->c2, *this->p_min_glob,
tmp_velocity = particle->change_coordinate(this->w, this->c1, this->c2, this->p_min_glob,
global_best_vec);
// particle->print_coordinate();
......@@ -414,7 +414,7 @@ namespace lib4neuro {
// }
// std::shared_ptr<std::vector<double>> coord = std::make_shared<std::vector<double>>(this->p_min_glob);
current_err = ef.eval(this->p_min_glob);
current_err = ef.eval(&this->p_min_glob);
COUT_DEBUG(std::string("Iteration: ") << (unsigned int)(outer_it)
<< ". Total error: " << current_err
......@@ -447,7 +447,7 @@ namespace lib4neuro {
<< ". Objective function value: " << optimal_value
<< "." << std::endl );
this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value);
this->determine_optimal_coordinate_and_value(this->p_min_glob, optimal_value);
//TODO rewrite following output using COUT_INFO
if (outer_it < this->iter_max) {
/* Convergence reached */
......@@ -459,7 +459,7 @@ namespace lib4neuro {
// std::shared_ptr<std::vector<double>> coord;
// coord.reset(this->p_min_glob);
ef.get_network_instance()->copy_parameter_space(this->p_min_glob);
ef.get_network_instance()->copy_parameter_space(&this->p_min_glob);
delete centroid;
}
......@@ -534,7 +534,7 @@ namespace lib4neuro {
return std::sqrt(dist);
}
std::vector<double>* ParticleSwarm::get_parameters() {
std::vector<double> ParticleSwarm::get_parameters() {
// std::shared_ptr<std::vector<double>> ret;
// ret.reset(&this->p_min_glob);
return this->p_min_glob;
......
......@@ -200,7 +200,7 @@ namespace lib4neuro {
/**
* Coordinates of the found global minima
*/
std::vector<double>* p_min_glob = new std::vector<double>;
std::vector<double> p_min_glob;
protected:
/**
......@@ -309,7 +309,7 @@ namespace lib4neuro {
*
* @return
*/
LIB4NEURO_API std::vector<double>* get_parameters() override;
LIB4NEURO_API std::vector<double> get_parameters() override;
};
}
......
......@@ -16,25 +16,18 @@ namespace lib4neuro {
RandomSolution::~RandomSolution() {}
std::vector<double>* RandomSolution::get_parameters() {
std::vector<double> RandomSolution::get_parameters() {
// std::shared_ptr<std::vector<double>> ret;
// ret.reset(&this->optimal_parameters);
// return ret;
return &this->optimal_parameters;
return this->optimal_parameters;
}
void RandomSolution::optimize(lib4neuro::ErrorFunction &ef, std::ofstream *ofs) {
ef.get_network_instance()->randomize_parameters();
auto tmp = ef.get_parameters();
this->optimal_parameters = *tmp;
if(tmp) {
delete tmp;
tmp = nullptr;
}
this->optimal_parameters = ef.get_parameters();
COUT_INFO("Producing a random solution... error: " << ef.eval(&this->optimal_parameters) << std::endl);
}
......
......@@ -29,7 +29,7 @@ namespace lib4neuro {
void optimize(lib4neuro::ErrorFunction &ef,
std::ofstream *ofs = nullptr) override;
std::vector<double>* get_parameters() override;
std::vector<double> get_parameters() override;
};
......
......@@ -410,7 +410,8 @@ namespace lib4neuro {
learning_method.optimize(total_error);
// std::shared_ptr<std::vector<double>> params;
// params.reset(learning_method.get_parameters());
this->solution->copy_parameter_space(learning_method.get_parameters());
std::vector<double> params = learning_method.get_parameters();
this->solution->copy_parameter_space(&params);
printf("error after optimization: %f\n", total_error.eval(nullptr));
}
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment