
/**
 * Implementation of a gradient descent optimizer that builds its search
 * direction only from the training elements whose error exceeds the
 * tolerance, using an adaptive (halving) step length.
 *
 * @author Michal Kravčenko
 * @date 19.2.19 -
 */

#include "GradientDescentSingleItem.h"
#include <random.hpp>
#include "message.h"
namespace lib4neuro {
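    /**
     * Doc comment added for clarity; parameter meanings are inferred from the
     * member assignments below.
     *
     * @param epsilon error tolerance used as the stopping criterion
     * @param n_to_restart restart frequency of the descent
     * @param max_iters maximum number of iterations allowed
     * @param batch batch size used during optimization
     */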
    GradientDescentSingleItem::GradientDescentSingleItem(double epsilon,
                                                         size_t n_to_restart,
                                                         int max_iters,
                                                         size_t batch) {

        this->tolerance = epsilon;  // epsilon is the error tolerance tested in optimize()
        this->restart_frequency = n_to_restart;
        this->maximum_niters = max_iters;
        this->batch = batch;
    }
    GradientDescentSingleItem::~GradientDescentSingleItem() {
        if (this->optimal_parameters) {
            delete this->optimal_parameters;
            this->optimal_parameters = nullptr;
        }
    }
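    /**
     * Backtracking step-length search: starting from alpha = 1, the step is
     * halved until the error evaluated at x - alpha * d falls below the
     * error at x, i.e. until the step actually decreases the objective.
     */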
    double GradientDescentSingleItem::get_optimal_step_size(lib4neuro::ErrorFunction& f,
                                                            std::vector<double>& x,
                                                            std::vector<double>& d,
                                                            size_t n_elems) {

        double alpha = 1.0;  // note: an earlier heuristic used 10.0 / n_elems as the start value

        /* Evaluate the error at the current point and initialize the shifted
         * value so that the halving loop below runs at least once. */
        double value = f.eval();
        double value_shifted = value + 1.0;

        std::shared_ptr<std::vector<double>> shifted_x = std::make_shared<std::vector<double>>(std::vector<double>(x));
        while (value_shifted > value) {
            alpha *= 0.5;

            for (size_t i = 0; i < x.size(); ++i) {
                (*shifted_x).at(i) = x.at(i) - alpha * d.at(i);
            }

            value_shifted = f.eval(shifted_x.get());
        }

        return alpha;
    }
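    /**
     * Single-item gradient descent: every iteration accumulates the error
     * gradients of all training elements whose error exceeds the tolerance
     * into one search direction, chooses a step length via
     * get_optimal_step_size(), and shifts the parameter vector against that
     * direction. Stops once the largest per-element error drops below the
     * tolerance or the iteration budget is exhausted.
     */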
    void GradientDescentSingleItem::optimize(lib4neuro::ErrorFunction& ef,
                                             std::ofstream* ofs) {

        COUT_INFO("Finding a solution via a Gradient Descent [Single Item] method with adaptive step-length..."
                      << std::endl);
        COUT_INFO("Initial error: " << ef.eval() << std::endl);

        size_t total_elements = ef.get_n_data_set(), updated_elements = 0, iter = 0;
        double max_error = 1.0, error, gamma;
        size_t iter_idx = this->maximum_niters;
        size_t dim = ef.get_dimension();

        std::vector<double> parameter_vector = ef.get_parameters();

        std::vector<double> gradient_vector(dim);
        std::vector<double> search_direction(dim);
        std::vector<double> error_vector(ef.get_n_outputs());

        while (max_error >= this->tolerance && iter_idx >= 1) {

            iter_idx--;
            iter++;

            max_error = 0.0;
            updated_elements = 0;
            std::fill(search_direction.begin(),
                      search_direction.end(),
                      0);

            for (size_t i = 0; i < total_elements; ++i) {
                error = ef.eval_single_item_by_idx(i,
                                                   &parameter_vector,
                                                   error_vector);

                if (error > max_error) {
                    max_error = error;
                }

                /* Only elements with above-tolerance error contribute to the direction */
                if (error > this->tolerance) {
                    updated_elements++;
                    ef.calculate_error_gradient_single(error_vector,
                                                       gradient_vector);

                    for (size_t j = 0; j < dim; ++j) {
                        search_direction[j] += gradient_vector[j];
                    }
                }
            }
            gamma = this->get_optimal_step_size(ef,
                                                parameter_vector,
                                                search_direction,
                                                updated_elements);

            for (size_t j = 0; j < dim; ++j) {
                parameter_vector[j] -= gamma * search_direction[j];
            }

            COUT_DEBUG("Iteration: " << iter << ", Total elements in train set: " << total_elements
                           << ", # of elements with high error: " << updated_elements << ", max. error: "
                           << max_error << "\r");

        }

        COUT_DEBUG("Iteration: " << iter << ", Total elements in train set: " << total_elements
                       << ", # of elements with high error: " << updated_elements << ", max. error: "
                       << max_error << std::endl);

        /* Store a copy of the result; taking the address of the local
         * parameter_vector would leave a dangling pointer once this method
         * returns, and the destructor would then delete a stack address. */
        if (!this->optimal_parameters) {
            this->optimal_parameters = new std::vector<double>(parameter_vector);
        } else {
            *this->optimal_parameters = parameter_vector;
        }
        ef.set_parameters(*this->optimal_parameters);
    }
}
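/* Usage sketch (hypothetical; the MSE error function, the network/dataset
 * setup, and the defaulted ofs argument are assumptions based on the wider
 * lib4neuro API, not part of this file):
 *
 *   lib4neuro::MSE mse(&network, &dataset);       // error function to minimize
 *   lib4neuro::GradientDescentSingleItem gd(1e-4, // tolerance (epsilon)
 *                                           10,   // n_to_restart
 *                                           1000, // max_iters
 *                                           0);   // batch
 *   gd.optimize(mse);  // writes the result back via ef.set_parameters()
 */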