GradientDescentSingleItem.cpp
    /**
     * Gradient descent optimizer driven by per-item training errors,
     * using an adaptive (backtracking) step length.
     *
     * @author Michal Kravčenko
     * @date 19.2.19 -
     */
    
    
    #include "../mpi_wrapper.h"
    
    
    #include "GradientDescentSingleItem.h"
    
    #include <random.hpp>
    #include "message.h"
    
    namespace lib4neuro {
    
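        /**
         * Constructs the optimizer.
         *
         * @param epsilon      error tolerance used as the stopping criterion
         * @param n_to_restart restart frequency (stored; not used within this file)
         * @param max_iters    maximum number of iterations
         * @param batch        batch size (stored; not used within this file)
         */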
        GradientDescentSingleItem::GradientDescentSingleItem(double epsilon,
                                                             size_t n_to_restart,
                                                             int max_iters,
                                                             size_t batch) {
    
            this->tolerance         = epsilon;
            this->restart_frequency = n_to_restart;
            this->maximum_niters    = max_iters;
            this->batch             = batch;

            /* Allocated here so the destructor's delete is always valid and
             * optimize() can store its result; assumes the optimal_parameters
             * member is declared as std::vector<double>* in the header. */
            this->optimal_parameters = new std::vector<double>(0);
        }
    
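        /** Releases the heap-allocated vector of optimal parameters. */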
        GradientDescentSingleItem::~GradientDescentSingleItem() {
    
            if (this->optimal_parameters) {
                delete this->optimal_parameters;
                this->optimal_parameters = nullptr;
            }
    
        }

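        /**
         * Estimates a suitable step length along the direction d by a simple
         * backtracking search: alpha is halved until the error at
         * x - alpha * d drops below the current error.
         */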
        double GradientDescentSingleItem::get_optimal_step_size(lib4neuro::ErrorFunction& f,
                                                                std::vector<double>& x,
                                                                std::vector<double>& d,
                                                                size_t n_elems) {
    
            /* Backtracking line search: start from a unit step and halve it
             * until the shifted parameters yield a lower error than the
             * current one. The initial alpha = 1.0 is an assumption; this
             * snapshot omitted the original initialization. n_elems is not
             * used by this search. */
            double alpha         = 1.0;
            double value         = f.eval();
            double value_shifted = value + 1.0;

            std::shared_ptr<std::vector<double>> shifted_x = std::make_shared<std::vector<double>>(x);

            while (value_shifted > value) {

                alpha *= 0.5;

                for (size_t i = 0; i < x.size(); ++i) {
                    (*shifted_x).at(i) = x.at(i) - alpha * d.at(i);
                }

                value_shifted = f.eval(shifted_x.get());
            }

            return alpha;
        }

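        /**
         * Runs the optimization: each iteration accumulates the gradients of
         * all items whose error exceeds the tolerance, finds an adaptive step
         * length for the summed direction via get_optimal_step_size(), and
         * updates the parameter vector accordingly.
         */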
        void GradientDescentSingleItem::optimize(lib4neuro::ErrorFunction& ef,
                                                 std::ofstream* ofs) {
    
            double err_ = ef.eval();
    
            COUT_INFO("Finding a solution via a Gradient Descent [Single Item] method with adaptive step-length..." << std::endl);
    
            COUT_INFO("Initial error: " << err_ << std::endl);
    
            size_t total_elements = ef.get_n_data_set(), updated_elements = 0, iter = 0;
    
            double max_error      = 1.0, error, gamma;
            size_t iter_idx       = this->maximum_niters;
            size_t dim            = ef.get_dimension();
    
            std::vector<double> parameter_vector = ef.get_parameters();
    
            std::vector<double> gradient_vector(dim);
            std::vector<double> search_direction(dim);
    
            std::vector<double> error_vector(ef.get_n_outputs());
    
            /* Repeat until every item's error is below the tolerance or the
             * iteration budget is exhausted. */
            while (max_error >= this->tolerance && iter_idx >= 1) {
                iter_idx--;
                iter++;

                max_error        = 0.0;
                updated_elements = 0;
    
                std::fill(search_direction.begin(),
                          search_direction.end(),
                          0);
    
                for (size_t i = 0; i < total_elements; ++i) {
    
                    error = ef.eval_single_item_by_idx(i,
                                                       &parameter_vector,
                                                       error_vector);
    
                    /* Track the largest per-item error seen in this pass. */
                    if (error > max_error) {
                        max_error = error;
                    }

                    /* Items whose error exceeds the tolerance contribute
                     * their gradient to the accumulated search direction. */
                    if (error > this->tolerance) {
                        updated_elements++;

                        ef.calculate_error_gradient_single(error_vector,
                                                           gradient_vector);

                        for (size_t j = 0; j < dim; ++j) {
                            search_direction[j] += gradient_vector[j];
                        }
                    }
                }

                gamma = this->get_optimal_step_size(ef,
                                                    parameter_vector,
                                                    search_direction,
                                                    updated_elements);
    
                for (size_t j = 0; j < dim; ++j) {
                    parameter_vector[j] -= gamma * search_direction[j];
    
                }

                COUT_DEBUG("Iteration: " << iter << ", Total elements in train set: " << total_elements
                                         << ", # of elements with high error: " << updated_elements << ", max. error: "
                                         << max_error << "\r");
    
            }

            COUT_DEBUG("Iteration: " << iter << ", Total elements in train set: " << total_elements
                                     << ", # of elements with high error: " << updated_elements << ", max. error: "
                                     << max_error << std::endl);
    
            /* Store a copy of the trained parameters. Assigning the address
             * of the local vector (as the snapshot did) would leave a
             * dangling pointer once this function returns. */
            *this->optimal_parameters = parameter_vector;

            ef.set_parameters(*this->optimal_parameters);
        }

}
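
    /* A minimal usage sketch, assuming an ErrorFunction instance `ef` has
     * been built elsewhere (e.g. from a network and a data set; that setup
     * is outside this file) and that optimize()'s std::ofstream* argument
     * accepts nullptr:
     *
     *     lib4neuro::GradientDescentSingleItem gd(1e-4, 10, 1000, 0);
     *     gd.optimize(ef, nullptr);
     *
     * After optimize() returns, the trained parameters have been written
     * back into `ef` via set_parameters().
     */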