/**
 * DESCRIPTION OF THE FILE
 *
 * @author Michal Kravčenko
 * @date 30.7.18 -
 */
    
    #ifndef INC_4NEURO_GRADIENTDESCENT_H
    #define INC_4NEURO_GRADIENTDESCENT_H
    
    
    #include "../settings.h"
    
    #include "../constants.h"
    
    #include "ILearningMethods.h"
    #include "../ErrorFunction/ErrorFunctions.h"
    
    namespace lib4neuro {
    
        class GradientDescent : public ILearningMethods {

        private:

            /**
             * Threshold for the successful ending of the optimization - deviation from minima
             */
            double tolerance;

            /**
             * Number of iterations to reset step size to tolerance/10.0
             */
            size_t restart_frequency;

            /**
             * Training batch size; the constructor defaults it to 0
             * (presumably "use the whole training set" — confirm in the .cpp)
             */
            size_t batch;

            /**
             * NOTE(review): purpose not documented in the original header;
             * name suggests a per-run iteration cap — confirm against the implementation
             */
            size_t iter_max;

            /**
             * Maximal number of iterations - optimization will stop after that, even if not converged
             */
            long long int maximum_niters;

            /**
             * Vector of minima coordinates
             */
            std::vector<double> *optimal_parameters;

            /**
             * Estimates a new step size for the next gradient-descent update.
             * Parameter semantics were not documented in the original header; the
             * names suggest the following — confirm against the .cpp:
             * @param gamma [in/out] step size
             * @param beta heuristic coefficient
             * @param c [in/out] auxiliary coefficient
             * @param grad_norm_prev norm of the previous gradient
             * @param grad_norm norm of the current gradient
             * @param fi current error-function value
             * @param fim previous error-function value
             */
            virtual void
            eval_step_size_mk(double &gamma,
                              double beta,
                              double &c,
                              double grad_norm_prev,
                              double grad_norm,
                              double fi,
                              double fim);

        public:

            /**
             * Creates an instance of Gradient Descent Optimizer (i.e. back-propagation)
             * @param epsilon Threshold for the successful ending of the optimization - deviation from minima
             * @param n_to_restart Number of iterations to reset step size to tolerance/10.0
             * @param max_iters Maximal number of iterations - optimization will stop after that, even if not converged
             * @param batch Training batch size (0 presumably selects the whole set — confirm in the .cpp)
             */
            LIB4NEURO_API GradientDescent(double epsilon = 1e-3, size_t n_to_restart = 100, int max_iters = 1000, size_t batch = 0);

            /**
             * Deallocates the instance
             */
            LIB4NEURO_API ~GradientDescent();

            /**
             * Runs the gradient-descent optimization on the supplied error function.
             * @param ef error function to be minimized
             */
            LIB4NEURO_API virtual void optimize(lib4neuro::ErrorFunction &ef);

            /**
             * @return pointer to the stored parameter vector (presumably the optimum
             *         found by the last optimize() call — confirm in the .cpp)
             */
            LIB4NEURO_API virtual std::vector<double> *get_parameters();
        };
    
    }
    
    
    #endif //INC_4NEURO_GRADIENTDESCENT_H