    /**
     * Declaration of the GradientDescentSingleItem gradient descent optimizer.
     *
     * @author Michal Kravčenko
     * @date 19.2.19 -
     */
    
    #ifndef LIB4NEURO_GRADIENTDESCENTSINGLEITEM_H
    #define LIB4NEURO_GRADIENTDESCENTSINGLEITEM_H
    
    
    #include "../settings.h"
    #include "../constants.h"
    
    #include "LearningMethod.h"
    
    #include "../ErrorFunction/ErrorFunctions.h"
    #include "GradientDescentBB.h"
    
    namespace lib4neuro {
        /**
         * Gradient descent optimization method (back-propagation) working with
         * single training items at a time.
         */
    
        class GradientDescentSingleItem : public GradientLearningMethod {
    
    
        private:
    
            /**
             * Threshold for the successful ending of the optimization - deviation from the minimum
             */
            double tolerance;
    
            /**
             * Maximal acceptable value of the error function
             */
            double max_error;
    
            /**
             * Number of iterations after which the step size is reset to tolerance/10.0
             */
            size_t restart_frequency;
    
            /**
             * Size of the training batch
             */
            size_t batch;
    
            /**
             * Maximal number of iterations of the optimization
             */
            size_t iter_max;
    
            /**
             * Maximal number of iterations - optimization will stop after that, even if not converged
             */
            long long int maximum_niters;
    
            /**
             * Coordinates of the found minimum (the optimal parameter vector)
             */
            std::vector<double> optimal_parameters;
    
    
    
        protected:
    
            /**
             * Finds the optimal step length in direction @p d from position @p x of the function @p f
             * @param f Error function to be minimized
             * @param x Current position (parameter vector)
             * @param d Search direction
             * @param n_elems Number of elements in @p x and @p d
             * @return The step length found
             */
            virtual double get_optimal_step_size(lib4neuro::ErrorFunction &f, std::vector<double> &x, std::vector<double> &d, size_t n_elems);
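
            /*
             * Illustrative sketch of a step-length search (an assumption added for
             * clarity, not the library's actual implementation; `error_at` is a
             * hypothetical helper evaluating the error function at a given
             * parameter vector):
             *
             *   double alpha = 1.0;                      // initial trial step length
             *   double current_error = error_at(x);
             *   while (alpha > 1e-12) {
             *       std::vector<double> x_trial(x);
             *       for (size_t i = 0; i < n_elems; ++i) {
             *           x_trial[i] += alpha * d[i];      // move along the search direction d
             *       }
             *       if (error_at(x_trial) < current_error) {
             *           return alpha;                    // improvement found, accept this step length
             *       }
             *       alpha *= 0.5;                        // otherwise shrink the step and retry
             *   }
             *   return alpha;
             */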
    
    
        public:
    
            /**
             * Creates an instance of the Gradient Descent optimizer (i.e. back-propagation)
             * @param epsilon Threshold for the successful ending of the optimization - deviation from the minimum
             * @param n_to_restart Number of iterations after which the step size is reset to tolerance/10.0
             * @param max_iters Maximal number of iterations - the optimization stops afterwards, even if not converged
             * @param batch Size of the training batch
             */
            LIB4NEURO_API explicit GradientDescentSingleItem(double epsilon = 1e-3, size_t n_to_restart = 100, int max_iters = 1000, size_t batch = 0);
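
            /*
             * Illustrative construction (a sketch, not taken from the library's
             * examples): a tighter tolerance, restarts every 50 iterations and a
             * higher iteration limit, keeping the default batch size:
             *
             *   lib4neuro::GradientDescentSingleItem gd(1e-4, 50, 5000);
             */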
    
            /**
             * Deallocates the instance
             */
            LIB4NEURO_API ~GradientDescentSingleItem();
    
            /**
             * Runs the optimization of the given error function
             * @param ef Error function to be minimized
             * @param ofs Optional output file stream used for logging the optimization progress
             */
            LIB4NEURO_API void optimize(lib4neuro::ErrorFunction &ef, std::ofstream* ofs = nullptr) override;
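
            /*
             * Illustrative usage (a sketch; the concrete ErrorFunction instance,
             * e.g. a mean-squared-error built over a network and a data set, is
             * assumed to exist and is not shown here):
             *
             *   lib4neuro::GradientDescentSingleItem gd;   // default settings
             *   gd.optimize(error_function);               // minimize the error
             *   auto params = gd.get_parameters();         // retrieve the found optimum
             */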
    
            /**
             * Returns the parameters of the found minimum
             * @return Shared pointer to the vector of optimal parameters
             */
            LIB4NEURO_API std::shared_ptr<std::vector<double>> get_parameters() override;