//
// Created by martin on 7/15/18.
//
#ifndef INC_4NEURO_ERRORFUNCTION_H
#define INC_4NEURO_ERRORFUNCTION_H
#include "../settings.h"
#include "../Network/NeuralNetwork.h"
#include "../DataSet/DataSet.h"
//TODO HEAVY refactoring needed!
namespace lib4neuro {
//TODO write smarter using ErrorFunction abstract class?
enum ErrorFunctionType {
ErrorFuncMSE
};
class ErrorFunction {
public:

/**
 * Virtual destructor so that derived error functions can be destroyed safely through a base-class pointer
 */
virtual ~ErrorFunction() = default;
/**
 * Evaluates the error function
 * @param weights Vector of weights to be used instead of the network's current parameters (optional)
 * @param denormalize_data Whether to de-normalize the data when computing the error
 * @param verbose Whether to print additional information about the evaluation
 * @return Error function value
 */
virtual double eval(std::vector<double>* weights = nullptr, bool denormalize_data=false,
bool verbose = false) = 0;
/**
 * Returns the dimension of the parameter space of this error function
 * @return Number of parameters
 */
LIB4NEURO_API virtual size_t get_dimension();
/**
 * Computes the gradient of the error function with respect to the network parameters
 * @param params Parameter (weight) vector at which the gradient is evaluated
 * @param grad Output vector filled with the computed gradient
 * @param alpha Scaling coefficient applied to the computed gradient
 * @param batch Size of the batch used for the computation (0 uses the whole data set)
 */
virtual void
calculate_error_gradient(std::vector<double>& params,
std::vector<double>& grad,
double alpha = 1.0,
size_t batch = 0) = 0;
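/*
 * A hedged usage sketch (not part of the library itself): one plain gradient-descent
 * step driven by calculate_error_gradient(). The concrete ErrorFunction instance `err`
 * and the step size `lr` are illustrative assumptions only.
 *
 *   std::vector<double> params = *err.get_parameters();   // copy of the current parameters
 *   std::vector<double> grad(params.size(), 0.0);
 *   err.calculate_error_gradient(params, grad);           // fills grad at `params`
 *   const double lr = 0.01;                               // hypothetical learning rate
 *   for (size_t i = 0; i < params.size(); ++i) {
 *       params[i] -= lr * grad[i];                        // step against the gradient
 *   }
 */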

/**
*
* @param params
* @param grad
* @param alpha
* @param batch
*/
virtual void
analyze_error_gradient(std::vector<double>& params,
std::vector<double>& grad,
double alpha = 1.0,
size_t batch = 0) = 0;
/**
 * Returns the vector of parameters associated with this error function
 * @return Shared pointer to the parameter vector
 */
virtual std::shared_ptr<std::vector<double>> get_parameters();

/**
 * Returns the data set used for training
 * @return Pointer to the training DataSet
 */
virtual DataSet* get_dataset();
/**
 * Returns the test part of the data set
 * @return Pointer to the test DataSet
 */
virtual DataSet* get_test_dataset();
/**
 * Returns the neural network associated with this error function
 * @return Pointer to the NeuralNetwork instance
 */
NeuralNetwork* get_network_instance();
/**
 * Splits the data set into a training part and a test part (see the usage sketch below)
 * @param percent_test Portion of the data to be held out for testing
 */
void divide_data_train_test(double percent_test);
/**
 * Restores the full data set (training and test parts together) for training
 */
void return_full_data_set_for_training();
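/*
 * A hedged sketch of the train/test workflow suggested by the two methods above
 * (`err` is an already constructed ErrorFunction implementation; the 0.3 split and
 * the interpretation of the argument as the test share are assumptions):
 *
 *   err.divide_data_train_test(0.3);                 // hold part of the data out for testing
 *   double train_error = err.eval();                 // error on the training part
 *   double test_error  = err.eval_on_test_data();    // error on the held-out part
 *   err.return_full_data_set_for_training();         // restore the full data set afterwards
 */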
/**
 * Evaluates the error function on the test data
 * @param weights
 * @return
 */
virtual double eval_on_test_data(std::vector<double>* weights = nullptr, bool verbose = false) = 0;
/**
 * Evaluates the error function on the test data and writes the results into the given file
 * @param results_file_path
 * @param weights
 * @return
 */
virtual double eval_on_test_data(std::string results_file_path, std::vector<double>* weights = nullptr,
bool verbose = false) = 0;
/**
 * Evaluates the error function on the test data and writes the results into the given output stream
 * @param results_file_path
 * @param weights
 * @return
 */
virtual double eval_on_test_data(std::ofstream* results_file_path, std::vector<double>* weights = nullptr,
bool verbose = false) = 0;
/**
 * Evaluates the error function on the given data set
 * @param data_set
 * @param weights
 * @return
 */
virtual double eval_on_data_set(DataSet* data_set, std::vector<double>* weights = nullptr,
bool verbose = false) = 0;
/**
 * Evaluates the error function on the given data set and writes the results into the given file
 * @param data_set
 * @param results_file_path
 * @param weights
 * @return
 */
virtual double
eval_on_data_set(DataSet* data_set, std::string results_file_path, std::vector<double>* weights = nullptr,
bool verbose = false) = 0;
/**
 * Evaluates the error function on the given data set and writes the results into the given output stream
 * @param data_set
 * @param results_file_path
 * @param weights
 * @param denormalize_data
 * @return
 */
virtual double eval_on_data_set(DataSet* data_set,
std::ofstream* results_file_path = nullptr,
std::vector<double>* weights = nullptr,
bool denormalize_data = true,
bool verbose = false) = 0;

/**
 * Evaluates the error contribution of the data entry with index i
 * @param i
 * @param parameter_vector
 * @param error_vector
 * @return
 */
virtual double eval_single_item_by_idx(size_t i, std::vector<double> *parameter_vector, std::vector<double> &error_vector) = 0;
/**
*
* @param error_vector
* @param gradient_vector
*/
virtual void calculate_error_gradient_single(std::vector<double> &error_vector, std::vector<double> &gradient_vector) = 0;
/**
 * Computes the gradient of the residual function for a single input-output pair
 * @param input
 * @param output
 * @param gradient
 * @param h Step used by the finite-difference approximation
 */
virtual void
calculate_residual_gradient(std::vector<double>* input,
std::vector<double>* output,
std::vector<double>* gradient,
double h = 1e-3) = 0;
/**
 * Evaluates the residual for a single input-output pair
 * @param input
 * @param output
 * @param parameters
 * @return
 */
virtual double
calculate_single_residual(std::vector<double>* input,
std::vector<double>* output,
std::vector<double>* parameters = nullptr) = 0;
protected:

/**
 * Dimension of the parameter space
 */
size_t dimension = 0;

/**
 * Neural network whose error is evaluated
 */
NeuralNetwork* net = nullptr;

/**
 * Data set currently used for training
 */
DataSet* ds = nullptr;

/**
 * Full data set (before the train/test split)
 */
DataSet* ds_full = nullptr;

/**
 * Test part of the data set
 */
DataSet* ds_test = nullptr;
};
class MSE : public ErrorFunction {
public:
/**
* Constructor for single neural network
* @param net
* @param ds
*/
LIB4NEURO_API MSE(NeuralNetwork* net, DataSet* ds);
/**
*
* @param weights
* @return
*/
LIB4NEURO_API double eval(std::vector<double>* weights = nullptr,
bool denormalize_data = false,
bool verbose = false) override;
/**
 *
 * @param params
 * @param grad
 * @param alpha
 * @param batch
 */
LIB4NEURO_API void
calculate_error_gradient(std::vector<double>& params,
std::vector<double>& grad,
double alpha = 1.0,
size_t batch = 0) override;

/**
*
* @param params
* @param grad
* @param alpha
* @param batch
*/
LIB4NEURO_API void
analyze_error_gradient(std::vector<double>& params,
std::vector<double>& grad,
double alpha = 1.0,
size_t batch = 0) override;
/**
 * Evaluates the function f(x) = 0 - MSE(x) for a
 * specified input x
 *
 * @param input
 * @param output
 * @param parameters
 * @return
 */
virtual double calculate_single_residual(std::vector<double>* input,
std::vector<double>* output,
std::vector<double>* parameters) override;
/**
 * Computes the gradient of the residual function f(x) = 0 - MSE(x) for a specific input x
 * using the central difference method.
 *
 * @param[in] input Vector being a single input
 * @param[in] output Vector being the expected output for the given input
 * @param[out] gradient Resulting gradient
 * @param[in] h Step used in the central difference
 */
LIB4NEURO_API void
calculate_residual_gradient(std::vector<double>* input,
std::vector<double>* output,
std::vector<double>* gradient,
double h=1e-3) override;
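/*
 * For reference, a self-contained sketch of the central-difference scheme referred to
 * above, written against a generic residual functor rather than this class (the functor
 * `r`, the step `h` and the helper name are illustrative assumptions, not library API):
 *
 *   #include <functional>
 *   #include <vector>
 *
 *   void central_difference_gradient(const std::function<double(const std::vector<double>&)>& r,
 *                                    std::vector<double> x,
 *                                    std::vector<double>& gradient,
 *                                    double h = 1e-3) {
 *       gradient.assign(x.size(), 0.0);
 *       for (size_t i = 0; i < x.size(); ++i) {
 *           const double orig = x[i];
 *           x[i] = orig + h; const double f_plus  = r(x);     // forward point
 *           x[i] = orig - h; const double f_minus = r(x);     // backward point
 *           x[i] = orig;                                      // restore the component
 *           gradient[i] = (f_plus - f_minus) / (2.0 * h);     // O(h^2) approximation
 *       }
 *   }
 */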
/**
*
* @param input
* @return
*/
LIB4NEURO_API double eval_on_single_input(std::vector<double>* input,
std::vector<double>* output,
std::vector<double>* weights = nullptr);
/**
 *
 * @param weights
 * @return
 */
LIB4NEURO_API double eval_on_test_data(std::vector<double>* weights = nullptr, bool verbose = false) override;
/**
*
* @param results_file_path
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_test_data(std::string results_file_path,
std::vector<double>* weights = nullptr,
bool verbose = false) override;
/**
*
* @param results_file_path
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_test_data(std::ofstream* results_file_path,
std::vector<double>* weights = nullptr,
bool verbose = false) override;
/**
*
* @param data_set
* @param results_file_path
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
std::ofstream* results_file_path,
std::vector<double>* weights = nullptr,
bool denormalize_data = false,
bool verbose = false) override;
/**
*
* @param data_set
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
std::vector<double>* weights = nullptr,
bool verbose = false) override;
/**
*
* @param data_set
* @param results_file_path
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
std::string results_file_path,
std::vector<double>* weights = nullptr,
bool verbose = false) override;

/**
*
* @param i
* @param parameter_vector
* @param error_vector
* @return
*/
LIB4NEURO_API virtual double eval_single_item_by_idx(size_t i, std::vector<double> *parameter_vector, std::vector<double> &error_vector) override;
/**
*
* @param error_vector
* @param gradient_vector
*/
LIB4NEURO_API virtual void calculate_error_gradient_single(std::vector<double> &error_vector, std::vector<double> &gradient_vector) override;
};
class ErrorSum : public ErrorFunction {
public:
/**
*
*/
LIB4NEURO_API ErrorSum();
/**
*
*/
LIB4NEURO_API ~ErrorSum();
/**
*
* @param weights
* @return
*/
LIB4NEURO_API double eval(std::vector<double>* weights = nullptr,
bool denormalize_data = false,
bool verbose = false) override;
/**
*
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_test_data(std::vector<double>* weights = nullptr, bool verbose = false) override;
/**
*
* @param results_file_path
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_test_data(std::string results_file_path,
std::vector<double>* weights = nullptr,
bool verbose = false) override;
/**
 *
 * @param results_file_path
 * @param weights
 * @return
 */
LIB4NEURO_API double eval_on_test_data(std::ofstream* results_file_path,
std::vector<double>* weights = nullptr,
bool verbose = false) override;
/**
*
* @param data_set
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
std::vector<double>* weights = nullptr,
bool verbose = false) override;
/**
*
* @param data_set
* @param results_file_path
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
std::string results_file_path,
std::vector<double>* weights = nullptr,
bool verbose = false) override;
/**
*
* @param data_set
* @param results_file_path
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
std::ofstream* results_file_path,
std::vector<double>* weights = nullptr,
bool denormalize_data = true,
bool verbose = false) override;

/**
*
* @param i
* @param parameter_vector
* @param error_vector
* @return
*/
LIB4NEURO_API virtual double eval_single_item_by_idx(size_t i, std::vector<double> *parameter_vector, std::vector<double> &error_vector) override;
/**
*
* @param error_vector
* @param gradient_vector
*/
LIB4NEURO_API virtual void calculate_error_gradient_single(std::vector<double> &error_vector, std::vector<double> &gradient_vector) override;
/**
 * Adds an error function to the sum with the weighting coefficient alpha
 * @param F
 * @param alpha
 */
LIB4NEURO_API void add_error_function(ErrorFunction* F, double alpha = 1.0);
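/*
 * A hedged sketch of how an ErrorSum is presumably assembled from the declaration above
 * (`mse_a` and `mse_b` stand for ErrorFunction instances constructed elsewhere; the
 * weights 1.0 and 0.5 are arbitrary illustrative values):
 *
 *   lib4neuro::ErrorSum total;
 *   total.add_error_function(&mse_a, 1.0);   // first summand, weight 1.0
 *   total.add_error_function(&mse_b, 0.5);   // second summand, weight 0.5
 *   double combined = total.eval();          // evaluate the weighted sum
 */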
/**
*
* @return
*/
LIB4NEURO_API size_t get_dimension() override;
/**
 *
 * @param params
 * @param grad
 * @param alpha
 * @param batch
 */
LIB4NEURO_API void
calculate_error_gradient(std::vector<double>& params,
std::vector<double>& grad,
double alpha = 1.0,
size_t batch = 0) override;

/**
*
* @param params
* @param grad
* @param alpha
* @param batch
*/
LIB4NEURO_API void
analyze_error_gradient(std::vector<double>& params,
std::vector<double>& grad,
double alpha = 1.0,
size_t batch = 0) override;
LIB4NEURO_API void
calculate_residual_gradient(std::vector<double>* input,
std::vector<double>* output,
std::vector<double>* gradient,
double h = 1e-3) override;
LIB4NEURO_API double
calculate_single_residual(std::vector<double>* input,
std::vector<double>* output,
std::vector<double>* parameters = nullptr) override;
LIB4NEURO_API std::shared_ptr<std::vector<double>> get_parameters() override;
/**
*
* @return
*/
LIB4NEURO_API DataSet* get_dataset() override;
/**
 * Vector of the summed error functions
 */
std::vector<ErrorFunction*>* summand;

/**
 * Weighting coefficients of the individual summands
 */
std::vector<double>* summand_coefficient;
};
}
#endif //INC_4NEURO_ERRORFUNCTION_H