#ifndef INC_4NEURO_ERRORFUNCTION_H
#define INC_4NEURO_ERRORFUNCTION_H
#include "../settings.h"
#include "../Network/NeuralNetwork.h"
#include "../DataSet/DataSet.h"
//TODO HEAVY refactoring needed!
namespace lib4neuro {
//TODO write smarter using ErrorFunction abstract class?
enum ErrorFunctionType {
ErrorFuncMSE
};
class ErrorFunction {
public:
/**
*
* @param weights
* @return
*/
virtual double eval(std::vector<double>* weights = nullptr,
bool denormalize_data = false,
bool verbose = false) = 0;
/**
*
* @param input
* @param output
* @param weights
* @return
*/
virtual double eval_on_single_input(std::vector<double>* input,
std::vector<double>* output,
std::vector<double>* weights = nullptr) = 0;
/**
*
* @return
*/
LIB4NEURO_API virtual size_t get_dimension();
/**
*
* @param params
* @param grad
* @param alpha
* @param batch
*/
virtual void
calculate_error_gradient(std::vector<double>& params,
std::vector<double>& grad,
double alpha = 1.0,
size_t batch = 0) = 0;

/**
*
* @param params
* @param grad
* @param alpha
* @param batch
*/
virtual void
analyze_error_gradient(std::vector<double>& params,
std::vector<double>& grad,
double alpha = 1.0,
size_t batch = 0) = 0;

virtual std::vector<double> get_parameters() = 0;
virtual size_t get_n_data_set() = 0;
/**
*
* @return
*/
virtual size_t get_n_test_data_set() = 0;
virtual size_t get_n_outputs() = 0;
/**
*
* @param params
*/
virtual void set_parameters(std::vector<double>& params) = 0;
/**
*
* @param percent_test
*/
virtual void divide_data_train_test(double percent_test) = 0;
virtual void return_full_data_set_for_training() = 0;
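/*
 * Workflow sketch (illustrative, not part of the API): `ef` stands for a
 * concrete ErrorFunction implementation such as MSE; the value 0.1 is a
 * placeholder, and whether the argument is a fraction or a percentage follows
 * the percent_test parameter of the concrete implementation.
 *
 *     ef.divide_data_train_test(0.1);            // reserve part of the data for testing
 *     double test_error = ef.eval_on_test_data();
 *     ef.return_full_data_set_for_training();    // restore the complete data set
 */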
/**
*
* @param jacobian
* @param rhs
*/
virtual void get_jacobian_and_rhs(std::vector<std::vector<double>>& jacobian,
std::vector<double>& rhs) = 0;
virtual double eval_on_test_data(std::vector<double>* weights = nullptr,
bool verbose = false) = 0;
/**
*
* @param results_file_path
* @param weights
* @return
*/
virtual double eval_on_test_data(std::string results_file_path,
std::vector<double>* weights = nullptr,
bool verbose = false) = 0;
/**
*
* @param results_file_path
* @param weights
* @return
*/
virtual double eval_on_test_data(std::ofstream* results_file_path,
std::vector<double>* weights = nullptr,
bool verbose = false) = 0;
/**
*
* @param data_set
* @param weights
* @return
*/
virtual double eval_on_data_set(DataSet* data_set,
std::vector<double>* weights = nullptr,
bool verbose = false) = 0;
/**
*
* @param data_set
* @param weights
* @param results_file_path
* @return
*/
virtual double
eval_on_data_set(DataSet* data_set,
std::string results_file_path,
std::vector<double>* weights = nullptr,
bool verbose = false) = 0;
/**
*
* @param data_set
* @param results_file_path
* @param weights
* @return
*/
virtual double eval_on_data_set(DataSet* data_set,
std::ofstream* results_file_path = nullptr,
std::vector<double>* weights = nullptr,
bool verbose = false) = 0;

/**
*
* @param i
* @param parameter_vector
* @param error_vector
* @return
*/
virtual double eval_single_item_by_idx(size_t i,
std::vector<double>* parameter_vector,
std::vector<double>& error_vector) = 0;

/**
*
* @param error_vector
* @param gradient_vector
*/
virtual void calculate_error_gradient_single(std::vector<double>& error_vector,
std::vector<double>& gradient_vector) = 0;
/**
*
* @param input
* @param output
* @param gradient
* @param h
*/
virtual void
calculate_residual_gradient(std::vector<double>* input,
std::vector<double>* output,
std::vector<double>* gradient,
double h = 1e-3) = 0;
/**
*
* @param input
* @param output
* @param parameters
* @return
*/
virtual double
calculate_single_residual(std::vector<double>* input,
std::vector<double>* output,
std::vector<double>* parameters = nullptr) = 0;
/**
*
* @param scaling
*/
virtual void randomize_parameters(double scaling) = 0;
protected:
/**
*
*/
size_t dimension = 0;
std::vector<NeuralNetwork*> nets;
/**
*
*/
DataSet* ds = nullptr;
/**
*
*/
DataSet* ds_full = nullptr;
/**
*
*/
DataSet* ds_test = nullptr;
};
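/*
 * Example (not part of the library): a minimal sketch of how an optimizer
 * could drive any ErrorFunction implementation through get_parameters(),
 * calculate_error_gradient() and set_parameters(). The helper name, the
 * learning rate `lr` and the iteration count `iters` are illustrative
 * assumptions.
 *
 *     void naive_gradient_descent(ErrorFunction& ef, double lr, size_t iters) {
 *         std::vector<double> params = ef.get_parameters();
 *         std::vector<double> grad(params.size(), 0.0);
 *         for (size_t it = 0; it < iters; ++it) {
 *             std::fill(grad.begin(), grad.end(), 0.0);   // needs <algorithm>
 *             ef.calculate_error_gradient(params, grad);  // defaults: alpha = 1.0, batch = 0
 *             for (size_t i = 0; i < params.size(); ++i) {
 *                 params[i] -= lr * grad[i];              // plain gradient step
 *             }
 *             ef.set_parameters(params);
 *         }
 *     }
 */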
/**
* Mean squared error function
*/
class MSE : public ErrorFunction {
public:
/**
* Constructor for single neural network
* @param net
* @param ds
*/
LIB4NEURO_API MSE(NeuralNetwork* net,
DataSet* ds);
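/*
 * Construction sketch (illustrative): `net` and `ds` are assumed to be
 * pointers to a NeuralNetwork and a DataSet prepared elsewhere.
 *
 *     lib4neuro::MSE mse(net, ds);
 *     double training_error = mse.eval();   // default arguments, no explicit weight vector
 */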
/**
*
* @param percent_test
*/
LIB4NEURO_API virtual void divide_data_train_test(double percent_test) override;
/**
*
*/
LIB4NEURO_API virtual void return_full_data_set_for_training() override;
/**
*
* @param jacobian
* @param rhs
*/
LIB4NEURO_API virtual void get_jacobian_and_rhs(std::vector<std::vector<double>>& jacobian,
std::vector<double>& rhs) override;
/**
*
* @param weights
* @return
*/
LIB4NEURO_API double eval(std::vector<double>* weights = nullptr,
bool denormalize_data = false,
bool verbose = false) override;
/**
*
* @param params
* @param grad
* @param alpha
* @param batch
*/
LIB4NEURO_API void
calculate_error_gradient(std::vector<double>& params,
std::vector<double>& grad,
double alpha = 1.0,
size_t batch = 0) override;

/**
*
* @param params
* @param grad
* @param alpha
* @param batch
*/
LIB4NEURO_API void
analyze_error_gradient(std::vector<double>& params,
std::vector<double>& grad,
double alpha = 1.0,
size_t batch = 0) override;

/**
* Evaluates the function f(x) = 0 - MSE(x) for a
* specified input x
*
* @param input
* @return
*/
double calculate_single_residual(std::vector<double>* input,
std::vector<double>* output,
std::vector<double>* parameters) override;
/**
* Compute gradient of the residual function f(x) = 0 - MSE(x) for a specific input x.
* The method uses the central difference method.
*
* @param[in] input Vector being a single input
* @param[out] gradient Resulting gradient
* @param[in] h Step used in the central difference
*/
LIB4NEURO_API void
calculate_residual_gradient(std::vector<double>* input,
std::vector<double>* output,
std::vector<double>* gradient,
double h = 1e-3) override;
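/*
 * The central-difference scheme referenced above, written out for reference:
 * with r the residual, x the variable of differentiation, e_i the i-th unit
 * vector and h the step,
 *
 *     dr/dx_i  ~=  ( r(x + h * e_i) - r(x - h * e_i) ) / (2 * h),
 *
 * which has a truncation error of order O(h^2).
 */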
/**
*
* @param input
* @return
*/
LIB4NEURO_API double eval_on_single_input(std::vector<double>* input,
std::vector<double>* output,
std::vector<double>* weights = nullptr) override;
/**
*
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_test_data(std::vector<double>* weights = nullptr,
bool verbose = false) override;
/**
*
* @param results_file_path
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_test_data(std::string results_file_path,
std::vector<double>* weights = nullptr,
bool verbose = false) override;
/**
*
* @param results_file_path
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_test_data(std::ofstream* results_file_path,
std::vector<double>* weights = nullptr,
bool verbose = false) override;
/**
*
* @param data_set
* @param results_file_path
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
std::ofstream* results_file_path,
std::vector<double>* weights = nullptr,
bool verbose = false) override;
/**
*
* @param data_set
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
std::vector<double>* weights = nullptr,
bool verbose = false) override;
/**
*
* @param data_set
* @param results_file_path
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
std::string results_file_path,
std::vector<double>* weights = nullptr,
bool verbose = false) override;

/**
*
* @param i
* @param parameter_vector
* @param error_vector
* @return
*/
LIB4NEURO_API double eval_single_item_by_idx(size_t i,
std::vector<double>* parameter_vector,
std::vector<double>& error_vector) override;

/**
*
* @param error_vector
* @param gradient_vector
*/
LIB4NEURO_API void calculate_error_gradient_single(std::vector<double>& error_vector,
std::vector<double>& gradient_vector) override;
/**
*
* @return
*/
LIB4NEURO_API virtual std::vector<double> get_parameters() override;
/**
*
* @param params
*/
LIB4NEURO_API virtual void set_parameters(std::vector<double>& params) override;
/**
*
* @return
*/
LIB4NEURO_API virtual size_t get_n_data_set() override;
/**
*
* @return
*/
LIB4NEURO_API virtual size_t get_n_test_data_set() override;
/**
*
* @return
*/
LIB4NEURO_API virtual size_t get_n_outputs() override;
/**
*
* @param scaling
*/
LIB4NEURO_API virtual void randomize_parameters(double scaling) override;
};
class ErrorSum : public ErrorFunction {
public:
/**
*
*/
LIB4NEURO_API ErrorSum();
/**
*
*/
LIB4NEURO_API ~ErrorSum();
/**
*
* @param weights
* @return
*/
LIB4NEURO_API double eval(std::vector<double>* weights = nullptr,
bool denormalize_data = false,
bool verbose = false) override;
LIB4NEURO_API double eval_on_single_input(std::vector<double>* input,
std::vector<double>* output,
std::vector<double>* weights = nullptr) override;
/**
*
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_test_data(std::vector<double>* weights = nullptr,
bool verbose = false) override;
/**
*
* @param results_file_path
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_test_data(std::string results_file_path,
std::vector<double>* weights = nullptr,
bool verbose = false) override;
/**
*
* @param results_file_path
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_test_data(std::ofstream* results_file_path,
std::vector<double>* weights = nullptr,
bool verbose = false) override;
/**
*
* @param data_set
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
std::vector<double>* weights = nullptr,
bool verbose = false) override;
/**
*
* @param data_set
* @param results_file_path
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
std::string results_file_path,
std::vector<double>* weights = nullptr,
bool verbose = false) override;
/**
*
* @param data_set
* @param results_file_path
* @param weights
* @return
*/
LIB4NEURO_API double eval_on_data_set(DataSet* data_set,
std::ofstream* results_file_path,
std::vector<double>* weights = nullptr,
bool verbose = false) override;

/**
*
* @param i
* @param parameter_vector
* @param error_vector
* @return
*/
LIB4NEURO_API virtual double eval_single_item_by_idx(size_t i,
std::vector<double>* parameter_vector,
std::vector<double>& error_vector) override;

/**
*
* @param error_vector
* @param gradient_vector
*/
LIB4NEURO_API virtual void calculate_error_gradient_single(std::vector<double>& error_vector,
std::vector<double>& gradient_vector) override;

/**
*
* @param F
*/
LIB4NEURO_API void add_error_function(ErrorFunction* F,
double alpha = 1.0);
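/*
 * Usage sketch (hypothetical): `ef_a` and `ef_b` stand for pointers to
 * previously constructed ErrorFunction instances (e.g. MSE objects); each
 * summand is stored together with its coefficient alpha.
 *
 *     lib4neuro::ErrorSum total_error;
 *     total_error.add_error_function(ef_a, 0.5);
 *     total_error.add_error_function(ef_b, 2.0);
 *     double combined_error = total_error.eval();
 */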
/**
*
* @return
*/
LIB4NEURO_API size_t get_dimension() override;
/**
*
* @param params
* @param grad
* @param alpha
* @param batch
*/
LIB4NEURO_API void
calculate_error_gradient(std::vector<double>& params,
std::vector<double>& grad,
double alpha = 1.0,
size_t batch = 0) override;

/**
*
* @param params
* @param grad
* @param alpha
* @param batch
*/
LIB4NEURO_API void
analyze_error_gradient(std::vector<double>& params,
std::vector<double>& grad,
double alpha = 1.0,
size_t batch = 0) override;
LIB4NEURO_API void
calculate_residual_gradient(std::vector<double>* input,
std::vector<double>* output,
std::vector<double>* gradient,
double h = 1e-3) override;
LIB4NEURO_API double
calculate_single_residual(std::vector<double>* input,
std::vector<double>* output,
std::vector<double>* parameters = nullptr) override;
LIB4NEURO_API virtual std::vector<double> get_parameters() override;
/**
*
* @param params
*/
LIB4NEURO_API virtual void set_parameters(std::vector<double>& params) override;
/**
*
* @return
*/
LIB4NEURO_API virtual size_t get_n_data_set() override;
/**
*
* @return
*/
LIB4NEURO_API virtual size_t get_n_test_data_set() override;
LIB4NEURO_API virtual size_t get_n_outputs() override;
/**
*
* @param percent
*/
LIB4NEURO_API virtual void divide_data_train_test(double percent) override;
/**
*
*/
LIB4NEURO_API virtual void return_full_data_set_for_training() override;
/**
*
* @param jacobian
* @param rhs
*/
LIB4NEURO_API virtual void get_jacobian_and_rhs(
std::vector<std::vector<double>>& jacobian,
std::vector<double>& rhs) override;
/**
*
* @param scaling
*/
LIB4NEURO_API virtual void randomize_parameters(double scaling) override;
protected:
std::vector<ErrorFunction*>* summand;
std::vector<double> summand_coefficient;
};
}
#endif //INC_4NEURO_ERRORFUNCTION_H