//
// Created by martin on 7/15/18.
//

#include <vector>

#include "ErrorFunctions.h"
namespace lib4neuro {
    size_t ErrorFunction::get_dimension() {
        return this->dimension;
    }
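
    // Builds a mean squared error function over the given network and data set;
    // the dimension of the parameter space is the total number of weights and biases.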
    MSE::MSE(NeuralNetwork *net, DataSet *ds) {
        this->net = net;
        this->ds = ds;
        this->dimension = net->get_n_weights() + net->get_n_biases();
    }
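
    // Evaluates the mean squared error of the network outputs against the data set
    // targets using the supplied parameter vector 'weights'; the summed squared
    // differences are divided by the number of data set elements.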
    double MSE::eval(std::vector<double> *weights) {
        size_t dim_out = this->ds->get_output_dim();
        size_t n_elements = this->ds->get_n_elements();
        double error = 0.0, val;
        std::vector<std::pair<std::vector<double>, std::vector<double>>> *data = this->ds->get_data();
        // TODO instead use something smarter
        // this->net->copy_weights(weights);
        std::vector<double> output(dim_out);
        for (auto el: *data) {  // Iterate through every element in the test set
            this->net->eval_single(el.first, output,
                                   weights);  // Compute the net output and store it into 'output' variable

            // printf("errors: ");
            for (size_t j = 0; j < dim_out; ++j) {  // Compute difference for every element of the output vector
                val = output[j] - el.second[j];
                error += val * val;
                // printf("%f, ", val * val);
            }
            // printf("\n");
        }

        // printf("n_elements: %d\n", n_elements);
        return error / n_elements;
    }

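    // Accumulates the gradient of the error with respect to the network parameters
    // 'params' into 'grad': for every data element the output derivative
    // 2 * (computed - expected) is handed to add_to_gradient_single together with
    // the scaling factor alpha / n_elements.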
    void MSE::calculate_error_gradient(std::vector<double> &params, std::vector<double> &grad, double alpha) {

        size_t dim_out = this->ds->get_output_dim();
        size_t n_elements = this->ds->get_n_elements();

        std::vector<std::pair<std::vector<double>, std::vector<double>>> *data = this->ds->get_data();

        std::vector<double> error_derivative(dim_out);


        for (auto el: *data) {  // Iterate through every element in the test set

            this->net->eval_single(el.first, error_derivative,
                                   &params);  // Compute the net output and store it into 'error_derivative'

            for (size_t j = 0; j < dim_out; ++j) {
                error_derivative[j] = 2.0 * (error_derivative[j] - el.second[j]);  // 2 * (real output - expected output)
            }

            this->net->add_to_gradient_single(el.first, error_derivative, alpha / n_elements, grad);
        }
    }

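    // Returns a newly allocated vector holding copies of the network's weights
    // followed by its biases; the caller takes ownership of the returned pointer.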
    std::vector<double> *MSE::get_parameters() {
        std::vector<double> *output = new std::vector<double>(this->net->get_n_weights() + this->net->get_n_biases());

        size_t i = 0;

        for (auto el: *this->net->get_parameter_ptr_weights()) {
            output->at(i) = el;
            ++i;
        }

        for (auto el: *this->net->get_parameter_ptr_biases()) {
            output->at(i) = el;
            ++i;
        }

        return output;
    }
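
    // Creates an empty weighted sum of error functions.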
    ErrorSum::ErrorSum() {
        this->summand = nullptr;
        this->summand_coefficient = nullptr;
        this->dimension = 0;
    }
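
    // Frees the containers of summands and coefficients; the ErrorFunction
    // instances themselves are not deleted here.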
    ErrorSum::~ErrorSum() {
        if (this->summand) {
            delete this->summand;
        }
        if (this->summand_coefficient) {
            delete this->summand_coefficient;
        }
    }
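
    // Evaluates the weighted sum of all registered error functions at 'weights';
    // null summands are skipped.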
    double ErrorSum::eval(std::vector<double> *weights) {
        double output = 0.0;
        ErrorFunction *ef = nullptr;
        for (unsigned int i = 0; i < this->summand->size(); ++i) {
            ef = this->summand->at(i);

            if (ef) {
                output += ef->eval(weights) * this->summand_coefficient->at(i);
            }
        }

        return output;
    }
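
    // Accumulates the gradients of all registered error functions into 'grad',
    // each scaled by its coefficient multiplied by alpha.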
    void ErrorSum::calculate_error_gradient(std::vector<double> &params, std::vector<double> &grad, double alpha) {

        ErrorFunction *ef = nullptr;
        for (size_t i = 0; i < this->summand->size(); ++i) {
            ef = this->summand->at(i);

            if (ef) {
                ef->calculate_error_gradient(params, grad, this->summand_coefficient->at(i) * alpha);
            }
        }
    }
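
    // Appends an error function F with weighting coefficient alpha; the containers
    // are allocated lazily and the overall dimension is the maximum dimension of
    // the summands. A null F is allowed and is skipped during evaluation.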
    void ErrorSum::add_error_function(ErrorFunction *F, double alpha) {
        if (!this->summand) {
            this->summand = new std::vector<ErrorFunction *>(0);
        }
        this->summand->push_back(F);

        if (!this->summand_coefficient) {
            this->summand_coefficient = new std::vector<double>(0);
        }
        this->summand_coefficient->push_back(alpha);

        if (F) {
            if (F->get_dimension() > this->dimension) {
                this->dimension = F->get_dimension();
            }
        }
    }
    size_t ErrorSum::get_dimension() {
        // if (!this->dimension) {
        //     size_t max = 0;
        //     for (auto e : *this->summand) {
        //         if (e->get_dimension() > max) {
        //             max = e->get_dimension();
        //         }
        //     }
        //
        //     this->dimension = max;
        // }
        return this->dimension;
    }

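    // Delegates to the first summand's get_parameters(); assumes at least one
    // error function has been added.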
    std::vector<double> *ErrorSum::get_parameters() {
        return this->summand->at(0)->get_parameters();
    }

}