// NeuralNetworkSum.cpp
/**
 * Implementation of NeuralNetworkSum: represents a weighted sum of neural
 * networks, where each summand network is scaled by a coefficient given as
 * an analytic expression of the inputs (evaluated via ExprtkWrapper).
 *
 * @author Michal Kravčenko
 * @date 18.7.18 -
 */

#include "NeuralNetworkSum.h"
#include "NeuralNetworkSumSerialization.h"
#include "General/ExprtkWrapperSerialization.h"

BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::NeuralNetworkSum);
        this->summand             = nullptr;
    /**
     * Destructor. Frees the container of summand pointers (the networks
     * themselves are supplied by the caller in add_network() and are NOT
     * deleted here) and the coefficient wrappers, which ARE owned because
     * add_network() allocates them.
     */
    NeuralNetworkSum::~NeuralNetworkSum() {
        if (this->summand) {
            delete this->summand;
            this->summand = nullptr;
        }

        // Freed independently of 'summand': guarding this cleanup with the
        // summand check would leak every ExprtkWrapper whenever 'summand'
        // happened to be null while 'summand_coefficient' was not.
        if (this->summand_coefficient) {
            for (auto el : *this->summand_coefficient) {
                delete el;
            }
            delete this->summand_coefficient;
            this->summand_coefficient = nullptr;
        }
    }
    /**
     * Registers one summand network together with its scaling coefficient.
     *
     * @param net Non-owning pointer to the network to be added (may be
     *            nullptr - eval_single() then treats the summand as the
     *            constant 1 scaled by the coefficient).
     * @param expression_string Analytic expression of the inputs used as the
     *                          multiplicative coefficient of this summand.
     */
    void NeuralNetworkSum::add_network(NeuralNetwork* net,
                                       std::string expression_string) {
        // Lazily allocate both parallel containers on first use.
        if (!this->summand) {
            this->summand = new std::vector<NeuralNetwork*>(0);
        }
        this->summand->push_back(net);

        if (!this->summand_coefficient) {
            this->summand_coefficient = new std::vector<ExprtkWrapper*>(0);
        }
        // Owned by this object; released in the destructor.
        this->summand_coefficient->push_back(new ExprtkWrapper(expression_string));
    }
    void NeuralNetworkSum::eval_single(std::vector<double>& input,
                                       std::vector<double>& output,
                                       std::vector<double>* custom_weights_and_biases) {
        std::vector<double> mem_output(output.size());
Martin Beseda's avatar
Martin Beseda committed
        std::fill(output.begin(),
                  output.end(),
                  0.0);
Martin Beseda's avatar
Martin Beseda committed
        NeuralNetwork* SUM;
        for (size_t ni = 0; ni < this->summand->size(); ++ni) {
            SUM = this->summand->at(ni);
Martin Beseda's avatar
Martin Beseda committed
                this->summand->at(ni)->eval_single(input,
                                                   mem_output,
                                                   custom_weights_and_biases);
                double alpha = this->summand_coefficient->at(ni)->eval(input);
                for (size_t j = 0; j < output.size(); ++j) {
                    output[j] += mem_output[j] * alpha;
                }
            } else {
                //TODO assume the result can be a vector of doubles
                double alpha = this->summand_coefficient->at(ni)->eval(input);
                for (size_t j = 0; j < output.size(); ++j) {
                    output[j] += alpha;
                }
Martin Beseda's avatar
Martin Beseda committed
    void NeuralNetworkSum::add_to_gradient_single(std::vector<double>& input,
                                                  std::vector<double>& error_derivative,
                                                  double error_scaling,
                                                  std::vector<double>& gradient) {
Martin Beseda's avatar
Martin Beseda committed
        NeuralNetwork* SUM;
        for (size_t ni = 0; ni < this->summand->size(); ++ni) {
            SUM = this->summand->at(ni);
            if (SUM) {
                double alpha = this->summand_coefficient->at(ni)->eval(input);
Martin Beseda's avatar
Martin Beseda committed
                SUM->add_to_gradient_single(input,
                                            error_derivative,
                                            alpha * error_scaling,
                                            gradient);
    /**
     * @return Weight count of the first summand network, or 0 when no
     *         network has been added yet.
     */
    size_t NeuralNetworkSum::get_n_weights() {
        //TODO insufficient solution, assumes the networks share weights
        // Empty-vector guard: at(0) on an empty container would throw.
        if (this->summand && !this->summand->empty()) {
            return this->summand->at(0)->get_n_weights();
        }
        return 0;
    }
    /**
     * @return Bias count of the first summand network, or 0 when no
     *         network has been added yet.
     */
    size_t NeuralNetworkSum::get_n_biases() {
        //TODO insufficient solution, assumes the networks share weights
        // Empty-vector guard: at(0) on an empty container would throw.
        if (this->summand && !this->summand->empty()) {
            return this->summand->at(0)->get_n_biases();
        }
        return 0;
    }
    /**
     * @return Input dimension of the first summand network, or 0 when no
     *         network has been added yet.
     */
    size_t NeuralNetworkSum::get_n_inputs() {
        //TODO insufficient solution, assumes the networks share weights
        // Empty-vector guard: at(0) on an empty container would throw.
        if (this->summand && !this->summand->empty()) {
            return this->summand->at(0)->get_n_inputs();
        }
        return 0;
    }
    /**
     * @return Neuron count of the first summand network, or 0 when no
     *         network has been added yet.
     */
    size_t NeuralNetworkSum::get_n_neurons() {
        //TODO insufficient solution, assumes the networks share weights
        // Empty-vector guard: at(0) on an empty container would throw.
        if (this->summand && !this->summand->empty()) {
            return this->summand->at(0)->get_n_neurons();
        }
        return 0;
    }
    /**
     * @return Output dimension of the first summand network, or 0 when no
     *         network has been added yet.
     */
    size_t NeuralNetworkSum::get_n_outputs() {
        //TODO insufficient solution, assumes the networks share weights
        // Empty-vector guard: at(0) on an empty container would throw.
        if (this->summand && !this->summand->empty()) {
            return this->summand->at(0)->get_n_outputs();
        }
        return 0;
    }
Martin Beseda's avatar
Martin Beseda committed
    std::vector<double>* NeuralNetworkSum::get_parameter_ptr_weights() {
        if (this->summand) {
            return this->summand->at(0)->get_parameter_ptr_weights();
        }
        return nullptr;
Martin Beseda's avatar
Martin Beseda committed
    /**
     * @return Pointer to the bias vector of the first summand network
     *         (assumes all summands share parameters - see the TODOs on the
     *         get_n_* accessors), or nullptr when no network was added.
     */
    std::vector<double>* NeuralNetworkSum::get_parameter_ptr_biases() {
        // Empty-vector guard: at(0) on an empty container would throw.
        if (this->summand && !this->summand->empty()) {
            return this->summand->at(0)->get_parameter_ptr_biases();
        }
        return nullptr;
    }
    void NeuralNetworkSum::eval_single_debug(std::vector<double>& input,
                                             std::vector<double>& output,
                                             std::vector<double>* custom_weights_and_biases) {
        std::vector<double> mem_output(output.size());
Martin Beseda's avatar
Martin Beseda committed
        std::fill(output.begin(),
                  output.end(),
                  0.0);
Martin Beseda's avatar
Martin Beseda committed
        NeuralNetwork* SUM;

        for (size_t ni = 0; ni < this->summand->size(); ++ni) {
            SUM = this->summand->at(ni);

            if (SUM) {
Martin Beseda's avatar
Martin Beseda committed
                this->summand->at(ni)->eval_single_debug(input,
                                                         mem_output,
                                                         custom_weights_and_biases);

                double alpha = this->summand_coefficient->at(ni)->eval(input);

                for (size_t j = 0; j < output.size(); ++j) {
                    output[j] += mem_output[j] * alpha;
                }
            } else {
                //TODO assume the result can be a vector of doubles
                double alpha = this->summand_coefficient->at(ni)->eval(input);

                for (size_t j = 0; j < output.size(); ++j) {
                    output[j] += alpha;
                }
            }
        }

    }