    /**
     * Implementation of the NeuralNetworkSum class: a weighted sum of neural networks,
     * where each summand network is scaled by a coefficient expression evaluated on the
     * network input.
     *
     * @author Michal Kravčenko
     * @date 18.7.18 -
     */
    
    
    #include "NeuralNetworkSum.h"
    
    #include "NeuralNetworkSumSerialization.h"
    #include "General/ExprtkWrapperSerialization.h"
    
    
    BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::NeuralNetworkSum);
    
    namespace lib4neuro {

        NeuralNetworkSum::~NeuralNetworkSum() {
            // The summand networks themselves are not deleted here; this object does not
            // take ownership of the networks passed to add_network.
            if (this->summand) {
                delete this->summand;
                this->summand = nullptr;
            }

            // The coefficient wrappers are created by add_network and are owned here.
            if (this->summand_coefficient) {
                for (auto el: *this->summand_coefficient) {
                    delete el;
                }

                delete this->summand_coefficient;
                this->summand_coefficient = nullptr;
            }
        }
    
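        /**
         * Appends @p net as another summand of the sum, together with a coefficient given
         * by the expression @p expression_string (wrapped in an ExprtkWrapper).
         */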
        void NeuralNetworkSum::add_network(NeuralNetwork *net, std::string expression_string) {
            if (!this->summand) {
                this->summand = new std::vector<NeuralNetwork *>(0);
            }
            this->summand->push_back(net);
    
            if (!this->summand_coefficient) {
                this->summand_coefficient = new std::vector<ExprtkWrapper *>(0);
            }
            this->summand_coefficient->push_back(new ExprtkWrapper(expression_string));
        }
    
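        /**
         * Evaluates the sum on @p input: each summand network is evaluated and its output
         * is scaled by its coefficient expression; a null summand contributes only the
         * value of its coefficient expression to every output component.
         */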
        void NeuralNetworkSum::eval_single(std::vector<double> &input, std::vector<double> &output,
                                           std::shared_ptr<std::vector<double>> custom_weights_and_biases) {

            std::vector<double> mem_output(output.size());
            std::fill(output.begin(), output.end(), 0.0);

            NeuralNetwork *SUM;

            for (size_t ni = 0; ni < this->summand->size(); ++ni) {
                SUM = this->summand->at(ni);

                if (SUM) {
                    this->summand->at(ni)->eval_single(input, mem_output, custom_weights_and_biases);

                    double alpha = this->summand_coefficient->at(ni)->eval(input);

                    for (size_t j = 0; j < output.size(); ++j) {
                        output[j] += mem_output[j] * alpha;
                    }
                } else {
                    //TODO assume the result can be a vector of doubles
                    double alpha = this->summand_coefficient->at(ni)->eval(input);

                    for (size_t j = 0; j < output.size(); ++j) {
                        output[j] += alpha;
                    }
                }
            }
        }
    
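        /**
         * Accumulates into @p gradient the gradient contribution of every summand network,
         * with each contribution scaled by the summand's coefficient expression times
         * @p error_scaling.
         */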
        void NeuralNetworkSum::add_to_gradient_single(std::vector<double> &input, std::vector<double> &error_derivative,
                                                      double error_scaling, std::vector<double> &gradient) {
    
            NeuralNetwork *SUM;
    
            for (size_t ni = 0; ni < this->summand->size(); ++ni) {
                SUM = this->summand->at(ni);
    
                if (SUM) {
                    double alpha = this->summand_coefficient->at(ni)->eval(input);
                    SUM->add_to_gradient_single(input, error_derivative, alpha * error_scaling, gradient);
                }
            }
        }
    
        size_t NeuralNetworkSum::get_n_weights() {
            //TODO insufficient solution, assumes the networks share weights
            if (this->summand) {
                return this->summand->at(0)->get_n_weights();
            }

            return 0;
        }
    
        size_t NeuralNetworkSum::get_n_biases() {
            //TODO insufficient solution, assumes the networks share weights
            if (this->summand) {
                return this->summand->at(0)->get_n_biases();
            }

            return 0;
        }
    
        size_t NeuralNetworkSum::get_n_inputs() {
            //TODO insufficient solution, assumes the networks share weights
            if (this->summand) {
                return this->summand->at(0)->get_n_inputs();
            }

            return 0;
        }
    
        size_t NeuralNetworkSum::get_n_neurons() {
            //TODO insufficient solution, assumes the networks share weights
            if (this->summand) {
                return this->summand->at(0)->get_n_neurons();
            }

            return 0;
        }
    
        size_t NeuralNetworkSum::get_n_outputs() {
            //TODO insufficient solution, assumes the networks share weights
            if (this->summand) {
                return this->summand->at(0)->get_n_outputs();
            }

            return 0;
        }
    
        std::vector<double> *NeuralNetworkSum::get_parameter_ptr_weights() {
            if (this->summand) {
                return this->summand->at(0)->get_parameter_ptr_weights();
            }
    
            return nullptr;
        }
    
        std::vector<double> *NeuralNetworkSum::get_parameter_ptr_biases() {
            if (this->summand) {
                return this->summand->at(0)->get_parameter_ptr_biases();
            }
    
            return nullptr;
        }
    
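        /**
         * Debug variant of eval_single: performs the same weighted-sum evaluation, but the
         * summand networks are evaluated through their eval_single_debug method.
         */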
        void NeuralNetworkSum::eval_single_debug(std::vector<double> &input, std::vector<double> &output,
                                                 std::shared_ptr<std::vector<double>> custom_weights_and_biases) {
    
            std::vector<double> mem_output(output.size());
            std::fill(output.begin(), output.end(), 0.0);
    
            NeuralNetwork *SUM;
    
            for (size_t ni = 0; ni < this->summand->size(); ++ni) {
                SUM = this->summand->at(ni);
    
                if (SUM) {
                    this->summand->at(ni)->eval_single_debug(input, mem_output, custom_weights_and_biases);
    
                    double alpha = this->summand_coefficient->at(ni)->eval(input);
    
                    for (size_t j = 0; j < output.size(); ++j) {
                        output[j] += mem_output[j] * alpha;
                    }
                } else {
                    //TODO assume the result can be a vector of doubles
                    double alpha = this->summand_coefficient->at(ni)->eval(input);
    
                    for (size_t j = 0; j < output.size(); ++j) {
                        output[j] += alpha;
                    }
                }
            }
    
        }

    }