/**
 * Implementation of NeuralNetworkSum, a network expressed as a sum of other
 * networks, each scaled by a coefficient expression evaluated on the input.
 *
 * @author Michal Kravčenko
 * @date 18.7.18 -
 */
    
    
    #include "NeuralNetworkSum.h"
    
    #include "NeuralNetworkSumSerialization.h"
    #include "General/ExprtkWrapperSerialization.h"
    
    
    BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::NeuralNetworkSum);
    
    NeuralNetworkSum::NeuralNetworkSum() : p_impl(new NeuralNetworkSumImpl()) {
        this->p_impl->summand             = nullptr;
        this->p_impl->summand_coefficient = nullptr;
    }

    NeuralNetworkSum::~NeuralNetworkSum() {

        if (this->p_impl->summand) {
            delete this->p_impl->summand;
            this->p_impl->summand = nullptr;
        }

        if (this->p_impl->summand_coefficient) {

            /* The coefficient wrappers are owned by this object; the summand networks are not deleted here. */
            for (auto el: *this->p_impl->summand_coefficient) {
                delete el;
            }

            delete this->p_impl->summand_coefficient;
            this->p_impl->summand_coefficient = nullptr;
        }
    }

    void NeuralNetworkSum::add_network(NeuralNetwork* net,
                                       std::string expression_string) {

        if (!this->p_impl->summand) {
            this->p_impl->summand = new std::vector<NeuralNetwork*>(0);
        }
        this->p_impl->summand->push_back(net);

        if (!this->p_impl->summand_coefficient) {
            this->p_impl->summand_coefficient = new std::vector<ExprtkWrapper*>(0);
        }
        this->p_impl->summand_coefficient->push_back(new ExprtkWrapper(expression_string));
    }
    
    void NeuralNetworkSum::eval_single(std::vector<double>& input,
                                       std::vector<double>& output,
                                       std::vector<double>* custom_weights_and_biases) {

        std::vector<double> mem_output(output.size());

        std::fill(output.begin(),
                  output.end(),
                  0.0);

        NeuralNetwork* SUM;

        for (size_t ni = 0; ni < this->p_impl->summand->size(); ++ni) {
            SUM = this->p_impl->summand->at(ni);

            if (SUM) {
                this->p_impl->summand->at(ni)->eval_single(input,
                                                           mem_output,
                                                           custom_weights_and_biases);

                double alpha = this->p_impl->summand_coefficient->at(ni)->eval(input);

                for (size_t j = 0; j < output.size(); ++j) {
                    output[j] += mem_output[j] * alpha;
                }
            } else {
                //TODO assume the result can be a vector of doubles

                double alpha = this->p_impl->summand_coefficient->at(ni)->eval(input);

                for (size_t j = 0; j < output.size(); ++j) {
                    output[j] += alpha;
                }
            }
        }
    }

    void NeuralNetworkSum::add_to_gradient_single(std::vector<double>& input,
                                                  std::vector<double>& error_derivative,
                                                  double error_scaling,
                                                  std::vector<double>& gradient) {

        NeuralNetwork* SUM;

        for (size_t ni = 0; ni < this->p_impl->summand->size(); ++ni) {
            SUM = this->p_impl->summand->at(ni);

            if (SUM) {

                double alpha = this->p_impl->summand_coefficient->at(ni)->eval(input);

                SUM->add_to_gradient_single(input,
                                            error_derivative,
                                            alpha * error_scaling,
                                            gradient);
            }
        }
    }

    size_t NeuralNetworkSum::get_n_weights() {
        //TODO insufficient solution, assumes the networks share weights

        if (this->p_impl->summand) {
            return this->p_impl->summand->at(0)->get_n_weights();
        }

        return 0;
    }

    size_t NeuralNetworkSum::get_n_biases() {
        //TODO insufficient solution, assumes the networks share weights

        if (this->p_impl->summand) {
            return this->p_impl->summand->at(0)->get_n_biases();
        }

        return 0;
    }

    size_t NeuralNetworkSum::get_n_inputs() {
        //TODO insufficient solution, assumes the networks share weights

        if (this->p_impl->summand) {
            return this->p_impl->summand->at(0)->get_n_inputs();
        }

        return 0;
    }

    size_t NeuralNetworkSum::get_n_neurons() {
        //TODO insufficient solution, assumes the networks share weights

        if (this->p_impl->summand) {
            return this->p_impl->summand->at(0)->get_n_neurons();
        }

        return 0;
    }

    size_t NeuralNetworkSum::get_n_outputs() {
        //TODO insufficient solution, assumes the networks share weights

        if (this->p_impl->summand) {
            return this->p_impl->summand->at(0)->get_n_outputs();
        }

        return 0;
    }

    std::vector<double>* NeuralNetworkSum::get_parameter_ptr_weights() {

        if (this->p_impl->summand) {
            return this->p_impl->summand->at(0)->get_parameter_ptr_weights();
        }

        return nullptr;
    }
    
    std::vector<double>* NeuralNetworkSum::get_parameter_ptr_biases() {

        if (this->p_impl->summand) {
            return this->p_impl->summand->at(0)->get_parameter_ptr_biases();
        }

        return nullptr;
    }
    
    void NeuralNetworkSum::eval_single_debug(std::vector<double>& input,
                                             std::vector<double>& output,
                                             std::vector<double>* custom_weights_and_biases) {

        std::vector<double> mem_output(output.size());

        std::fill(output.begin(),
                  output.end(),
                  0.0);

        NeuralNetwork* SUM;

        for (size_t ni = 0; ni < this->p_impl->summand->size(); ++ni) {
            SUM = this->p_impl->summand->at(ni);

            if (SUM) {
                this->p_impl->summand->at(ni)->eval_single_debug(input,
                                                                 mem_output,
                                                                 custom_weights_and_biases);

                double alpha = this->p_impl->summand_coefficient->at(ni)->eval(input);

                for (size_t j = 0; j < output.size(); ++j) {
                    output[j] += mem_output[j] * alpha;
                }
            } else {
                //TODO assume the result can be a vector of doubles

                double alpha = this->p_impl->summand_coefficient->at(ni)->eval(input);

                for (size_t j = 0; j < output.size(); ++j) {
                    output[j] += alpha;
                }
            }
        }
    }
}
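
/*
 * Usage sketch (illustrative only, not part of this translation unit). It assumes
 * two already constructed lib4neuro::NeuralNetwork instances `net_a` and `net_b`
 * with a single output each; only the API visible in this file (add_network,
 * eval_single) is relied upon, and the constant coefficient strings "2.0" and
 * "-1.0" are just example ExprtkWrapper expressions; input-dependent expressions
 * are possible as well, with variable names as defined by ExprtkWrapper.
 *
 *     lib4neuro::NeuralNetworkSum sum;
 *
 *     // f(x) = 2 * net_a(x) - net_b(x)
 *     sum.add_network(&net_a, "2.0");
 *     sum.add_network(&net_b, "-1.0");
 *
 *     std::vector<double> input  = {0.5};
 *     std::vector<double> output(1);
 *     sum.eval_single(input, output, nullptr);  // output[0] = 2*net_a(0.5) - net_b(0.5)
 */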