/**
 * Implementation of NeuralNetworkSum: a network composed as a sum of
 * member networks, each scaled by a coefficient given as an exprtk
 * expression.
*
* @author Michal Kravčenko
* @date 18.7.18 -
*/
#include <algorithm>
#include <vector>

#include <boost/serialization/export.hpp>
#include "NeuralNetworkSumSerialization.h"
#include "General/ExprtkWrapperSerialization.h"
BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::NeuralNetworkSum);
namespace lib4neuro {
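
/**
 * Constructs an empty sum; the summand and coefficient containers are
 * allocated lazily by add_network().
 */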
NeuralNetworkSum::NeuralNetworkSum() {
this->summand = nullptr;
this->summand_coefficient = nullptr;
}
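
/**
 * Deletes the container of summand pointers (the networks themselves are
 * not owned here) and deletes each owned ExprtkWrapper coefficient.
 */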
NeuralNetworkSum::~NeuralNetworkSum() {
if (this->summand) {
delete this->summand;
this->summand = nullptr;
}

if (this->summand_coefficient) {
for (auto el: *this->summand_coefficient) {
delete el;
}
delete this->summand_coefficient;
this->summand_coefficient = nullptr;
}
}
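
/**
 * Appends 'net' to the sum and wraps 'expression_string' in an
 * ExprtkWrapper that will scale the network's output during evaluation.
 */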
void NeuralNetworkSum::add_network(NeuralNetwork *net, std::string expression_string) {
if (!this->summand) {
this->summand = new std::vector<NeuralNetwork *>(0);
}
this->summand->push_back(net);
if (!this->summand_coefficient) {
this->summand_coefficient = new std::vector<ExprtkWrapper *>(0);
}
this->summand_coefficient->push_back(new ExprtkWrapper(expression_string));
}
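
/* A minimal usage sketch (hypothetical variable names; assumes the
 * coefficient expressions use the exprtk syntax accepted by ExprtkWrapper):
 *
 *   lib4neuro::NeuralNetworkSum sum;
 *   sum.add_network(&net_a, "2.0");   // contributes 2.0 * net_a(input)
 *   sum.add_network(&net_b, "0.5");   // contributes 0.5 * net_b(input)
 *   sum.eval_single(input, output, nullptr);
 */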
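
/**
 * Evaluates the sum at 'input': each summand network is evaluated into a
 * temporary buffer and added to 'output' scaled by the value of its
 * coefficient expression; a null summand contributes just the coefficient
 * value to every output component.
 */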
void NeuralNetworkSum::eval_single(std::vector<double> &input, std::vector<double> &output,
std::vector<double> *custom_weights_and_biases) {
std::vector<double> mem_output(output.size());
std::fill(output.begin(), output.end(), 0.0);
NeuralNetwork *SUM;

for (size_t ni = 0; ni < this->summand->size(); ++ni) {
SUM = this->summand->at(ni);

if (SUM) {
this->summand->at(ni)->eval_single(input, mem_output, custom_weights_and_biases);

double alpha = this->summand_coefficient->at(ni)->eval(input);

for (size_t j = 0; j < output.size(); ++j) {
output[j] += mem_output[j] * alpha;
}
} else {
//TODO assume the result can be a vector of doubles
double alpha = this->summand_coefficient->at(ni)->eval(input);
for (size_t j = 0; j < output.size(); ++j) {
output[j] += alpha;
}

}
}
}

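/**
 * Debug variant of eval_single(): identical accumulation logic, but each
 * summand network is evaluated via eval_single_debug().
 */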
void NeuralNetworkSum::eval_single_debug(std::vector<double> &input, std::vector<double> &output,
std::vector<double> *custom_weights_and_biases) {
std::vector<double> mem_output(output.size());
std::fill(output.begin(), output.end(), 0.0);
NeuralNetwork *SUM;
for (size_t ni = 0; ni < this->summand->size(); ++ni) {
SUM = this->summand->at(ni);
if (SUM) {
this->summand->at(ni)->eval_single_debug(input, mem_output, custom_weights_and_biases);
double alpha = this->summand_coefficient->at(ni)->eval(input);
for (size_t j = 0; j < output.size(); ++j) {
output[j] += mem_output[j] * alpha;
}
} else {
//TODO assume the result can be a vector of doubles
double alpha = this->summand_coefficient->at(ni)->eval(input);
for (size_t j = 0; j < output.size(); ++j) {
output[j] += alpha;
}
}
}
}
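
/**
 * Accumulates into 'gradient' the contribution of every summand network,
 * scaling 'error_scaling' by the value of the summand's coefficient
 * expression at the given input.
 */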
void NeuralNetworkSum::add_to_gradient_single(std::vector<double> &input, std::vector<double> &error_derivative,
double error_scaling, std::vector<double> &gradient) {
NeuralNetwork *SUM;

for (size_t ni = 0; ni < this->summand->size(); ++ni) {
SUM = this->summand->at(ni);

if (SUM) {
double alpha = this->summand_coefficient->at(ni)->eval(input);
SUM->add_to_gradient_single(input, error_derivative, alpha * error_scaling, gradient);
}

}
}
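
/* The getters below delegate to the first summand network; as the TODO
 * notes say, this assumes all summands share the same parameters. */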
size_t NeuralNetworkSum::get_n_weights() {
//TODO insufficient solution, assumes the networks share weights
if (this->summand) {
return this->summand->at(0)->get_n_weights();
}
return 0;
}

size_t NeuralNetworkSum::get_n_biases() {
//TODO insufficient solution, assumes the networks share weights
if (this->summand) {
return this->summand->at(0)->get_n_biases();
}
return 0;
}

size_t NeuralNetworkSum::get_n_inputs() {
//TODO insufficient solution, assumes the networks share weights
if (this->summand) {
return this->summand->at(0)->get_n_inputs();
}
return 0;
}

size_t NeuralNetworkSum::get_n_neurons() {
//TODO insufficient solution, assumes the networks share weights
if (this->summand) {
return this->summand->at(0)->get_n_neurons();
}
return 0;
}

size_t NeuralNetworkSum::get_n_outputs() {
//TODO insufficient solution, assumes the networks share weights
if (this->summand) {
return this->summand->at(0)->get_n_outputs();
}
return 0;
}

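/**
 * Returns a pointer to the weight vector of the first summand network,
 * or nullptr if no network has been added.
 */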
std::vector<double> *NeuralNetworkSum::get_parameter_ptr_weights() {
if (this->summand) {
return this->summand->at(0)->get_parameter_ptr_weights();
}

return nullptr;
}
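
/**
 * Returns a pointer to the bias vector of the first summand network,
 * or nullptr if no network has been added.
 */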
std::vector<double> *NeuralNetworkSum::get_parameter_ptr_biases() {
if (this->summand) {
return this->summand->at(0)->get_parameter_ptr_biases();
}

return nullptr;
}

}