/**
 * Implementation of NeuralNetworkSum, a weighted sum of neural networks whose
 * coefficients are given by ExprtkWrapper expressions.
 *
 * @author Michal Kravčenko
 * @date 18.7.18 -
 */

#include "NeuralNetworkSum.h"

NeuralNetworkSum::NeuralNetworkSum(){
    this->summand = nullptr;
    this->summand_coefficient = nullptr;
}

NeuralNetworkSum::~NeuralNetworkSum() {
    if( this->summand ){
        delete this->summand;
    }

    if( this->summand_coefficient ){
        delete this->summand_coefficient;
    }
}

// Appends a summand network together with the expression that scales its output.
void NeuralNetworkSum::add_network( NeuralNetwork *net, std::string expression_string ) {
    if(!this->summand){
        this->summand = new std::vector<NeuralNetwork*>(0);
    }
    this->summand->push_back( net );

    if(!this->summand_coefficient){
        this->summand_coefficient = new std::vector<ExprtkWrapper>(0);
    }
    this->summand_coefficient->push_back( ExprtkWrapper( expression_string ) );
}

// Evaluates the weighted sum of the registered networks:
//   output[j] = sum_i coefficient_i(input) * network_i(input)[j],
// where a null network entry contributes its coefficient value directly.
void NeuralNetworkSum::eval_single(std::vector<double> &input, std::vector<double> &output, std::vector<double> *custom_weights_and_biases) {
    std::vector<double> mem_output(output.size());
    std::fill(output.begin(), output.end(), 0.0);

    NeuralNetwork *SUM;

    for(size_t ni = 0; ni < this->summand->size(); ++ni){
        SUM = this->summand->at(ni);

        if( SUM ){
            SUM->eval_single(input, mem_output, custom_weights_and_biases);

            double alpha = this->summand_coefficient->at(ni).eval(input);

            for(size_t j = 0; j < output.size(); ++j){
                output[j] += mem_output[j] * alpha;
            }
        }
        else{
            //TODO assume the result can be a vector of doubles
            double alpha = this->summand_coefficient->at(ni).eval(input);

            for(size_t j = 0; j < output.size(); ++j){
                output[j] += alpha;
            }
        }
    }
}

size_t NeuralNetworkSum::get_n_weights(){
    //TODO insufficient solution, assumes the networks share weights
    if(this->summand){
        return this->summand->at(0)->get_n_weights();
    }
    return 0;
}

size_t NeuralNetworkSum::get_n_biases(){
    //TODO insufficient solution, assumes the networks share weights
    if(this->summand){
        return this->summand->at(0)->get_n_biases();
    }
    return 0;
}

size_t NeuralNetworkSum::get_n_inputs() {
    //TODO insufficient solution, assumes the networks share weights
    if(this->summand){
        return this->summand->at(0)->get_n_inputs();
    }
    return 0;
}

size_t NeuralNetworkSum::get_n_neurons() {
    //TODO insufficient solution, assumes the networks share weights
    if(this->summand){
        return this->summand->at(0)->get_n_neurons();
    }
    return 0;
}

size_t NeuralNetworkSum::get_n_outputs() {
    //TODO insufficient solution, assumes the networks share weights
    if(this->summand){
        return this->summand->at(0)->get_n_outputs();
    }
    return 0;
}
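
/*
 * Usage sketch (illustrative only, not part of the library). It assumes some
 * concrete NeuralNetwork subclass "DenseNetwork" exists with matching
 * input/output sizes; that class name and its constructor arguments are
 * hypothetical, only NeuralNetworkSum's own interface is taken from this file.
 *
 *   NeuralNetwork *f = new DenseNetwork(...);   // hypothetical summand #1
 *   NeuralNetwork *g = new DenseNetwork(...);   // hypothetical summand #2
 *
 *   NeuralNetworkSum sum;
 *   sum.add_network(f, "2.0");   // scale f's output by the constant 2.0
 *   sum.add_network(g, "0.5");   // scale g's output by the constant 0.5
 *
 *   std::vector<double> input  = {0.1, 0.2};
 *   std::vector<double> output(1, 0.0);
 *   sum.eval_single(input, output, nullptr);
 *   // output[0] == 2.0 * f(input)[0] + 0.5 * g(input)[0]
 */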