/**
 * Implementation of NeuralNetworkSum: a sum of neural networks, each summand
 * scaled by a coefficient expression evaluated on the network input.
*
* @author Michal Kravčenko
* @date 18.7.18 -
*/
#include <boost/serialization/export.hpp>
#include "NeuralNetworkSumSerialization.h"
#include "General/ExprtkWrapperSerialization.h"
//BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::NeuralNetworkSum);
namespace lib4neuro {
    NeuralNetworkSum::NeuralNetworkSum() : p_impl(new NeuralNetworkSumImpl()) {
        this->p_impl->summand = nullptr;
        this->p_impl->summand_coefficient = nullptr;
    }

    NeuralNetworkSum::~NeuralNetworkSum() {
        if (this->p_impl->summand) {
            delete this->p_impl->summand;
            this->p_impl->summand = nullptr;
        }

        if (this->p_impl->summand_coefficient) {
            /* The coefficient wrappers are owned by this object;
             * the summand networks themselves are not. */
            for (auto el : *this->p_impl->summand_coefficient) {
                delete el;
            }
            delete this->p_impl->summand_coefficient;
            this->p_impl->summand_coefficient = nullptr;
        }
    }

    void NeuralNetworkSum::add_network(NeuralNetwork* net,
                                       std::string expression_string) {
        /* Both containers are allocated lazily on the first call. */
        if (!this->p_impl->summand) {
            this->p_impl->summand = new std::vector<NeuralNetwork*>(0);
        }
        this->p_impl->summand->push_back(net);

        if (!this->p_impl->summand_coefficient) {
            this->p_impl->summand_coefficient = new std::vector<ExprtkWrapper*>(0);
        }
        this->p_impl->summand_coefficient->push_back(new ExprtkWrapper(expression_string));
    }
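
    /* Usage sketch (illustrative only; the network names and coefficient strings
     * below are hypothetical). The sum takes ownership of the coefficient
     * wrappers but not of the summand networks, so the networks must outlive it:
     *
     *   lib4neuro::NeuralNetworkSum sum;
     *   sum.add_network(&net_a, "2.0"); // constant coefficient: 2 * net_a(x)
     *   sum.add_network(&net_b, "x");   // input-dependent coefficient: x * net_b(x)
     */
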
    void NeuralNetworkSum::eval_single(std::vector<double>& input,
                                       std::vector<double>& output,
                                       std::vector<double>* custom_weights_and_biases) {
        std::vector<double> mem_output(output.size());
        std::fill(output.begin(),
                  output.end(),
                  0.0);

        NeuralNetwork* SUM;

        for (size_t ni = 0; ni < this->p_impl->summand->size(); ++ni) {
            SUM = this->p_impl->summand->at(ni);

            if (SUM) {
                this->p_impl->summand->at(ni)->eval_single(input,
                                                           mem_output,
                                                           custom_weights_and_biases);

                double alpha = this->p_impl->summand_coefficient->at(ni)->eval(input);

                for (size_t j = 0; j < output.size(); ++j) {
                    output[j] += mem_output[j] * alpha;
                }
            } else {
                /* A null summand contributes only its coefficient expression. */
                //TODO assume the result can be a vector of doubles
                double alpha = this->p_impl->summand_coefficient->at(ni)->eval(input);

                for (size_t j = 0; j < output.size(); ++j) {
                    output[j] += alpha;
                }
            }
        }
    }
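
    /* eval_single computes output(x) = sum_i alpha_i(x) * net_i(x): each summand
     * network is evaluated at the input and scaled by its coefficient expression.
     * For instance (hypothetical coefficients), summands registered with "2.0"
     * and "x" yield 2 * net_0(x) + x * net_1(x). */
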
    void NeuralNetworkSum::add_to_gradient_single(std::vector<double>& input,
                                                  std::vector<double>& error_derivative,
                                                  double error_scaling,
                                                  std::vector<double>& gradient) {
        NeuralNetwork* SUM;

        for (size_t ni = 0; ni < this->p_impl->summand->size(); ++ni) {
            SUM = this->p_impl->summand->at(ni);

            if (SUM) {
                double alpha = this->p_impl->summand_coefficient->at(ni)->eval(input);
                SUM->add_to_gradient_single(input,
                                            error_derivative,
                                            alpha * error_scaling,
                                            gradient);
            }
        }
    }
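
    /* A note on the scaling above: since the sum's output is
     * sum_i alpha_i(x) * net_i(x) and alpha_i(x) does not depend on the network
     * parameters, the chain rule gives d(output)/dw = alpha_i(x) * d(net_i)/dw,
     * which is why alpha is folded into error_scaling before delegating. */
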
    size_t NeuralNetworkSum::get_n_weights() {
        //TODO insufficient solution, assumes the networks share weights
        if (this->p_impl->summand) {
            return this->p_impl->summand->at(0)->get_n_weights();
        }

        return 0;
    }

    size_t NeuralNetworkSum::get_n_biases() {
        //TODO insufficient solution, assumes the networks share biases
        if (this->p_impl->summand) {
            return this->p_impl->summand->at(0)->get_n_biases();
        }

        return 0;
    }

    size_t NeuralNetworkSum::get_n_inputs() {
        //TODO insufficient solution, assumes all summands have the same number of inputs
        if (this->p_impl->summand) {
            return this->p_impl->summand->at(0)->get_n_inputs();
        }

        return 0;
    }

    size_t NeuralNetworkSum::get_n_neurons() {
        //TODO insufficient solution, assumes all summands have the same number of neurons
        if (this->p_impl->summand) {
            return this->p_impl->summand->at(0)->get_n_neurons();
        }

        return 0;
    }

    size_t NeuralNetworkSum::get_n_outputs() {
        //TODO insufficient solution, assumes all summands have the same number of outputs
        if (this->p_impl->summand) {
            return this->p_impl->summand->at(0)->get_n_outputs();
        }

        return 0;
    }

    std::vector<double>* NeuralNetworkSum::get_parameter_ptr_weights() {
        /* Like the size getters above, this only consults the first summand. */
        if (this->p_impl->summand) {
            return this->p_impl->summand->at(0)->get_parameter_ptr_weights();
        }

        return nullptr;
    }

    std::vector<double>* NeuralNetworkSum::get_parameter_ptr_biases() {
        if (this->p_impl->summand) {
            return this->p_impl->summand->at(0)->get_parameter_ptr_biases();
        }

        return nullptr;
    }

    void NeuralNetworkSum::eval_single_debug(std::vector<double>& input,
                                             std::vector<double>& output,
                                             std::vector<double>* custom_weights_and_biases) {
        std::vector<double> mem_output(output.size());
        std::fill(output.begin(),
                  output.end(),
                  0.0);

        NeuralNetwork* SUM;

        for (size_t ni = 0; ni < this->p_impl->summand->size(); ++ni) {
            SUM = this->p_impl->summand->at(ni);

            if (SUM) {
                this->p_impl->summand->at(ni)->eval_single_debug(input,
                                                                 mem_output,
                                                                 custom_weights_and_biases);

                double alpha = this->p_impl->summand_coefficient->at(ni)->eval(input);

                for (size_t j = 0; j < output.size(); ++j) {
                    output[j] += mem_output[j] * alpha;
                }
            } else {
                //TODO assume the result can be a vector of doubles
                double alpha = this->p_impl->summand_coefficient->at(ni)->eval(input);

                for (size_t j = 0; j < output.size(); ++j) {
                    output[j] += alpha;
                }
            }
        }
    }
}