
/**
* DESCRIPTION OF THE FILE
*
* @author Michal Kravčenko

*/

#include <iostream>
#include <random>
#include <fstream>
#include <cmath>
#include "../NetConnection/ConnectionFunctionConstant.h"
#include "message.h"

#include "NeuralNetwork.h"
#include "NeuralNetworkSerialization.h"
#include "exceptions.h"
namespace lib4neuro{
int network_evaluation_counter = 0;
int network_backpropagation_counter = 0;
}
namespace lib4neuro {
NeuralNetwork::NeuralNetwork() {
this->delete_weights = true;
this->delete_biases = true;
this->layers_analyzed = false;
}
NeuralNetwork::NeuralNetwork(std::string filepath) {

this->init_from_file( filepath );
}
NeuralNetwork::~NeuralNetwork() {}
NeuralNetwork* NeuralNetwork::get_subnet(::std::vector<size_t>& input_neuron_indices,
::std::vector<size_t>& output_neuron_indices) {
THROW_NOT_IMPLEMENTED_ERROR();
// TODO rework due to the changed structure of the class
NeuralNetwork* output_net = nullptr;
return output_net;
}
}
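
/**
 * Adds the neuron @p n to the network. Depending on @p bt the neuron is associated with
 * no bias, with a freshly allocated bias, or with the already existing bias at @p bias_idx.
 * @return index of the newly added neuron
 */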
size_t NeuralNetwork::add_neuron(std::shared_ptr<Neuron> n,
BIAS_TYPE bt,
size_t bias_idx) {
if (bt == BIAS_TYPE::NO_BIAS) {
this->neuron_bias_indices.push_back(-1);
} else if (bt == BIAS_TYPE::NEXT_BIAS) {
this->neuron_bias_indices.push_back((int) this->neuron_biases.size());
this->neuron_biases.resize(this->neuron_biases.size() + 1);
} else if (bt == BIAS_TYPE::EXISTING_BIAS) {
if (bias_idx >= this->neuron_biases.size()) {
THROW_RUNTIME_ERROR("The supplied bias index is too large!");
}
this->neuron_bias_indices.push_back((int) bias_idx);
}

/* store the neuron itself; its index in this->neurons is the value returned below */
this->neurons.push_back(n);
this->outward_adjacency.push_back(std::make_shared<std::vector<std::pair<size_t, size_t>>>(::std::vector<std::pair<size_t, size_t>>(0)));
this->inward_adjacency.push_back(std::make_shared<std::vector<std::pair<size_t, size_t>>>(::std::vector<std::pair<size_t, size_t>>(0)));
this->layers_analyzed = false;

return this->neurons.size() - 1;
}
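
/**
 * Verbose variant of eval_single(): performs one forward pass over the analyzed feed-forward
 * layers and prints the neuron potentials and edge contributions to stdout along the way.
 */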
void NeuralNetwork::eval_single_debug(::std::vector<double>& input,
::std::vector<double>& output,
std::vector<double>* custom_weights_and_biases) {
if ((this->input_neuron_indices.size() * this->output_neuron_indices.size()) <= 0) {
THROW_INVALID_ARGUMENT_ERROR("Input and output neurons have not been specified!");
}
if (this->input_neuron_indices.size() != input.size()) {
THROW_INVALID_ARGUMENT_ERROR("Data input size != Network input size");
}
if (this->output_neuron_indices.size() != output.size()) {
THROW_INVALID_ARGUMENT_ERROR("Data output size != Network output size");
}
lib4neuro::network_evaluation_counter++;

double potential, bias;
int bias_idx;

this->copy_parameter_space(custom_weights_and_biases);
this->analyze_layer_structure();
/* reset of the output and the neuron potentials */
::std::fill(output.begin(),
output.end(),
0.0);
::std::fill(this->neuron_potentials.begin(),
this->neuron_potentials.end(),
0.0);
/* set the potentials of the input neurons */
for (size_t i = 0; i < this->input_neuron_indices.size(); ++i) {
this->neuron_potentials.at(this->input_neuron_indices.at(i)) = input[i];
std::cout << this->neuron_potentials.at(this->input_neuron_indices.at(i)) << ", ";
}
std::cout << std::endl;
/* we iterate through all the feed-forward layers and transfer the signals */
for (auto layer: this->neuron_layers_feedforward) {
/* we iterate through all neurons in this layer and propagate the signal to the neighboring neurons */
for (auto si: *layer) {
bias = 0.0;
bias_idx = this->neuron_bias_indices.at(si);
if (bias_idx >= 0) {
bias = this->neuron_biases.at(bias_idx);
}
potential = this->neurons.at(si)->activate(this->neuron_potentials.at(si),
bias);

std::cout << "Neuron" << si << " (" << this->neuron_potentials.at(si) << " - " << bias << ") -> (" << potential << ")" << std::endl;
// std::cout << " applying bias: " << bias << " to neuron potential: " << this->neuron_potentials.at(si)
// << " -> " << potential << std::endl;
for (auto c: *this->outward_adjacency.at(si)) {
size_t ti = c.first;
size_t ci = c.second;
/* the target neuron accumulates connection weight times the source activation */
this->neuron_potentials.at(ti) += this->connection_list.at(ci)->eval(this->connection_weights) * potential;

std::cout << " EDGE(" << si << ", " << ti << ")" << this->connection_list.at(ci)->eval(this->connection_weights) << std::endl;

// std::cout << " adding input to neuron " << ti << " += "
// << this->connection_list.at(ci)->eval(this->connection_weights) << "*" << potential
// << std::endl;
}
}
}

unsigned int i = 0;
for (auto oi: this->output_neuron_indices) {
bias = 0.0;
bias_idx = this->neuron_bias_indices.at(oi);
if (bias_idx >= 0) {
bias = this->neuron_biases.at(bias_idx);
}
output[i] = this->neurons.at(oi)->activate(this->neuron_potentials.at(oi),
bias);

// std::cout << "setting the output[" << i << "] = " << output[i] << "(bias = " << bias << ")" << std::endl;
++i;
}
}
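
/**
 * Connects the neurons n1_idx -> n2_idx through an identity connection function whose weight
 * is unitary, newly allocated, or reused from an existing weight index, depending on @p sct.
 * @return index of the newly created connection
 */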
size_t
NeuralNetwork::add_connection_simple(size_t n1_idx,
size_t n2_idx,
SIMPLE_CONNECTION_TYPE sct,
size_t weight_idx) {

std::shared_ptr<ConnectionFunctionIdentity> con_weight_u1u2;
if (sct == SIMPLE_CONNECTION_TYPE::UNITARY_WEIGHT) {
con_weight_u1u2 = std::make_shared<ConnectionFunctionIdentity>(ConnectionFunctionIdentity());
} else {
if (sct == SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT) {
weight_idx = this->connection_weights.size();
this->connection_weights.resize(weight_idx + 1);
} else if (sct == SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT) {
if (weight_idx >= this->connection_weights.size()) {
::std::cerr << "The supplied connection weight index is too large!\n" << ::std::endl;
}

}
con_weight_u1u2 = std::make_shared<ConnectionFunctionIdentity>(ConnectionFunctionIdentity(weight_idx));
}
size_t conn_idx = this->add_new_connection_to_list(con_weight_u1u2);
this->add_outward_connection(n1_idx,
n2_idx,
conn_idx);
this->add_inward_connection(n2_idx,
n1_idx,
conn_idx);
this->layers_analyzed = false;

return conn_idx;
}
size_t NeuralNetwork::add_connection_constant(size_t n1_idx,
size_t n2_idx,
double weight) {

std::shared_ptr<ConnectionFunctionConstant> cfc = std::make_shared<ConnectionFunctionConstant>(ConnectionFunctionConstant(weight));
size_t conn_idx = this->add_new_connection_to_list(cfc);
this->add_outward_connection(n1_idx,
n2_idx,
conn_idx);
this->add_inward_connection(n2_idx,
n1_idx,
conn_idx);
this->layers_analyzed = false;
return conn_idx;
}

void NeuralNetwork::add_existing_connection(size_t n1_idx,
size_t n2_idx,
size_t connection_idx,
NeuralNetwork& parent_network) {
size_t conn_idx = this->add_new_connection_to_list(parent_network.connection_list.at(connection_idx));
this->add_outward_connection(n1_idx,
n2_idx,
conn_idx);
this->add_inward_connection(n2_idx,
n1_idx,
conn_idx);
this->layers_analyzed = false;
}
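
/**
 * Copies the supplied parameter vector (connection weights followed by neuron biases) into
 * the internal storage of the network; a null pointer leaves the current parameters untouched.
 */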
void NeuralNetwork::copy_parameter_space(std::vector<double>* parameters) {
if (parameters != nullptr) {
for (unsigned int i = 0; i < this->connection_weights.size(); ++i) {
this->connection_weights.at(i) = (*parameters).at(i);
}
for (unsigned int i = 0; i < this->neuron_biases.size(); ++i) {
(this->neuron_biases).at(i) = (*parameters).at(i + this->connection_weights.size());
}
}

}
void NeuralNetwork::set_parameter_space_pointers(NeuralNetwork& parent_network) {

if (!this->connection_weights.empty()) {
this->connection_weights.clear();
}

this->connection_weights = parent_network.connection_weights;
this->neuron_biases = parent_network.neuron_biases;
this->delete_weights = false;

}
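
/**
 * Performs one forward pass: sets the potentials of the input neurons, propagates the signal
 * through the feed-forward layers and writes the activations of the output neurons into
 * @p output, optionally using @p custom_weights_and_biases.
 */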
void NeuralNetwork::eval_single(::std::vector<double>& input,
::std::vector<double>& output,
std::vector<double>* custom_weights_and_biases) {
if ((this->input_neuron_indices.size() * this->output_neuron_indices.size()) <= 0) {
THROW_INVALID_ARGUMENT_ERROR("Input and output neurons have not been specified!");
}

if (this->input_neuron_indices.size() != input.size()) {
THROW_INVALID_ARGUMENT_ERROR("Network input size(" + std::to_string(this->input_neuron_indices.size())
+ ") != Data input size(" + std::to_string(input.size()) + ")");
}

if (this->output_neuron_indices.size() != output.size()) {
THROW_INVALID_ARGUMENT_ERROR("Data output size != Network output size");
}
lib4neuro::network_evaluation_counter++;
double potential, bias;
int bias_idx;
this->copy_parameter_space( custom_weights_and_biases ); // TODO rewrite, so the original parameters are not edited!

this->analyze_layer_structure();
/* reset of the output and the neuron potentials */
::std::fill(output.begin(),
output.end(),
0.0);
::std::fill(this->neuron_potentials.begin(),
this->neuron_potentials.end(),
0.0);

/* set the potentials of the input neurons */
for (size_t i = 0; i < this->input_neuron_indices.size(); ++i) {
this->neuron_potentials.at(this->input_neuron_indices.at(i)) = input[i];
}

/* we iterate through all the feed-forward layers and transfer the signals */
for (auto layer: this->neuron_layers_feedforward) {
/* we iterate through all neurons in this layer and propagate the signal to the neighboring neurons */
for (auto si: *layer) {
bias = 0.0;
bias_idx = this->neuron_bias_indices.at(si);
if (bias_idx >= 0) {
bias = this->neuron_biases.at(bias_idx);
}
potential = this->neurons.at(si)->activate(this->neuron_potentials.at(si),
bias);

for (auto c: *this->outward_adjacency.at(si)) {
size_t ti = c.first;
size_t ci = c.second;

this->neuron_potentials.at(ti) += this->connection_list.at(ci)->eval(this->connection_weights) * potential;
}

}
}
unsigned int i = 0;
for (auto oi: this->output_neuron_indices) {
bias = 0.0;
bias_idx = this->neuron_bias_indices.at(oi);
if (bias_idx >= 0) {
bias = this->neuron_biases.at(bias_idx);
}
output[i] = this->neurons.at(oi)->activate(this->neuron_potentials.at(oi),
bias);
++i;

}

}
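
/**
 * Backpropagates @p error_derivative (scaled by @p error_scaling) through the network and
 * accumulates the partial derivatives with respect to the connection weights and neuron
 * biases into @p gradient (weights first, biases shifted by get_n_weights()).
 */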
void NeuralNetwork::add_to_gradient_single(std::vector<double>& input,
::std::vector<double>& error_derivative,
double error_scaling,
::std::vector<double>& gradient) {

lib4neuro::network_backpropagation_counter++;
::std::vector<double> scaling_backprog(this->get_n_neurons());
::std::fill(scaling_backprog.begin(),
scaling_backprog.end(),
0.0);

size_t bias_shift = this->get_n_weights();
size_t neuron_idx;
int bias_idx;
double neuron_potential, neuron_potential_t, neuron_bias, connection_weight;
NeuronDifferentiable* active_neuron;

std::shared_ptr<::std::vector<size_t>> current_layer = this->neuron_layers_feedforward.at(
this->neuron_layers_feedforward.size() - 1);
//TODO might not work in the future as the output neurons could be permuted
for (size_t i = 0; i < current_layer->size(); ++i) {
neuron_idx = current_layer->at(i);
scaling_backprog[neuron_idx] = error_derivative[i] * error_scaling;
}
/* we iterate through all the layers in reverse order and calculate partial derivatives scaled correspondingly */
for (size_t j = this->neuron_layers_feedforward.size(); j > 0; --j) {

current_layer = this->neuron_layers_feedforward.at(j - 1);

for (size_t i = 0; i < current_layer->size(); ++i) {

neuron_idx = current_layer->at(i);
active_neuron = dynamic_cast<NeuronDifferentiable*> (this->neurons.at(neuron_idx).get());

if (active_neuron) {
bias_idx = this->neuron_bias_indices.at(neuron_idx);
neuron_potential = this->neuron_potentials.at(neuron_idx);

if (bias_idx >= 0) {
neuron_bias = this->neuron_biases.at(bias_idx);
gradient[bias_shift + bias_idx] += scaling_backprog[neuron_idx] *
active_neuron->activation_function_eval_derivative_bias(
neuron_potential,
neuron_bias);
scaling_backprog[neuron_idx] *= active_neuron->activation_function_eval_derivative(
neuron_potential,
neuron_bias);
}

for (auto c: *this->inward_adjacency.at(neuron_idx)) {
size_t ti = c.first;
size_t ci = c.second;

neuron_potential_t = this->neurons.at(ti)->get_last_activation_value();
connection_weight = this->connection_list.at(ci)->eval(this->connection_weights);

this->connection_list.at(ci)->eval_partial_derivative(*this->get_parameter_ptr_weights(),
gradient,
neuron_potential_t *
scaling_backprog[neuron_idx]);

scaling_backprog[ti] += scaling_backprog[neuron_idx] * connection_weight;
}
} else {
THROW_INVALID_ARGUMENT_ERROR(
"Neuron used in backpropagation does not contain differentiable activation function!\n");

}
}
}
}
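
/**
 * Variant of add_to_gradient_single(): the per-sample gradient is first accumulated into
 * @p gradient_tmp and then added to @p gradient rescaled by
 * sum(|error_derivative|) / ||gradient_tmp||.
 */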
void NeuralNetwork::add_to_gradient_single_normalized(
::std::vector<double>& error_derivative,
::std::vector<double>& gradient,
::std::vector<double>& gradient_tmp
) {
lib4neuro::network_backpropagation_counter++;
::std::vector<double> scaling_backprog(this->get_n_neurons());
::std::fill(scaling_backprog.begin(),
scaling_backprog.end(),
0.0);
::std::fill(gradient_tmp.begin(),
gradient_tmp.end(),
0.0);
size_t bias_shift = this->get_n_weights();
size_t neuron_idx;
int bias_idx;
double neuron_potential, neuron_potential_t, neuron_bias, connection_weight;
NeuronDifferentiable* active_neuron;
/* initial error propagation */
std::shared_ptr<::std::vector<size_t>> current_layer = this->neuron_layers_feedforward.at(
this->neuron_layers_feedforward.size() - 1);
//TODO might not work in the future as the output neurons could be permuted
for (size_t i = 0; i < current_layer->size(); ++i) {
neuron_idx = current_layer->at(i);
scaling_backprog[neuron_idx] = error_derivative[i];
}
/* we iterate through all the layers in reverse order and calculate partial derivatives scaled correspondingly */
for (size_t j = this->neuron_layers_feedforward.size(); j > 0; --j) {
current_layer = this->neuron_layers_feedforward.at(j - 1);
for (size_t i = 0; i < current_layer->size(); ++i) {
neuron_idx = current_layer->at(i);
active_neuron = dynamic_cast<NeuronDifferentiable*> (this->neurons.at(neuron_idx).get());
if (active_neuron) {
bias_idx = this->neuron_bias_indices.at(neuron_idx);
neuron_potential = this->neuron_potentials.at(neuron_idx);
if (bias_idx >= 0) {
neuron_bias = this->neuron_biases.at(bias_idx);
gradient_tmp[bias_shift + bias_idx] += scaling_backprog[neuron_idx] *
active_neuron->activation_function_eval_derivative_bias(
neuron_potential,
neuron_bias);
scaling_backprog[neuron_idx] *= active_neuron->activation_function_eval_derivative(
neuron_potential,
neuron_bias);
}
/* connections to lower level neurons */
for (auto c: *this->inward_adjacency.at(neuron_idx)) {
size_t ti = c.first;
size_t ci = c.second;
neuron_potential_t = this->neurons.at(ti)->get_last_activation_value();
connection_weight = this->connection_list.at(ci)->eval(this->connection_weights);
this->connection_list.at(ci)->eval_partial_derivative(*this->get_parameter_ptr_weights(),
gradient_tmp,
neuron_potential_t *
scaling_backprog[neuron_idx]);
scaling_backprog[ti] += scaling_backprog[neuron_idx] * connection_weight;
}
} else {
THROW_INVALID_ARGUMENT_ERROR(
"Neuron used in backpropagation does not contain differentiable activation function!\n");
}
}
}
/* we calculate the norm of the gradient and add the rescaled portion */
double grad_norm = 0.0;
for( auto el: gradient_tmp ){
grad_norm += el * el;
}
grad_norm = std::sqrt(grad_norm);
if( grad_norm > 1e-12 ){
double error_scale = 0.0;
for( auto el: error_derivative ){
error_scale += std::abs( el );
}
grad_norm = error_scale / grad_norm;
for( size_t i = 0; i < gradient_tmp.size(); ++i ){
gradient[ i ] += grad_norm * gradient_tmp[ i ];
}
}
}
void NeuralNetwork::add_to_gradient_single_debug(std::vector<double>& input,
::std::vector<double>& error_derivative,
double error_scaling,
::std::vector<double>& gradient) {

lib4neuro::network_backpropagation_counter++;

::std::vector<double> scaling_backprog(this->get_n_neurons());
::std::fill(scaling_backprog.begin(),
scaling_backprog.end(),
0.0);

size_t bias_shift = this->get_n_weights();
size_t neuron_idx;
int bias_idx;

double neuron_potential, neuron_activation_t, neuron_bias, connection_weight;
NeuronDifferentiable* active_neuron;

/* initial error propagation */
std::shared_ptr<::std::vector<size_t>> current_layer = this->neuron_layers_feedforward.at(
this->neuron_layers_feedforward.size() - 1);

//TODO might not work in the future as the output neurons could be permuted
std::cout << "Error scaling on the output layer: ";
for (size_t i = 0; i < current_layer->size(); ++i) {
neuron_idx = current_layer->at(i);
scaling_backprog[neuron_idx] = error_derivative[i] * error_scaling;
std::cout << scaling_backprog[neuron_idx] << " [neuron " << neuron_idx << "], ";
}
std::cout << std::endl;
/* we iterate through all the layers in reverse order and calculate partial derivatives scaled correspondingly */
for (size_t j = this->neuron_layers_feedforward.size(); j > 0; --j) {

current_layer = this->neuron_layers_feedforward.at(j - 1);

for (size_t i = 0; i < current_layer->size(); ++i) {
neuron_idx = current_layer->at(i);
active_neuron = dynamic_cast<NeuronDifferentiable*> (this->neurons.at(neuron_idx).get());

if (active_neuron) {
std::cout << " [backpropagation] active neuron: " << neuron_idx << std::endl;
bias_idx = this->neuron_bias_indices.at(neuron_idx);
neuron_potential = this->neuron_potentials.at(neuron_idx);

if (bias_idx >= 0) {

neuron_bias = this->neuron_biases.at(bias_idx);
gradient[bias_shift + bias_idx] += scaling_backprog[neuron_idx] *
active_neuron->activation_function_eval_derivative_bias(
neuron_potential,
neuron_bias);
scaling_backprog[neuron_idx] *= active_neuron->activation_function_eval_derivative(
neuron_potential,
neuron_bias);
}
std::cout << " [backpropagation] scaling coefficient: " << scaling_backprog[neuron_idx]
<< std::endl;

/* connections to lower level neurons */
for (auto c: *this->inward_adjacency.at(neuron_idx)) {

size_t ti = c.first;
size_t ci = c.second;
neuron_activation_t = this->neurons.at(ti)->get_last_activation_value();
connection_weight = this->connection_list.at(ci)->eval(this->connection_weights);

std::cout << " [backpropagation] value (" << ti << "): " << neuron_activation_t
<< ", scaling: " << scaling_backprog[neuron_idx] << std::endl;

this->connection_list.at(ci)->eval_partial_derivative(*this->get_parameter_ptr_weights(),
gradient,
neuron_activation_t *
scaling_backprog[neuron_idx]);

scaling_backprog[ti] += scaling_backprog[neuron_idx] * connection_weight;
}
} else {
THROW_INVALID_ARGUMENT_ERROR(
"Neuron used in backpropagation does not contain differentiable activation function!\n");

}
}
}
}
void NeuralNetwork::scale_biases(double alpha) {
for (size_t i = 0; i < this->get_n_biases(); ++i) {
this->neuron_biases.at(i) *= alpha;
}
}

void NeuralNetwork::scale_weights(double alpha) {
for (size_t i = 0; i < this->get_n_weights(); ++i) {
this->connection_weights.at(i) *= alpha;
}
}

void NeuralNetwork::scale_parameters(double alpha) {
this->scale_biases(alpha);
this->scale_weights(alpha);
}
size_t NeuralNetwork::get_n_inputs() {
return this->input_neuron_indices.size();
}

size_t NeuralNetwork::get_n_outputs() {
return this->output_neuron_indices.size();
}
size_t NeuralNetwork::get_n_weights() {
return this->connection_weights.size();
}

size_t NeuralNetwork::get_n_biases() {
return this->neuron_biases.size();
}
int NeuralNetwork::get_neuron_bias_index(size_t neuron_idx) {
return this->neuron_bias_indices.at(neuron_idx);
}

size_t NeuralNetwork::get_n_neurons() {
return this->neurons.size();
}
void NeuralNetwork::specify_input_neurons(std::vector<size_t>& input_neurons_indices) {
this->input_neuron_indices = input_neurons_indices;
}

void NeuralNetwork::specify_output_neurons(std::vector<size_t>& output_neurons_indices) {
this->output_neuron_indices = output_neurons_indices;
}
void NeuralNetwork::write_weights() {
if( lib4neuro::mpi_rank > 0 ){
return;
}
std::cout << "Connection weights: ";
if (!this->connection_weights.empty()) {
for (size_t i = 0; i < this->connection_weights.size() - 1; ++i) {
std::cout << this->connection_weights.at(i) << ", ";
}
std::cout << this->connection_weights.at(this->connection_weights.size() - 1) << std::endl;
}
}
void NeuralNetwork::write_weights(std::string file_path) {
if( lib4neuro::mpi_rank > 0 ){
return;
}
std::ofstream ofs(file_path);
THROW_RUNTIME_ERROR("File " + file_path + " can not be opened!");
}
ofs << "Connection weights: ";
if (!this->connection_weights.empty()) {
for (size_t i = 0; i < this->connection_weights.size() - 1; ++i) {
ofs << this->connection_weights.at(i) << ", ";
}
ofs << this->connection_weights.at(this->connection_weights.size() - 1) << std::endl;
}
}
void NeuralNetwork::write_weights(std::ofstream* file_path) {
if( lib4neuro::mpi_rank > 0 ){
return;
}
*file_path << "Connection weights: ";
if (!this->connection_weights.empty()) {
for (size_t i = 0; i < this->connection_weights.size() - 1; ++i) {
*file_path << this->connection_weights.at(i) << ", ";
}
*file_path << this->connection_weights.at(this->connection_weights.size() - 1) << std::endl;
}
}
void NeuralNetwork::write_biases() {
if( lib4neuro::mpi_rank > 0 ){
return;
}
std::cout << "Network biases: ";
if (!this->neuron_biases.empty()) {
for (unsigned int i = 0; i < this->neuron_biases.size() - 1; i++) {
std::cout << this->neuron_biases.at(i) << ", ";
}
std::cout << this->neuron_biases.at(this->neuron_biases.size() - 1) << std::endl;
}
}
void NeuralNetwork::write_biases(std::string file_path) {
if( lib4neuro::mpi_rank > 0 ){
return;
}
std::ofstream ofs(file_path);
THROW_RUNTIME_ERROR("File " + file_path + " can not be opened!");
}
ofs << "Network biases: ";
if (!this->neuron_biases.empty()) {
for (unsigned int i = 0; i < this->neuron_biases.size() - 1; i++) {
ofs << this->neuron_biases.at(i) << ", ";
}
ofs << this->neuron_biases.at(this->neuron_biases.size() - 1) << std::endl;
}
}
void NeuralNetwork::write_biases(std::ofstream* file_path) {
if( lib4neuro::mpi_rank > 0 ){
return;
}
*file_path << "Network biases: ";
if (!this->neuron_biases.empty()) {
for (unsigned int i = 0; i < this->neuron_biases.size() - 1; i++) {
*file_path << this->neuron_biases.at(i) << ", ";
}
*file_path << this->neuron_biases.at(this->neuron_biases.size() - 1) << std::endl;
}
}
void NeuralNetwork::write_stats() {
if( lib4neuro::mpi_rank > 0 ){
return;
}
::std::cout << std::flush
<< "Number of neurons: " << this->neurons.size() << ::std::endl
<< "Number of connections: " << this->connection_list.size() << ::std::endl
<< "Number of active weights: " << this->connection_weights.size() << ::std::endl
<< "Number of active biases: " << this->neuron_biases.size() << ::std::endl;
if (this->normalization_strategy) {
::std::cout << std::flush
<< "Normalization strategy maximum value: "
<< this->normalization_strategy->get_max_value() << std::endl
<< "Normalization strategy minimum value: "
<< this->normalization_strategy->get_min_value()
<< std::endl;
}
}
void NeuralNetwork::write_stats(std::string file_path) {
if( lib4neuro::mpi_rank > 0 ){
return;
}
std::ofstream ofs(file_path);
THROW_RUNTIME_ERROR("File " + file_path + " can not be opened!");
}
ofs << "Number of neurons: " << this->neurons.size() << ::std::endl
<< "Number of connections: " << this->connection_list.size() << ::std::endl
<< "Number of active weights: " << this->connection_weights.size() << ::std::endl
<< "Number of active biases: " << this->neuron_biases.size() << ::std::endl;
ofs << "Normalization strategy maximum value: "
<< this->normalization_strategy->get_max_value() << std::endl
<< "Normalization strategy minimum value: "
<< this->normalization_strategy->get_min_value()
<< std::endl;
}
ofs.close();
}
void NeuralNetwork::write_stats(std::ofstream* file_path) {
if( lib4neuro::mpi_rank > 0 ){
return;
}
*file_path << "Number of neurons: " << this->neurons.size() << ::std::endl
<< "Number of connections: " << this->connection_list.size() << ::std::endl
<< "Number of active weights: " << this->connection_weights.size() << ::std::endl
<< "Number of active biases: " << this->neuron_biases.size() << ::std::endl;
*file_path << "Normalization strategy maximum value: "
<< this->normalization_strategy->get_max_value() << std::endl
<< "Normalization strategy minimum value: "
<< this->normalization_strategy->get_min_value()
<< std::endl;
}
}
std::vector<double>* NeuralNetwork::get_parameter_ptr_biases() {
return &this->neuron_biases;
}
std::vector<double>* NeuralNetwork::get_parameter_ptr_weights() {
return &this->connection_weights;
}

size_t NeuralNetwork::add_new_connection_to_list(std::shared_ptr<ConnectionFunctionGeneral> con) {
this->connection_list.push_back(con);
return this->connection_list.size() - 1;
}

void NeuralNetwork::add_inward_connection(size_t s,
size_t t,
size_t con_idx) {
if (!this->inward_adjacency.at(s)) {
this->inward_adjacency.at(s) = std::make_shared<std::vector<std::pair<size_t, size_t>>>(::std::vector<std::pair<size_t, size_t>>(0));
}
this->inward_adjacency.at(s)->push_back(std::pair<size_t, size_t>(t,
con_idx));
}

void NeuralNetwork::add_outward_connection(size_t s,
size_t t,
size_t con_idx) {
if (!this->outward_adjacency.at(s)) {
this->outward_adjacency.at(s) = std::make_shared<std::vector<std::pair<size_t, size_t>>>(::std::vector<std::pair<size_t, size_t>>(0));
}
this->outward_adjacency.at(s)->push_back(std::pair<size_t, size_t>(t,
con_idx));
}
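
/**
 * Partitions the neurons into feed-forward layers: starting from the input neurons, a new
 * layer is peeled off whenever all inward connections of a neuron have already been
 * processed (a Kahn-style topological ordering of the connection graph).
 */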
void NeuralNetwork::analyze_layer_structure() {
if (this->layers_analyzed) {
//nothing to do
return;
}
/* buffer preparation */
this->neuron_potentials.resize(this->get_n_neurons());
/* space allocation */
this->neuron_layers_feedforward.clear();
size_t n = this->neurons.size();

/* helpful counters */
::std::vector<size_t> inward_saturation(n);
::std::vector<size_t> outward_saturation(n);
::std::fill(inward_saturation.begin(),
inward_saturation.end(),
0);
::std::fill(outward_saturation.begin(),
outward_saturation.end(),
0);
for (unsigned int i = 0; i < n; ++i) {
if (this->inward_adjacency.at(i)) {
inward_saturation[i] = this->inward_adjacency.at(i)->size();
}
if (this->outward_adjacency.at(i)) {
outward_saturation[i] = this->outward_adjacency.at(i)->size();
}
}
::std::vector<size_t> active_eval_set(2 * n);
::std::vector<size_t> active_set_size(2);
/* feedforward analysis */
active_set_size[0] = 0;
active_set_size[1] = 0;
size_t idx1 = 0, idx2 = 1;
active_set_size[0] = this->get_n_inputs();
size_t i = 0;
for (i = 0; i < this->get_n_inputs(); ++i) {
active_eval_set[i] = this->input_neuron_indices.at(i);
}
size_t active_ni;
while (active_set_size[idx1] > 0) {
/* we add the current active set as the new outward layer */
std::shared_ptr<::std::vector<size_t>> new_feedforward_layer = std::make_shared<::std::vector<size_t>>(::std::vector<size_t>(active_set_size[idx1]));
this->neuron_layers_feedforward.push_back(new_feedforward_layer);
//we iterate through the active neurons and propagate the signal
for (i = 0; i < active_set_size[idx1]; ++i) {
active_ni = active_eval_set[i + n * idx1];
new_feedforward_layer->at(i) = active_ni;
if (!this->outward_adjacency.at(active_ni)) {
continue;
}
for (auto ni: *(this->outward_adjacency.at(active_ni))) {
inward_saturation[ni.first]--;
if (inward_saturation[ni.first] == 0) {
active_eval_set[active_set_size[idx2] + n * idx2] = ni.first;
active_set_size[idx2]++;
}
}
}
idx1 = idx2;
idx2 = (idx1 + 1) % 2;
active_set_size[idx2] = 0;
}
this->layers_analyzed = true;
}

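/**
 * Restores the network from a boost::serialization text archive; the MPI ranks open the file
 * one after another, separated by barriers, to avoid concurrent reads.
 */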
void NeuralNetwork::init_from_file(const std::string &filepath) {
for( int i = 0; i < lib4neuro::mpi_nranks; ++i ){
if( i == lib4neuro::mpi_rank ){
::std::ifstream ifs(filepath);
if (ifs.is_open()) {
try {
boost::archive::text_iarchive ia(ifs);
ia >> *this;
}
catch (boost::archive::archive_exception& e) {
THROW_RUNTIME_ERROR(
"Serialized archive error: '" + std::string(e.what()) + "'! Please, check if your file really "
"contains a serialized NeuralNetwork.");
}
ifs.close();
} else {
THROW_RUNTIME_ERROR("File '" + filepath + "' couldn't be open!");
}

}
MPI_Barrier(lib4neuro::mpi_active_comm);

}
}
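/**
 * Serializes the network into a boost::serialization text archive; only MPI rank 0 writes.
 */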
void NeuralNetwork::save_text(std::string filepath) {
if( lib4neuro::mpi_rank > 0 ){
return;
}
std::ofstream ofs(filepath);
{
boost::archive::text_oarchive oa(ofs);
oa << *this;
ofs.close();
}
}
NormalizationStrategy* NeuralNetwork::get_normalization_strategy_instance() {
return this->normalization_strategy;
}
void NeuralNetwork::set_normalization_strategy_instance(NormalizationStrategy* ns) {
if (!ns) {
THROW_RUNTIME_ERROR("Argument 'ns' is not initialized!");
}
this->normalization_strategy = ns;
}
FullyConnectedFFN::FullyConnectedFFN(std::vector<unsigned int>* neuron_numbers,
NEURON_TYPE hidden_layer_neuron_type,
std::ofstream* ofs) : NeuralNetwork() {
std::vector<NEURON_TYPE> tmp;
for (size_t i = 0; i < neuron_numbers->size(); i++) {
tmp.emplace_back(hidden_layer_neuron_type);
}