/**
 * Implementation of the NeuralNetwork class and of the fully connected
 * feed-forward network (FullyConnectedFFN) built on top of it.
*
* @author Michal Kravčenko
* @date 13.6.18 -
*/

#include <iostream>
#include <NetConnection/ConnectionFunctionConstant.h>
#include <fstream>
#include <cmath>
#include <ctime>

#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_real_distribution.hpp>

#include "message.h"

#include "NeuralNetwork.h"
#include "NeuralNetworkSerialization.h"
#include "exceptions.h"
namespace lib4neuro {

/* Random generator shared by randomize_weights() and randomize_biases(); it is
 * declared here at file scope since no member generator is present (assumed
 * seeding, matching the library's use of boost::random elsewhere). */
static boost::random::mt19937 gen(std::time(0));

NeuralNetwork::NeuralNetwork() {
this->delete_weights = true;
this->delete_biases = true;
this->layers_analyzed = false;
}
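
/* Note: this constructor is the reading half of the save_text() round trip;
 * it expects a boost::serialization text archive previously produced by
 * save_text() below. */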
NeuralNetwork::NeuralNetwork(std::string filepath) {
::std::ifstream ifs(filepath);
if (ifs.is_open()) {
try {
boost::archive::text_iarchive ia(ifs);
ia >> *this;
} catch (boost::archive::archive_exception& e) {
THROW_RUNTIME_ERROR("Serialized archive error: '" + std::string(e.what()) + "'! Please, check if your file really "
"contains a serialized NeuralNetwork.");
}
ifs.close();
} else {
THROW_RUNTIME_ERROR("File '" + filepath + "' couldn't be open!");
}
}
NeuralNetwork::~NeuralNetwork() {}

NeuralNetwork *NeuralNetwork::get_subnet(::std::vector<size_t> &input_neuron_indices,
::std::vector<size_t> &output_neuron_indices) {
THROW_NOT_IMPLEMENTED_ERROR();
NeuralNetwork *output_net = nullptr;
// TODO rework due to the changed structure of the class
return output_net;
}
size_t NeuralNetwork::add_neuron(std::shared_ptr<Neuron> n, BIAS_TYPE bt, size_t bias_idx) {

if (bt == BIAS_TYPE::NO_BIAS) {
this->neuron_bias_indices.push_back(-1);
} else if (bt == BIAS_TYPE::NEXT_BIAS) {
this->neuron_bias_indices.push_back((int) this->neuron_biases.size());
this->neuron_biases.resize(this->neuron_biases.size() + 1);
} else if (bt == BIAS_TYPE::EXISTING_BIAS) {
if (bias_idx >= this->neuron_biases.size()) {
::std::cerr << "The supplied bias index is too large!\n" << ::std::endl;
}
this->neuron_bias_indices.push_back((int) bias_idx);
}

this->outward_adjacency.push_back(std::make_shared<std::vector<std::pair<size_t, size_t>>>(::std::vector<std::pair<size_t, size_t>>(0)));
this->inward_adjacency.push_back(std::make_shared<std::vector<std::pair<size_t, size_t>>>(::std::vector<std::pair<size_t, size_t>>(0)));
this->neurons.push_back(n);

this->layers_analyzed = false;

return this->neurons.size() - 1;
}
void NeuralNetwork::eval_single_debug(::std::vector<double> &input, ::std::vector<double> &output,
std::vector<double>* custom_weights_and_biases) {
if ((this->input_neuron_indices.size() * this->output_neuron_indices.size()) <= 0) {
THROW_INVALID_ARGUMENT_ERROR("Input and output neurons have not been specified!");
}
if (this->input_neuron_indices.size() != input.size()) {
THROW_INVALID_ARGUMENT_ERROR("Data input size != Network input size");
}
if (this->output_neuron_indices.size() != output.size()) {
THROW_INVALID_ARGUMENT_ERROR("Data output size != Network output size");
}
double potential, bias;
int bias_idx;
this->copy_parameter_space(custom_weights_and_biases);
this->analyze_layer_structure();
/* reset of the output and the neuron potentials */
::std::fill(output.begin(), output.end(), 0.0);
::std::fill(this->neuron_potentials.begin(), this->neuron_potentials.end(), 0.0);
/* set the potentials of the input neurons */
for (size_t i = 0; i < this->input_neuron_indices.size(); ++i) {
this->neuron_potentials.at(this->input_neuron_indices.at(i)) = input[i];
std::cout << this->neuron_potentials.at(this->input_neuron_indices.at(i)) << ", ";
}
std::cout << std::endl;
/* we iterate through all the feed-forward layers and transfer the signals */
for (auto layer: this->neuron_layers_feedforward) {
/* we iterate through all neurons in this layer and propagate the signal to the neighboring neurons */
for (auto si: *layer) {
bias = 0.0;
bias_idx = this->neuron_bias_indices.at(si);
if (bias_idx >= 0) {
bias = this->neuron_biases.at(bias_idx);
}
potential = this->neurons.at(si)->activate(this->neuron_potentials.at(si), bias);
std::cout << " applying bias: " << bias << " to neuron potential: " << this->neuron_potentials.at(si) << " -> " << potential << std::endl;
for (auto c: *this->outward_adjacency.at(si)) {
size_t ti = c.first;
size_t ci = c.second;
this->neuron_potentials.at(ti) += this->connection_list.at(ci)->eval(this->connection_weights) * potential;
std::cout << " adding input to neuron " << ti << " += " << this->connection_list.at(ci)->eval(this->connection_weights) << "*" << potential << std::endl;
}
}
}

unsigned int i = 0;
for (auto oi: this->output_neuron_indices) {
bias = 0.0;
bias_idx = this->neuron_bias_indices.at(oi);
if (bias_idx >= 0) {
bias = this->neuron_biases.at(bias_idx);
}
output[i] = this->neurons.at(oi)->activate(this->neuron_potentials.at(oi), bias);
std::cout << "setting the output[" << i << "] = " << output[i] << "(bias = " << bias << ")" << std::endl;
++i;
}
}
size_t
NeuralNetwork::add_connection_simple(size_t n1_idx, size_t n2_idx, SIMPLE_CONNECTION_TYPE sct,
size_t weight_idx) {
std::shared_ptr<ConnectionFunctionIdentity> con_weight_u1u2;
if (sct == SIMPLE_CONNECTION_TYPE::UNITARY_WEIGHT) {
con_weight_u1u2 = std::make_shared<ConnectionFunctionIdentity>();
} else {
if (sct == SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT) {
weight_idx = this->connection_weights.size();
this->connection_weights.resize(weight_idx + 1);
} else if (sct == SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT) {
if (weight_idx >= this->connection_weights.size()) {
::std::cerr << "The supplied connection weight index is too large!\n" << ::std::endl;
}

}
con_weight_u1u2 = std::make_shared<ConnectionFunctionIdentity>(weight_idx);
}
size_t conn_idx = this->add_new_connection_to_list(con_weight_u1u2);
this->add_outward_connection(n1_idx, n2_idx, conn_idx);
this->add_inward_connection(n2_idx, n1_idx, conn_idx);
this->layers_analyzed = false;

return conn_idx;
}
size_t NeuralNetwork::add_connection_constant(size_t n1_idx, size_t n2_idx, double weight) {
/* assumed: ConnectionFunctionConstant takes the constant value in its constructor */
std::shared_ptr<ConnectionFunctionConstant> cfc = std::make_shared<ConnectionFunctionConstant>(weight);
size_t conn_idx = this->add_new_connection_to_list(cfc);
this->add_outward_connection(n1_idx, n2_idx, conn_idx);
this->add_inward_connection(n2_idx, n1_idx, conn_idx);
this->layers_analyzed = false;
return conn_idx;
}

void NeuralNetwork::add_existing_connection(size_t n1_idx, size_t n2_idx, size_t connection_idx,
NeuralNetwork &parent_network) {
size_t conn_idx = this->add_new_connection_to_list(parent_network.connection_list.at(connection_idx));
this->add_outward_connection(n1_idx, n2_idx, conn_idx);
this->add_inward_connection(n2_idx, n1_idx, conn_idx);
this->layers_analyzed = false;
}
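
/* Note on the parameter layout assumed throughout this class: a flat parameter
 * vector stores all connection weights first, then all neuron biases, i.e.
 * [w_0, ..., w_{m-1}, b_0, ..., b_{k-1}]. copy_parameter_space() reads biases
 * at offset connection_weights.size(), and add_to_gradient_single() writes
 * bias gradients at bias_shift = get_n_weights() accordingly. */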
void NeuralNetwork::copy_parameter_space(std::vector<double>* parameters) {
if (parameters != nullptr) {
for (unsigned int i = 0; i < this->connection_weights.size(); ++i) {
this->connection_weights.at(i) = (*parameters).at(i);
}
for (unsigned int i = 0; i < this->neuron_biases.size(); ++i) {
(this->neuron_biases).at(i) = (*parameters).at(i + this->connection_weights.size());
}
}
}

void NeuralNetwork::set_parameter_space_pointers(NeuralNetwork &parent_network) {

if (!this->connection_weights.empty()) {
this->connection_weights.clear();
}

this->connection_weights = parent_network.connection_weights;
this->neuron_biases = parent_network.neuron_biases;
this->delete_biases = false;
this->delete_weights = false;

}
void NeuralNetwork::eval_single(::std::vector<double>& input,
::std::vector<double>& output,
std::vector<double>* custom_weights_and_biases) {
if ((this->input_neuron_indices.size() * this->output_neuron_indices.size()) <= 0) {
THROW_INVALID_ARGUMENT_ERROR("Input and output neurons have not been specified!");
}

if (this->input_neuron_indices.size() != input.size()) {
THROW_INVALID_ARGUMENT_ERROR("Data input size != Network input size");
}

if (this->output_neuron_indices.size() != output.size()) {
THROW_INVALID_ARGUMENT_ERROR("Data output size != Network output size");
}
double potential, bias;
int bias_idx;
this->copy_parameter_space(custom_weights_and_biases);
this->analyze_layer_structure();

/* reset of the output and the neuron potentials */
::std::fill(output.begin(), output.end(), 0.0);
::std::fill(this->neuron_potentials.begin(), this->neuron_potentials.end(), 0.0);

/* set the potentials of the input neurons */
for (size_t i = 0; i < this->input_neuron_indices.size(); ++i) {
this->neuron_potentials.at(this->input_neuron_indices.at(i)) = input[i];
}

/* we iterate through all the feed-forward layers and transfer the signals */
for (auto layer: this->neuron_layers_feedforward) {
/* we iterate through all neurons in this layer and propagate the signal to the neighboring neurons */
for (auto si: *layer) {
bias = 0.0;
bias_idx = this->neuron_bias_indices.at(si);
if (bias_idx >= 0) {
bias = this->neuron_biases.at(bias_idx);
}
potential = this->neurons.at(si)->activate(this->neuron_potentials.at(si), bias);

for (auto c: *this->outward_adjacency.at(si)) {
size_t ti = c.first;
size_t ci = c.second;

this->neuron_potentials.at(ti) += this->connection_list.at(ci)->eval(this->connection_weights) * potential;
}

}
}

unsigned int i = 0;
for (auto oi: this->output_neuron_indices) {
bias = 0.0;
bias_idx = this->neuron_bias_indices.at(oi);
if (bias_idx >= 0) {
bias = this->neuron_biases.at(bias_idx);
}
output[i] = this->neurons.at(oi)->activate(this->neuron_potentials.at(oi), bias);
++i;

}

}
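
/* Minimal usage sketch for eval_single() (hypothetical 2-input, 1-output net):
 *
 *   std::vector<double> in = {0.5, -1.0};
 *   std::vector<double> out(1);
 *   net.eval_single(in, out);   // out[0] now holds the network response
 *
 * Passing a non-null custom_weights_and_biases evaluates the network with the
 * supplied flat parameter vector instead of the stored one. */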
void NeuralNetwork::add_to_gradient_single(std::vector<double> &input, ::std::vector<double> &error_derivative,
double error_scaling, ::std::vector<double> &gradient) {

::std::vector<double> scaling_backprog(this->get_n_neurons());
::std::fill(scaling_backprog.begin(), scaling_backprog.end(), 0.0);

size_t bias_shift = this->get_n_weights();
size_t neuron_idx;
int bias_idx;
double neuron_potential, neuron_potential_t, neuron_bias, connection_weight;

NeuronDifferentiable *active_neuron;

/* initial error propagation */
std::shared_ptr<::std::vector<size_t>> current_layer = this->neuron_layers_feedforward.at(this->neuron_layers_feedforward.size() - 1);
//TODO might not work in the future as the output neurons could be permuted
for (size_t i = 0; i < current_layer->size(); ++i) {
neuron_idx = current_layer->at(i);
scaling_backprog[neuron_idx] = error_derivative[i] * error_scaling;
}

/* we iterate through all the layers in reverse order and calculate partial derivatives scaled correspondingly */
for (size_t j = this->neuron_layers_feedforward.size(); j > 0; --j) {

current_layer = this->neuron_layers_feedforward.at(j - 1);

for (size_t i = 0; i < current_layer->size(); ++i) {

neuron_idx = current_layer->at(i);
active_neuron = dynamic_cast<NeuronDifferentiable *> (this->neurons.at(neuron_idx).get());

if (active_neuron) {
bias_idx = this->neuron_bias_indices.at(neuron_idx);
neuron_potential = this->neuron_potentials.at(neuron_idx);

if (bias_idx >= 0) {
neuron_bias = this->neuron_biases.at(bias_idx);
gradient[bias_shift + bias_idx] += scaling_backprog[neuron_idx] *
active_neuron->activation_function_eval_derivative_bias(
neuron_potential, neuron_bias);
scaling_backprog[neuron_idx] *= active_neuron->activation_function_eval_derivative(
neuron_potential,
neuron_bias);
}

for (auto c: *this->inward_adjacency.at(neuron_idx)) {
size_t ti = c.first;
size_t ci = c.second;

neuron_potential_t = this->neurons.at(ti)->get_last_activation_value( );
connection_weight = this->connection_list.at(ci)->eval(this->connection_weights);

this->connection_list.at(ci)->eval_partial_derivative(*this->get_parameter_ptr_weights(),
gradient,
neuron_potential_t *
scaling_backprog[neuron_idx]);

scaling_backprog[ti] += scaling_backprog[neuron_idx] * connection_weight;
}
} else {
THROW_INVALID_ARGUMENT_ERROR(
"Neuron used in backpropagation does not contain differentiable activation function!\n");

}
}
}
}
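
/* The backpropagation above implements the recurrence (s_j denotes
 * scaling_backprog[j], sigma_j the activation of neuron j):
 *
 *   s_o     = error_scaling * error_derivative[o]     (output-layer seed)
 *   dE/db_j += s_j * d sigma_j / d b_j                (bias gradient)
 *   s_j    *= d sigma_j / d potential_j               (local chain rule)
 *   s_t    += s_j * w_{t -> j}                        (propagate to source t)
 */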

void NeuralNetwork::add_to_gradient_single_debug(std::vector<double> &input, ::std::vector<double> &error_derivative,
double error_scaling, ::std::vector<double> &gradient) {


::std::vector<double> scaling_backprog(this->get_n_neurons());
::std::fill(scaling_backprog.begin(), scaling_backprog.end(), 0.0);
size_t bias_shift = this->get_n_weights();
size_t neuron_idx;
int bias_idx;
double neuron_potential, neuron_activation_t, neuron_bias, connection_weight;
NeuronDifferentiable *active_neuron;
/* initial error propagation */
std::shared_ptr<::std::vector<size_t>> current_layer = this->neuron_layers_feedforward.at(
this->neuron_layers_feedforward.size() - 1);

//TODO might not work in the future as the output neurons could be permuted
std::cout << "Error scaling on the output layer: ";
for (size_t i = 0; i < current_layer->size(); ++i) {
neuron_idx = current_layer->at(i);
scaling_backprog[neuron_idx] = error_derivative[i] * error_scaling;
std::cout << scaling_backprog[neuron_idx] << " [neuron " << neuron_idx << "], ";
}
std::cout << std::endl;
/* we iterate through all the layers in reverse order and calculate partial derivatives scaled correspondingly */
for (size_t j = this->neuron_layers_feedforward.size(); j > 0; --j) {

current_layer = this->neuron_layers_feedforward.at(j - 1);

for (size_t i = 0; i < current_layer->size(); ++i) {
neuron_idx = current_layer->at(i);
active_neuron = dynamic_cast<NeuronDifferentiable *> (this->neurons.at(neuron_idx).get());

if (active_neuron) {
std::cout << " [backpropagation] active neuron: " << neuron_idx << std::endl;
bias_idx = this->neuron_bias_indices.at(neuron_idx);
neuron_potential = this->neuron_potentials.at(neuron_idx);

if (bias_idx >= 0) {

neuron_bias = this->neuron_biases.at(bias_idx);
gradient[bias_shift + bias_idx] += scaling_backprog[neuron_idx] *
active_neuron->activation_function_eval_derivative_bias(
neuron_potential, neuron_bias);
scaling_backprog[neuron_idx] *= active_neuron->activation_function_eval_derivative(
neuron_potential,
neuron_bias);
}
std::cout << " [backpropagation] scaling coefficient: " << scaling_backprog[neuron_idx] << std::endl;
/* connections to lower level neurons */
for (auto c: *this->inward_adjacency.at(neuron_idx)) {

size_t ti = c.first;
size_t ci = c.second;
neuron_activation_t = this->neurons.at(ti)->get_last_activation_value( );
connection_weight = this->connection_list.at(ci)->eval(this->connection_weights);

std::cout << " [backpropagation] value ("<<ti<< "): " << neuron_activation_t << ", scaling: " << scaling_backprog[neuron_idx] << std::endl;
this->connection_list.at(ci)->eval_partial_derivative(*this->get_parameter_ptr_weights(),

gradient,
neuron_activation_t *
scaling_backprog[neuron_idx]);
scaling_backprog[ti] += scaling_backprog[neuron_idx] * connection_weight;
}
} else {
THROW_INVALID_ARGUMENT_ERROR(
"Neuron used in backpropagation does not contain differentiable activation function!\n");
}
}
}
}

void NeuralNetwork::randomize_weights() {

// Init weight guess ("optimal" for logistic activation functions)
double r = 4 * sqrt(6. / (this->connection_weights.size()));
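// (This r matches the Glorot & Bengio initialization bound 4 * sqrt(6 / n) for
// logistic units; n is taken here as the total number of weights, which is a
// simplification of the per-layer fan_in + fan_out.)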

boost::random::uniform_real_distribution<> dist(-r, r);

for (size_t i = 0; i < this->connection_weights.size(); i++) {
this->connection_weights.at(i) = dist(gen);
}

}
void NeuralNetwork::randomize_biases() {
// Init bias guess ("optimal" for logistic activation functions)
boost::random::uniform_real_distribution<> dist(-1, 1);
for (size_t i = 0; i < this->neuron_biases.size(); i++) {
this->neuron_biases.at(i) = dist(gen);
}
}

void NeuralNetwork::randomize_parameters() {
this->randomize_biases();
this->randomize_weights();
}
void NeuralNetwork::scale_biases(double alpha) {
for(size_t i = 0; i < this->get_n_biases(); ++i){
this->neuron_biases.at(i) *= alpha;
}
}

void NeuralNetwork::scale_weights(double alpha) {
for(size_t i = 0; i < this->get_n_weights(); ++i){
this->connection_weights.at(i) *= alpha;
}
}

void NeuralNetwork::scale_parameters(double alpha) {
this->scale_biases( alpha );
this->scale_weights( alpha );
}
size_t NeuralNetwork::get_n_inputs() {
return this->input_neuron_indices.size();
}

size_t NeuralNetwork::get_n_outputs() {
return this->output_neuron_indices.size();
}
size_t NeuralNetwork::get_n_weights() {
return this->connection_weights.size();
}

size_t NeuralNetwork::get_n_biases() {
return this->neuron_biases.size();
}
int NeuralNetwork::get_neuron_bias_index(size_t neuron_idx) {
return this->neuron_bias_indices.at(neuron_idx);
}

size_t NeuralNetwork::get_n_neurons() {
return this->neurons.size();
}
void NeuralNetwork::specify_input_neurons(std::vector<size_t> &input_neurons_indices) {
this->input_neuron_indices = input_neurons_indices;
}

void NeuralNetwork::specify_output_neurons(std::vector<size_t> &output_neurons_indices) {
this->output_neuron_indices = output_neurons_indices;
}
void NeuralNetwork::write_weights() {
std::cout << "Connection weights: ";
if (!this->connection_weights.empty()) {
for (size_t i = 0; i < this->connection_weights.size() - 1; ++i) {
std::cout << this->connection_weights.at(i) << ", ";
}
std::cout << this->connection_weights.at(this->connection_weights.size() - 1) << std::endl;
}
}
void NeuralNetwork::write_weights(std::string file_path) {
std::ofstream ofs(file_path);
if(!ofs.is_open()) {
THROW_RUNTIME_ERROR("File " + file_path + " can not be opened!");
}
ofs << "Connection weights: ";
if (!this->connection_weights.empty()) {
for (size_t i = 0; i < this->connection_weights.size() - 1; ++i) {
ofs << this->connection_weights.at(i) << ", ";
}
ofs << this->connection_weights.at(this->connection_weights.size() - 1) << std::endl;
}
}
void NeuralNetwork::write_weights(std::ofstream* file_path) {
*file_path << "Connection weights: ";
if (!this->connection_weights.empty()) {
for (size_t i = 0; i < this->connection_weights.size() - 1; ++i) {
*file_path << this->connection_weights.at(i) << ", ";
}
*file_path << this->connection_weights.at(this->connection_weights.size() - 1) << std::endl;
}
}
void NeuralNetwork::write_biases() {
std::cout << "Network biases: ";
if(!this->neuron_biases.empty()) {
for(unsigned int i = 0; i < this->neuron_biases.size() - 1; i++) {
std::cout << this->neuron_biases.at(i) << ", ";
}
std::cout << this->neuron_biases.at(this->neuron_biases.size() - 1) << std::endl;
}
}

void NeuralNetwork::write_biases(std::string file_path) {
std::ofstream ofs(file_path);
if(!ofs.is_open()) {
THROW_RUNTIME_ERROR("File " + file_path + " can not be opened!");
}
ofs << "Network biases: ";
if(!this->neuron_biases.empty()) {
for(unsigned int i = 0; i < this->neuron_biases.size() - 1; i++) {
ofs << this->neuron_biases.at(i) << ", ";
}
ofs << this->neuron_biases.at(this->neuron_biases.size() - 1) << std::endl;
}
}
void NeuralNetwork::write_biases(std::ofstream* file_path) {
*file_path << "Network biases: ";
if(!this->neuron_biases.empty()) {
for(unsigned int i = 0; i < this->neuron_biases.size() - 1; i++) {
*file_path << this->neuron_biases.at(i) << ", ";
}
*file_path << this->neuron_biases.at(this->neuron_biases.size() - 1) << std::endl;
}
}
void NeuralNetwork::write_stats() {
::std::cout << std::flush
<< "Number of neurons: " << this->neurons.size() << ::std::endl
<< "Number of connections: " << this->connection_list.size() << ::std::endl
<< "Number of active weights: " << this->connection_weights.size() << ::std::endl
<< "Number of active biases: " << this->neuron_biases.size() << ::std::endl;
if(this->normalization_strategy) {
::std::cout << std::flush
<< "Normalization strategy maximum value: "
<< this->normalization_strategy->get_max_value() << std::endl
<< "Normalization strategy minimum value: "
<< this->normalization_strategy->get_min_value()
<< std::endl;
}
}

void NeuralNetwork::write_stats(std::string file_path) {
std::ofstream ofs(file_path);
if(!ofs.is_open()) {
THROW_RUNTIME_ERROR("File " + file_path + " can not be opened!");
}
ofs << "Number of neurons: " << this->neurons.size() << ::std::endl
<< "Number of connections: " << this->connection_list.size() << ::std::endl
<< "Number of active weights: " << this->connection_weights.size() << ::std::endl
<< "Number of active biases: " << this->neuron_biases.size() << ::std::endl;
if(this->normalization_strategy) {
ofs << "Normalization strategy maximum value: "
<< this->normalization_strategy->get_max_value() << std::endl
<< "Normalization strategy minimum value: "
<< this->normalization_strategy->get_min_value()
<< std::endl;
}
ofs.close();
}
void NeuralNetwork::write_stats(std::ofstream* file_path) {
*file_path << "Number of neurons: " << this->neurons.size() << ::std::endl
<< "Number of connections: " << this->connection_list.size() << ::std::endl
<< "Number of active weights: " << this->connection_weights.size() << ::std::endl
<< "Number of active biases: " << this->neuron_biases.size() << ::std::endl;
if(this->normalization_strategy) {
*file_path << "Normalization strategy maximum value: "
<< this->normalization_strategy->get_max_value() << std::endl
<< "Normalization strategy minimum value: "
<< this->normalization_strategy->get_min_value()
<< std::endl;
}
}
std::vector<double>* NeuralNetwork::get_parameter_ptr_biases() {
return &this->neuron_biases;
}
std::vector<double>* NeuralNetwork::get_parameter_ptr_weights() {
return &this->connection_weights;
}

size_t NeuralNetwork::add_new_connection_to_list(std::shared_ptr<ConnectionFunctionGeneral> con) {
this->connection_list.push_back(con);
return this->connection_list.size() - 1;
}

void NeuralNetwork::add_inward_connection(size_t s, size_t t, size_t con_idx) {
if (!this->inward_adjacency.at(s)) {
this->inward_adjacency.at(s) = std::make_shared<std::vector<std::pair<size_t, size_t>>>(::std::vector<std::pair<size_t, size_t>>(0));
}
this->inward_adjacency.at(s)->push_back(std::pair<size_t, size_t>(t, con_idx));
}

void NeuralNetwork::add_outward_connection(size_t s, size_t t, size_t con_idx) {
if (!this->outward_adjacency.at(s)) {
this->outward_adjacency.at(s) = std::make_shared<std::vector<std::pair<size_t, size_t>>>(::std::vector<std::pair<size_t, size_t>>(0));
}
this->outward_adjacency.at(s)->push_back(std::pair<size_t, size_t>(t, con_idx));
}

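
/* analyze_layer_structure() below is in effect a Kahn-style topological sort:
 * neurons whose inward saturation (number of not-yet-processed incoming
 * connections) drops to zero form the next feed-forward layer, with
 * active_eval_set used as a pair of ping-pong buffers selected by idx1/idx2. */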
void NeuralNetwork::analyze_layer_structure() {
if (this->layers_analyzed) {
//nothing to do
return;
}
/* buffer preparation */
this->neuron_potentials.resize(this->get_n_neurons());
/* space allocation */
this->neuron_layers_feedforward.clear();
size_t n = this->neurons.size();

/* helpful counters */
::std::vector<size_t> inward_saturation(n);
::std::vector<size_t> outward_saturation(n);
::std::fill(inward_saturation.begin(), inward_saturation.end(), 0);
::std::fill(outward_saturation.begin(), outward_saturation.end(), 0);
for (unsigned int i = 0; i < n; ++i) {
if (this->inward_adjacency.at(i)) {
inward_saturation[i] = this->inward_adjacency.at(i)->size();
}
if (this->outward_adjacency.at(i)) {
outward_saturation[i] = this->outward_adjacency.at(i)->size();
}
}
::std::vector<size_t> active_eval_set(2 * n);
size_t active_set_size[2];
/* feedforward analysis */
active_set_size[0] = 0;
active_set_size[1] = 0;
size_t idx1 = 0, idx2 = 1;
active_set_size[0] = this->get_n_inputs();
size_t i = 0;
for (i = 0; i < this->get_n_inputs(); ++i) {
active_eval_set[i] = this->input_neuron_indices.at(i);
}
size_t active_ni;
while (active_set_size[idx1] > 0) {
/* we add the current active set as the new outward layer */
std::shared_ptr<::std::vector<size_t>> new_feedforward_layer = std::make_shared<::std::vector<size_t>>(::std::vector<size_t>(active_set_size[idx1]));
this->neuron_layers_feedforward.push_back(new_feedforward_layer);
//we iterate through the active neurons and propagate the signal
for (i = 0; i < active_set_size[idx1]; ++i) {
active_ni = active_eval_set[i + n * idx1];
new_feedforward_layer->at(i) = active_ni;
if (!this->outward_adjacency.at(active_ni)) {
continue;
}
for (auto ni: *(this->outward_adjacency.at(active_ni))) {
inward_saturation[ni.first]--;
if (inward_saturation[ni.first] == 0) {
active_eval_set[active_set_size[idx2] + n * idx2] = ni.first;
active_set_size[idx2]++;
}
}
}

idx1 = idx2;
idx2 = (idx1 + 1) % 2;
active_set_size[idx2] = 0;
}
this->layers_analyzed = true;
}
void NeuralNetwork::save_text(std::string filepath) {
::std::ofstream ofs(filepath);
{
boost::archive::text_oarchive oa(ofs);
oa << *this;
ofs.close();
}
}

NormalizationStrategy* NeuralNetwork::get_normalization_strategy_instance() {
return this->normalization_strategy;
}
void NeuralNetwork::set_normalization_strategy_instance(NormalizationStrategy *ns) {
if(!ns) {
THROW_RUNTIME_ERROR("Argument 'ns' is not initialized!");
}
this->normalization_strategy = ns;
}
FullyConnectedFFN::FullyConnectedFFN(std::vector<unsigned int>* neuron_numbers,
NEURON_TYPE hidden_layer_neuron_type,
std::ofstream* ofs) : NeuralNetwork() {
std::vector<NEURON_TYPE> tmp;
for(size_t i = 0; i < neuron_numbers->size(); i++) {
tmp.emplace_back(hidden_layer_neuron_type);
}
this->init(neuron_numbers, &tmp, ofs);
}
FullyConnectedFFN::FullyConnectedFFN(std::vector<unsigned int>* neuron_numbers,
std::vector<lib4neuro::NEURON_TYPE>* hidden_layer_neuron_types,
std::ofstream* ofs) : NeuralNetwork() {
this->init(neuron_numbers, hidden_layer_neuron_types, ofs);
}
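
/* Construction sketch (assumed call pattern, matching the constructors above):
 *
 *   std::vector<unsigned int> layers = {2, 4, 1};  // 2 inputs, 4 hidden, 1 output
 *   lib4neuro::FullyConnectedFFN net(&layers, lib4neuro::NEURON_TYPE::LOGISTIC);
 *   net.randomize_parameters();
 */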
void FullyConnectedFFN::init(std::vector<unsigned int>* neuron_numbers,
std::vector<NEURON_TYPE>* hidden_layer_neuron_types,
std::ofstream* ofs) {
THROW_INVALID_ARGUMENT_ERROR("Parameter 'neuron_numbers' specifying numbers of neurons in network's layers "
"doesn't specify input and output layers, which are compulsory!");
this->delete_weights = true;
this->delete_biases = true;
this->layers_analyzed = false;
unsigned int inp_dim = neuron_numbers->at(0); //!< Network input dimension
unsigned int out_dim = neuron_numbers->back(); //!< Network output dimension
COUT_DEBUG("Fully connected feed-forward network being constructed:" << std::endl);
COUT_DEBUG("# of inputs: " << inp_dim << std::endl);
COUT_DEBUG("# of outputs: " << out_dim << std::endl);
WRITE_TO_OFS_DEBUG(ofs, "Fully connected feed-forward network being constructed:" << std::endl
<< "# of inputs: " << inp_dim << std::endl
<< "# of outputs: " << out_dim << std::endl);
std::vector<size_t> input_layer_neuron_indices;
std::vector<size_t> previous_layer_neuron_indices;
std::vector<size_t> current_layer_neuron_indices;
/* Creation of INPUT layer neurons */
current_layer_neuron_indices.reserve(inp_dim);
input_layer_neuron_indices.reserve(inp_dim);
for(unsigned int i = 0; i < inp_dim; i++) {
std::shared_ptr<Neuron> new_neuron;
new_neuron.reset(new NeuronLinear());
size_t neuron_id = this->add_neuron(new_neuron, BIAS_TYPE::NO_BIAS);
current_layer_neuron_indices.emplace_back(neuron_id);
}
input_layer_neuron_indices = current_layer_neuron_indices;
/* Creation of HIDDEN layers */
for(unsigned int i = 1; i <= neuron_numbers->size()-2; i++) {
COUT_DEBUG("Hidden layer #" << i << ": " << neuron_numbers->at(i) << " neurons" << std::endl);
WRITE_TO_OFS_DEBUG(ofs, "Hidden layer #" << i << ": " << neuron_numbers->at(i) << " neurons" << std::endl);
previous_layer_neuron_indices.reserve(neuron_numbers->at(i-1));
previous_layer_neuron_indices = current_layer_neuron_indices;
current_layer_neuron_indices.clear();
current_layer_neuron_indices.reserve(neuron_numbers->at(i));
/* Creation of one single hidden layer */
for(unsigned int j = 0; j < neuron_numbers->at(i); j++) {
size_t neuron_id;
/* Create new hidden neuron */
switch (hidden_layer_neuron_types->at(i-1)) {
case NEURON_TYPE::BINARY: {
std::shared_ptr<Neuron> new_neuron;
new_neuron.reset(new NeuronBinary());
neuron_id = this->add_neuron(new_neuron, BIAS_TYPE::NEXT_BIAS);
COUT_DEBUG("Added BINARY neuron." << std::endl);
WRITE_TO_OFS_DEBUG(ofs, "Added BINARY neuron." << std::endl);
break;
}

case NEURON_TYPE::CONSTANT: {
THROW_INVALID_ARGUMENT_ERROR("Constant neurons can't be used in fully connected feed-forward networks!");
case NEURON_TYPE::LINEAR: {
std::shared_ptr<Neuron> new_neuron;
new_neuron.reset(new NeuronLinear());
neuron_id = this->add_neuron(new_neuron, BIAS_TYPE::NEXT_BIAS);
COUT_DEBUG("Added LINEAR neuron." << std::endl);
WRITE_TO_OFS_DEBUG(ofs, "Added LINEAR neuron." << std::endl);
break;
}

case NEURON_TYPE::LOGISTIC: {
std::shared_ptr<Neuron> new_neuron;
new_neuron.reset(new NeuronLogistic());
neuron_id = this->add_neuron(new_neuron, BIAS_TYPE::NEXT_BIAS);
COUT_DEBUG("Added LOGISTIC neuron." << std::endl);
WRITE_TO_OFS_DEBUG(ofs, "Added LOGISTIC neuron." << std::endl);
break;
}
}
current_layer_neuron_indices.emplace_back(neuron_id);
/* Connect new neuron with all neurons from the previous layer */
for(auto ind : previous_layer_neuron_indices) {
this->add_connection_simple(ind, neuron_id, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
}
}
}
previous_layer_neuron_indices.reserve(neuron_numbers->back()-1);
previous_layer_neuron_indices = current_layer_neuron_indices;
current_layer_neuron_indices.clear();
current_layer_neuron_indices.reserve(out_dim);
/* Creation of OUTPUT layer neurons */
for(unsigned int i = 0; i < out_dim; i++) {
std::shared_ptr<Neuron> new_neuron;
new_neuron.reset(new NeuronLinear());
size_t neuron_id = this->add_neuron(new_neuron, BIAS_TYPE::NO_BIAS);
current_layer_neuron_indices.emplace_back(neuron_id);
/* Connect new neuron with all neuron from the previous layer */
for(auto ind : previous_layer_neuron_indices) {
this->add_connection_simple(ind, neuron_id, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
}
}
/* Init variables containing indices of INPUT and OUTPUT neurons */
this->input_neuron_indices = input_layer_neuron_indices;
this->output_neuron_indices = current_layer_neuron_indices;
this->analyze_layer_structure();
}

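/* get_jacobian() fills one row per network output: row i holds the partial
 * derivatives of output i with respect to all parameters (in the
 * [weights..., biases...] layout), obtained by seeding error_partial with the
 * i-th unit vector and running one backpropagation pass; error receives the
 * residuals data.second - fv. */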
void NeuralNetwork::get_jacobian(std::vector<std::vector<double>> &jacobian, std::pair<std::vector<double>, std::vector<double>> &data, std::vector<double> &error) {
std::vector<double> fv(this->get_n_outputs());
jacobian.resize(this->get_n_outputs());
error.resize(this->get_n_outputs());
for(size_t i = 0; i < this->get_n_outputs(); ++i){
jacobian[i].resize(this->get_n_weights() + this->get_n_biases());
std::fill(jacobian[i].begin(), jacobian[i].end(), 0);
}
this->eval_single( data.first, fv );
std::vector<double> error_partial(this->get_n_outputs());
std::fill(error_partial.begin(), error_partial.end(), 0.0);
for( size_t i = 0; i < this->get_n_outputs(); ++i){
error_partial[i] = 1;
this->add_to_gradient_single(data.first, error_partial, 1.0, jacobian[i]);
error[i] = data.second[i] - fv[i];
error_partial[i] = 0;
}
}

}