/**
 * Example of a neural network with reused edge weights
 */

#include <iostream>
#include <cstdio>
#include <vector>
#include "4neuro.h"
int main() {

std::cout << "Running lib4neuro example 2: Basic use of the particle swarm method to train a network with five linear neurons and repeating edge weights" << std::endl;
std::cout << "********************************************************************************************************************************************" <<std::endl;
std::cout << "The code attempts to find an approximate solution to the system of equations below:" << std::endl;
std::cout << " 0 * w1 + 1 * w2 = 0.50 + b1" << std::endl;
std::cout << " 1 * w1 + 0.5*w2 = 0.75 + b1" << std::endl;
std::cout << "(1.25 + b2) * w2 = 0.63 + b3" << std::endl;
std::cout << "***********************************************************************************************************************" <<std::endl;
/* TRAIN DATA DEFINITION */
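/* Each (input, output) pair supplies the three network inputs together with the
 * desired values of both outputs; taken together, the three samples encode the
 * system of equations printed above, with the third sample exercising the reused
 * weight w1. */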
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
std::vector<double> inp, out;
inp = {0, 1, 0};
out = {0.5, 0};
data_vec.emplace_back(std::make_pair(inp, out));
inp = {1, 0.5, 0};
out = {0.75, 0};
data_vec.emplace_back(std::make_pair(inp, out));
inp = {0, 0, 1.25};
out = {0, 0.63};
data_vec.emplace_back(std::make_pair(inp, out));
DataSet ds(&data_vec);
/* NETWORK DEFINITION */
NeuralNetwork net;
/* Input neurons */
NeuronLinear *i1 = new NeuronLinear( ); //f(x) = x
NeuronLinear *i2 = new NeuronLinear( ); //f(x) = x

NeuronLinear *i3 = new NeuronLinear( ); //f(x) = x
/* Output neurons */

NeuronLinear *o1 = new NeuronLinear( ); //f(x) = x
NeuronLinear *o2 = new NeuronLinear( ); //f(x) = x
/* Adding neurons to the net */
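/* Note on the bias flags below: BIAS_TYPE::NO_BIAS leaves a neuron without a
 * trainable bias, while BIAS_TYPE::NEXT_BIAS attaches the next unused bias
 * parameter to it, so the three NEXT_BIAS neurons (o1, i3, o2) end up with the
 * biases reported later as b1, b2 and b3 (semantics inferred from this example). */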

size_t idx1 = net.add_neuron(i1, BIAS_TYPE::NO_BIAS);
size_t idx2 = net.add_neuron(i2, BIAS_TYPE::NO_BIAS);
size_t idx3 = net.add_neuron(o1, BIAS_TYPE::NEXT_BIAS);
size_t idx4 = net.add_neuron(i3, BIAS_TYPE::NEXT_BIAS);
size_t idx5 = net.add_neuron(o2, BIAS_TYPE::NEXT_BIAS);
/* Adding connections */
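/* NEXT_WEIGHT allocates a fresh trainable weight for a connection, while
 * EXISTING_WEIGHT reuses an already allocated weight referenced by its index.
 * The third connection below therefore shares weight 0 (w1) with the first one,
 * which is exactly the "reused edge weights" feature this example demonstrates. */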

net.add_connection_simple(idx1, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 0
net.add_connection_simple(idx2, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 1
net.add_connection_simple(idx4, idx5, SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT, 0); // AGAIN weight index 0 - same weight!
net.randomize_weights();
/* specification of the input/output neurons */
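/* The network is assembled as a general graph of neurons and connections rather
 * than as predefined layers, so the input and output roles of individual neurons
 * have to be declared explicitly. */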
std::vector<size_t> net_input_neurons_indices(3);
std::vector<size_t> net_output_neurons_indices(2);
net_input_neurons_indices[0] = idx1;
net_input_neurons_indices[1] = idx2;
net_input_neurons_indices[2] = idx4;
net_output_neurons_indices[0] = idx3;
net_output_neurons_indices[1] = idx5;
net.specify_input_neurons(net_input_neurons_indices);
net.specify_output_neurons(net_output_neurons_indices);
/* COMPLEX ERROR FUNCTION SPECIFICATION */
MSE mse(&net, &ds);
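/* The MSE object measures the mean squared error of the network outputs against
 * the targets stored in the DataSet for a given parameter vector; it serves as
 * the objective function handed to the particle swarm optimizer below. */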
// double weights[2] = {-0.18012411, -0.17793740};
// double weights[2] = {1, 1};
// printf("evaluation of error at point (%f, %f) => %f\n", weights[0], weights[1], mse.eval(weights));
/* TRAINING METHOD SETUP */
std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
domain_bounds[2 * i] = -10;
domain_bounds[2 * i + 1] = 10;
}
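/* domain_bounds holds one (lower, upper) pair per optimized parameter, laid out
 * as [lo_0, hi_0, lo_1, hi_1, ...]; here every weight and bias is searched within
 * [-10, 10], as set up by the loop above. */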
double c1 = 1.7;
double c2 = 1.7;
double w = 0.7;
size_t n_particles = 50;
size_t iter_max = 1000;
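/* Swarm hyperparameters, named in the usual PSO convention: c1 and c2 are the
 * cognitive and social acceleration coefficients, w is the inertia weight,
 * n_particles is the swarm size and iter_max caps the number of iterations. */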

/* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
* terminating criterion is met */
double gamma = 0.5;
/* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
* terminating criterion is met ('n' is the total number of particles) */
double epsilon = 0.02;
double delta = 0.7;
ParticleSwarm swarm_01(
&domain_bounds,
c1,
c2,
w,
gamma,
epsilon,
delta,
n_particles,
iter_max
);
swarm_01.optimize( mse );
std::vector<double> *parameters = swarm_01.get_parameters();
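/* The best parameter vector found by the swarm is copied back into the network;
 * its layout is [w1, w2, b1, b2, b3], matching the indices used in the printouts
 * below. */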

net.copy_parameter_space(parameters);
printf("w1 = %10.7f\n", parameters->at( 0 ));
printf("w2 = %10.7f\n", parameters->at( 1 ));
printf("b1 = %10.7f\n", parameters->at( 2 ));
printf("b2 = %10.7f\n", parameters->at( 3 ));
printf("b3 = %10.7f\n", parameters->at( 4 ));
std::cout << "***********************************************************************************************************************" <<std::endl;
/* ERROR CALCULATION */
double error = 0.0;
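/* Re-evaluate every training sample with the trained network and accumulate the
 * squared deviations from the targets by hand; the value reported at the end is
 * this sum averaged over the three samples. */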
inp = {0, 1, 0};
net.eval_single( inp, out );
error += (0.5 - out[0]) * (0.5 - out[0]) + (0.0 - out[1]) * (0.0 - out[1]);
printf("x = (%4.2f, %4.2f, %4.2f), expected output: (%4.2f, %4.2f), real output: (%10.7f, %10.7f)\n", inp[0], inp[1], inp[2], 0.5, 0.0, out[0], out[1]);

inp = {1, 0.5, 0};
net.eval_single( inp, out );
error += (0.75 - out[0]) * (0.75 - out[0]) + (0.0 - out[1]) * (0.0 - out[1]);
printf("x = (%4.2f, %4.2f, %4.2f), expected output: (%4.2f, %4.2f), real output: (%10.7f, %10.7f)\n", inp[0], inp[1], inp[2], 0.75, 0.0, out[0], out[1]);

inp = {0, 0, 1.25};
net.eval_single( inp, out );
error += (0.0 - out[0]) * (0.0 - out[0]) + (0.63 - out[1]) * (0.63 - out[1]);
printf("x = (%4.2f, %4.2f, %4.2f), expected output: (%4.2f, %4.2f), real output: (%10.7f, %10.7f)\n", inp[0], inp[1], inp[2], 0.0, 0.63, out[0], out[1]);

std::cout << "Run finished! Error of the network: " << error / 3.0 << std::endl;
return 0;
}