/**
 * Example of a neural network with reused edge weights
 */
#include <cstdio>
#include <iostream>
#include <utility>
#include <vector>
#include "4neuro.h"
int main() {

std::cout << "Running lib4neuro example 2: Basic use of the particle swarm method to train a network with five linear neurons and repeating edge weights" << std::endl;
std::cout << "********************************************************************************************************************************************" <<std::endl;
std::cout << "The code attempts to find an approximate solution to the system of equations below:" << std::endl;
std::cout << " 0 * w1 + 1 * w2 = 0.50 + b1" << std::endl;
std::cout << " 1 * w1 + 0.5*w2 = 0.75 + b1" << std::endl;
std::cout << "(1.25 + b2) * w2 = 0.63 + b3" << std::endl;
std::cout << "***********************************************************************************************************************" <<std::endl;
/* TRAIN DATA DEFINITION */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
std::vector<double> inp, out;
inp = {0, 1, 0};
out = {0.5, 0};
data_vec.emplace_back(std::make_pair(inp, out));
inp = {1, 0.5, 0};
out = {0.75, 0};
data_vec.emplace_back(std::make_pair(inp, out));
inp = {0, 0, 1.25};
out = {0, 0.63};
data_vec.emplace_back(std::make_pair(inp, out));
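/* Each (input, output) pair above is one training row; the first output component is the target for
 * neuron o1, the second for neuron o2 (cf. the system printed above). */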
l4n::DataSet ds(&data_vec);
/* NETWORK DEFINITION */
l4n::NeuralNetwork net;
/* Input neurons */
l4n::NeuronLinear *i1 = new l4n::NeuronLinear( ); //f(x) = x
l4n::NeuronLinear *i2 = new l4n::NeuronLinear( ); //f(x) = x
l4n::NeuronLinear *i3 = new l4n::NeuronLinear( ); //f(x) = x
/* Output neurons */
l4n::NeuronLinear *o1 = new l4n::NeuronLinear( ); //f(x) = x
l4n::NeuronLinear *o2 = new l4n::NeuronLinear( ); //f(x) = x
/* Adding neurons to the nets */
size_t idx1 = net.add_neuron(i1, l4n::BIAS_TYPE::NO_BIAS);
size_t idx2 = net.add_neuron(i2, l4n::BIAS_TYPE::NO_BIAS);
size_t idx3 = net.add_neuron(o1, l4n::BIAS_TYPE::NEXT_BIAS);
size_t idx4 = net.add_neuron(i3, l4n::BIAS_TYPE::NEXT_BIAS);
size_t idx5 = net.add_neuron(o2, l4n::BIAS_TYPE::NEXT_BIAS);
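/* BIAS_TYPE::NO_BIAS leaves a neuron without a bias parameter, while BIAS_TYPE::NEXT_BIAS allocates the
 * next free bias: o1 receives b1, the third input neuron i3 receives b2 and o2 receives b3, matching the
 * system printed above. */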

/* Adding connections */
net.add_connection_simple(idx1, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 0
net.add_connection_simple(idx2, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 1
net.add_connection_simple(idx4, idx5, l4n::SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT, 0); // AGAIN weight index 0 - same weight!
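/* SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT allocates a new weight for the edge, whereas EXISTING_WEIGHT with
 * index 0 makes the edge i3 -> o2 reuse the weight of the edge i1 -> o1 (w1). The network therefore has
 * only two independent weights even though it contains three edges. */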
net.randomize_weights();
/* specification of the input/output neurons */
std::vector<size_t> net_input_neurons_indices(3);
std::vector<size_t> net_output_neurons_indices(2);
net_input_neurons_indices[0] = idx1;
net_input_neurons_indices[1] = idx2;
net_input_neurons_indices[2] = idx4;
net_output_neurons_indices[0] = idx3;
net_output_neurons_indices[1] = idx5;
net.specify_input_neurons(net_input_neurons_indices);
net.specify_output_neurons(net_output_neurons_indices);
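/* The order of the indices above fixes the meaning of the input and output vectors: inputs are fed to
 * (i1, i2, i3) and outputs are read from (o1, o2), which matches the layout of the training rows. */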
/* COMPLEX ERROR FUNCTION SPECIFICATION */
l4n::MSE mse(&net, &ds);
// double weights[2] = {-0.18012411, -0.17793740};
// double weights[2] = {1, 1};
// printf("evaluation of error at point (%f, %f) => %f\n", weights[0], weights[1], mse.eval(weights));
/* TRAINING METHOD SETUP */
//must encapsulate each of the partial error functions

std::vector<double> domain_bounds = {-10.0, 10.0, -10.0, 10.0, -10.0, 10.0, -10.0, 10.0, -10.0, 10.0,
                                     -10.0, 10.0, -10.0, 10.0, -10.0, 10.0, -10.0, 10.0, -10.0, 10.0,
                                     -10.0, 10.0, -10.0, 10.0, -10.0, 10.0, -10.0, 10.0};
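/* domain_bounds lists a (lower, upper) search interval for every optimized parameter; this network has
 * five parameters (w1, w2, b1, b2, b3), so only the first five pairs correspond to actual parameters. */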
l4n::ParticleSwarm swarm_01(&mse, &domain_bounds);
/* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
* terminating criterion is met */
double gamma = 0.5;
/* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
* terminating criterion is met ('n' is the total number of particles) */
double epsilon = 0.02;
double delta = 0.9;
swarm_01.optimize(gamma, epsilon, delta);
std::vector<double> *parameters = swarm_01.get_solution();
net.copy_parameter_space(parameters);
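/* get_solution() returns the best parameter vector found by the swarm (weights first, then biases, as the
 * printouts below assume) and copy_parameter_space() loads it back into the network so that the
 * evaluations below use the trained values. */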
printf("w1 = %10.7f\n", parameters->at( 0 ));
printf("w2 = %10.7f\n", parameters->at( 1 ));
printf("b1 = %10.7f\n", parameters->at( 2 ));
printf("b2 = %10.7f\n", parameters->at( 3 ));
printf("b3 = %10.7f\n", parameters->at( 4 ));
std::cout << "***********************************************************************************************************************" <<std::endl;
/* ERROR CALCULATION */
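/* The trained network is evaluated on each training row and the squared differences from the expected
 * outputs are accumulated; the value reported at the end is this sum divided by the number of rows. */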
double error = 0.0;
inp = {0, 1, 0};
net.eval_single( inp, out );
error += (0.5 - out[0]) * (0.5 - out[0]) + (0.0 - out[1]) * (0.0 - out[1]);
printf("x = (%4.2f, %4.2f, %4.2f), expected output: (%4.2f, %4.2f), real output: (%10.7f, %10.7f)\n", inp[0], inp[1], inp[2], 0.5, 0.0, out[0], out[1]);

inp = {1, 0.5, 0};
net.eval_single( inp, out );
error += (0.75 - out[0]) * (0.75 - out[0]) + (0.0 - out[1]) * (0.0 - out[1]);
printf("x = (%4.2f, %4.2f, %4.2f), expected output: (%4.2f, %4.2f), real output: (%10.7f, %10.7f)\n", inp[0], inp[1], inp[2], 0.75, 0.0, out[0], out[1]);

inp = {0, 0, 1.25};
net.eval_single( inp, out );
error += (0.0 - out[0]) * (0.0 - out[0]) + (0.63 - out[1]) * (0.63 - out[1]);
printf("x = (%4.2f, %4.2f, %4.2f), expected output: (%4.2f, %4.2f), real output: (%10.7f, %10.7f)\n", inp[0], inp[1], inp[2], 0.0, 0.63, out[0], out[1]);

std::cout << "Run finished! Error of the network: " << error / 3.0 << std::endl;
return 0;
}