Commit 6dca5886 authored by Michal Kravcenko
Browse files

MOD: modified all examples to include some meaningful commentary of what's going on

parent afcae45d
......@@ -178,11 +178,14 @@ void Particle::print_coordinate() {
printf("%10.8f\n", (*this->coordinate)[this->coordinate_dim - 1]);
}
ParticleSwarm::ParticleSwarm(ErrorFunction* ef, double *domain_bounds,
ParticleSwarm::ParticleSwarm(ErrorFunction* ef, std::vector<double> *domain_bounds,
double c1, double c2, double w, size_t n_particles, size_t iter_max) {
srand(time(NULL));
if(domain_bounds->size() < 2 * ef->get_dimension()){
std::cerr << "The supplied domain bounds dimension is too low! It should be at least " << 2 * ef->get_dimension() << "\n" << std::endl;
}
this->f = ef;
this->func_dim = ef->get_dimension();
......@@ -200,14 +203,12 @@ ParticleSwarm::ParticleSwarm(ErrorFunction* ef, double *domain_bounds,
this->iter_max = iter_max;
this->particle_swarm = new Particle*[this->n_particles];
this->domain_bounds = &(domain_bounds->at(0));
for( size_t pi = 0; pi < this->n_particles; ++pi ){
this->particle_swarm[pi] = new Particle(ef, domain_bounds);
this->particle_swarm[pi] = new Particle(ef, this->domain_bounds);
}
this->domain_bounds = domain_bounds;
}
ParticleSwarm::~ParticleSwarm() {
......@@ -337,7 +338,7 @@ void ParticleSwarm::optimize( double gamma, double epsilon, double delta) {
// }
/* Check if the particles are near to each other AND the maximal velocity is less than 'gamma' */
if(cluster.size() > delta * this->n_particles && std::abs(prev_max_vel_step/max_vel_step) > gamma) {
if( cluster.size() > delta * this->n_particles && prev_max_vel_step < gamma * max_vel_step ) {
break;
}
......@@ -348,21 +349,18 @@ void ParticleSwarm::optimize( double gamma, double epsilon, double delta) {
this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value);
if(outer_it < this->iter_max) {
/* Convergence reached */
printf("\nFound optimum in %d iterations: %10.8f at coordinates: \n", (int)outer_it, optimal_value);
for (size_t i = 0; i <= this->func_dim - 1; ++i) {
printf("%10.8f \n", (*this->p_min_glob)[i]);
}
printf("\nFound optimum in %d iterations. Objective function value: %10.8f\n", (int)outer_it, optimal_value);
} else {
/* Maximal number of iterations reached */
printf("\nMax number of iterations reached (%d)! Found value %10.8f at coordinates: \n", (int)outer_it, optimal_value);
for (size_t i = 0; i <= this->func_dim - 1; ++i) {
printf("\t%10.8f \n", (*this->p_min_glob)[i]);
}
printf("\nMax number of iterations reached (%d)! Objective function value: %10.8f\n", (int)outer_it, optimal_value);
}
// for (size_t i = 0; i <= this->func_dim - 1; ++i) {
// printf("%10.8f \n", (*this->p_min_glob)[i]);
// }
//
// this->f->eval( this->get_solution() );
//delete [] p_min_glob; // TODO
delete centroid;
}
......
......@@ -170,7 +170,8 @@ public:
* @param n_particles
* @param iter_max
*/
ParticleSwarm( ErrorFunction* ef, double* domain_bounds, double c1, double c2, double w, size_t n_particles, size_t iter_max = 1 );
//TODO make domain_bounds constant
ParticleSwarm( ErrorFunction* ef, std::vector<double> *domain_bounds, double c1 = 1.711897, double c2 = 1.711897, double w = 0.711897, size_t n_particles = 50, size_t iter_max = 1000 );
/**
*
......
......@@ -319,7 +319,7 @@ void DESolver::set_error_function(size_t equation_idx, ErrorFunctionType F, Data
}
//TODO instead use general method with Optimizer as its argument (create hierarchy of optimizers)
void DESolver::solve_via_particle_swarm(double *domain_bounds, double c1, double c2, double w,
void DESolver::solve_via_particle_swarm(std::vector<double> *domain_bounds, double c1, double c2, double w,
size_t n_particles, size_t max_iters, double gamma,
double epsilon, double delta) {
......
......@@ -135,7 +135,7 @@ public:
void solve_via_particle_swarm(
double * domain_bounds,
std::vector<double> *domain_bounds,
double c1,
double c2,
double w,
......
/**
* Basic example using particle swarm method to train the network
* (result 0, -1/4)
*/
//
......@@ -13,6 +12,13 @@
int main() {
std::cout << "Running lib4neuro example 1: Basic use of the particle swarm method to train a simple network with few linear neurons" << std::endl;
std::cout << "***********************************************************************************************************************" <<std::endl;
std::cout << "The code attempts to find an approximate solution to the system of equations below:" << std::endl;
std::cout << "0 * w1 + 1 * w2 = 0.50" << std::endl;
std::cout << "1 * w1 + 0.5*w2 = 0.75" << std::endl;
std::cout << "***********************************************************************************************************************" <<std::endl;
/* TRAIN DATA DEFINITION */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
std::vector<double> inp, out;
......@@ -35,20 +41,15 @@ int main() {
NeuronLinear *i2 = new NeuronLinear( ); //f(x) = x
/* Output neuron */
double b = 1.0;//bias
NeuronLinear *o1 = new NeuronLinear( ); //f(x) = x + 1
NeuronLinear *o1 = new NeuronLinear( ); //f(x) = x
/* Adding neurons to the net */
size_t idx1 = net.add_neuron(i1, BIAS_TYPE::NO_BIAS);
size_t idx2 = net.add_neuron(i2, BIAS_TYPE::NO_BIAS);
size_t idx3 = net.add_neuron(o1, BIAS_TYPE::NEXT_BIAS);
std::vector<double> *bv = net.get_parameter_ptr_biases();
for(size_t i = 0; i < 1; ++i){
bv->at(i) = 1.0;
}
size_t idx3 = net.add_neuron(o1, BIAS_TYPE::NO_BIAS);
//
/* Adding connections */
net.add_connection_simple(idx1, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
......@@ -70,18 +71,39 @@ int main() {
MSE mse(&net, &ds);
/* TRAINING METHOD SETUP */
unsigned int max_iters = 2000;
double domain_bounds[4] = {-800.0, 800.0, -800.0, 800.0};
double c1 = 0.5, c2 = 1.5, w = 0.8;
std::vector<double> domain_bounds = {-10.0, 10.0, -10.0, 10.0, -10.0, 10.0};
ParticleSwarm swarm_01(&mse, &domain_bounds);
/* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
* terminating criterion is met */
double gamma = 0.5;
/* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
* terminating criterion is met ('n' is the total number of particles) */
double epsilon = 0.02;
double delta = 0.7;
swarm_01.optimize(gamma, epsilon, delta);
std::vector<double> *parameters = swarm_01.get_solution();
net.copy_parameter_space(parameters);
printf("w1 = %10.7f\n", parameters->at( 0 ));
printf("w2 = %10.7f\n", parameters->at( 1 ));
std::cout << "***********************************************************************************************************************" <<std::endl;
/* ERROR CALCULATION */
double error = 0.0;
inp = {0, 1};
net.eval_single( inp, out );
error += (0.5 - out[0]) * (0.5 - out[0]);
std::cout << "x = (0, 1), expected output: 0.50, real output: " << out[0] << std::endl;
unsigned int n_particles = 10;
inp = {1, 0.5};
net.eval_single( inp, out );
error += (0.75 - out[0]) * (0.75 - out[0]);
std::cout << "x = (1, 0.5), expected output: 0.75, real output: " << out[0] << std::endl;
std::cout << "Run finished! Error of the network: " << 0.5 * error << std::endl;
ParticleSwarm swarm_01(&mse, domain_bounds, c1, c2, w, n_particles, max_iters);
swarm_01.optimize(0.5, 0.02);
return 0;
}
/**
* Example of a neural network with reused edge weights
* The system of equations associated with the net in this example is not regular
* minimizes the function: ((2y+0.5)^2 + (2x+1)^2 + (2x + y + 0.25)^2 + (2x+1)^2 + 1 + (4.5x + 0.37)^2 ) /3
* minimum [0.705493164] at (x, y) = (-1133/6290, -11193/62900) = (-0.180127186, -0.177949126)
*/
//
......@@ -14,6 +11,13 @@
#include "../include/4neuro.h"
int main() {
std::cout << "Running lib4neuro example 2: Basic use of the particle swarm method to train a network with five linear neurons and repeating edge weights" << std::endl;
std::cout << "********************************************************************************************************************************************" <<std::endl;
std::cout << "The code attempts to find an approximate solution to the system of equations below:" << std::endl;
std::cout << " 0 * w1 + 1 * w2 = 0.50 + b1" << std::endl;
std::cout << " 1 * w1 + 0.5*w2 = 0.75 + b1" << std::endl;
std::cout << "(1.25 + b2) * w2 = 0.63 + b3" << std::endl;
std::cout << "***********************************************************************************************************************" <<std::endl;
/* TRAIN DATA DEFINITION */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
......@@ -40,11 +44,11 @@ int main() {
NeuronLinear *i2 = new NeuronLinear( ); //f(x) = x
double b = 1;//bias
NeuronLinear *i3 = new NeuronLinear( ); //f(x) = x + 1
NeuronLinear *i3 = new NeuronLinear( ); //f(x) = x
/* Output neurons */
NeuronLinear *o1 = new NeuronLinear( ); //f(x) = x + 1
NeuronLinear *o2 = new NeuronLinear( ); //f(x) = x + 1
NeuronLinear *o1 = new NeuronLinear( ); //f(x) = x
NeuronLinear *o2 = new NeuronLinear( ); //f(x) = x
......@@ -55,14 +59,7 @@ int main() {
size_t idx4 = net.add_neuron(i3, BIAS_TYPE::NEXT_BIAS);
size_t idx5 = net.add_neuron(o2, BIAS_TYPE::NEXT_BIAS);
std::vector<double> *bv = net.get_parameter_ptr_biases();
for(size_t i = 0; i < 3; ++i){
bv->at(i) = 1.0;
}
/* Adding connections */
//net.add_connection_simple(idx1, idx3, -1, 1.0);
//net.add_connection_simple(idx2, idx3, -1, 1.0);
net.add_connection_simple(idx1, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 0
net.add_connection_simple(idx2, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 1
net.add_connection_simple(idx4, idx5, SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT, 0); // AGAIN weight index 0 - same weight!
......@@ -94,21 +91,50 @@ int main() {
// printf("evaluation of error at point (%f, %f) => %f\n", weights[0], weights[1], mse.eval(weights));
/* TRAINING METHOD SETUP */
unsigned int max_iters = 5000;
//must encapsulate each of the partial error functions
double domain_bounds[4] = {-800.0, 800.0, -800.0, 800.0};
std::vector<double> domain_bounds = {-10.0, 10.0, -10.0, 10.0,-10.0, 10.0, -10.0, 10.0,-10.0, 10.0, -10.0, 10.0,-10.0, 10.0, -10.0, 10.0,-10.0, 10.0, -10.0, 10.0,-10.0, 10.0, -10.0, 10.0,-10.0, 10.0, -10.0, 10.0};
ParticleSwarm swarm_01(&mse, &domain_bounds);
/* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
* terminating criterion is met */
double gamma = 0.5;
/* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
* terminating criterion is met ('n' is the total number of particles) */
double epsilon = 0.02;
double delta = 0.9;
swarm_01.optimize(gamma, epsilon, delta);
std::vector<double> *parameters = swarm_01.get_solution();
net.copy_parameter_space(parameters);
printf("w1 = %10.7f\n", parameters->at( 0 ));
printf("w2 = %10.7f\n", parameters->at( 1 ));
printf("b1 = %10.7f\n", parameters->at( 2 ));
printf("b2 = %10.7f\n", parameters->at( 3 ));
printf("b3 = %10.7f\n", parameters->at( 4 ));
std::cout << "***********************************************************************************************************************" <<std::endl;
/* ERROR CALCULATION */
double error = 0.0;
inp = {0, 1, 0};
net.eval_single( inp, out );
error += (0.5 - out[0]) * (0.5 - out[0]) + (0.0 - out[1]) * (0.0 - out[1]);
printf("x = (%4.2f, %4.2f, %4.2f), expected output: (%4.2f, %4.2f), real output: (%10.7f, %10.7f)\n", inp[0], inp[1], inp[2], 0.5, 0.0, out[0], out[1]);
double c1 = 0.5, c2 = 1.5, w = 0.8;
inp = {1, 0.5, 0};
net.eval_single( inp, out );
error += (0.75 - out[0]) * (0.75 - out[0]) + (0.0 - out[1]) * (0.0 - out[1]);
printf("x = (%4.2f, %4.2f, %4.2f), expected output: (%4.2f, %4.2f), real output: (%10.7f, %10.7f)\n", inp[0], inp[1], inp[2], 0.75, 0.0, out[0], out[1]);
unsigned int n_particles = 100;
inp = {0, 0, 1.25};
net.eval_single( inp, out );
error += (0.0 - out[0]) * (0.0 - out[0]) + (0.63 - out[1]) * (0.63 - out[1]);
printf("x = (%4.2f, %4.2f, %4.2f), expected output: (%4.2f, %4.2f), real output: (%10.7f, %10.7f)\n", inp[0], inp[1], inp[2], 0.0, 0.63, out[0], out[1]);
ParticleSwarm swarm_01(&mse, domain_bounds, c1, c2, w, n_particles, max_iters);
std::cout << "Run finished! Error of the network: " << error / 3.0 << std::endl;
swarm_01.optimize(0.5, 0.02, 0.9);
printf("evaluation of error: %f\n", mse.eval());
return 0;
}
\ No newline at end of file
......@@ -14,6 +14,8 @@
#include "../include/4neuro.h"
int main() {
std::cout << "Running lib4neuro example 3: Use of the particle swarm method to train a set of networks sharing some edge weights" << std::endl;
std::cout << "********************************************************************************************************************" <<std::endl;
/* TRAIN DATA DEFINITION */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_01, data_vec_02;
......@@ -42,11 +44,11 @@ int main() {
NeuronLinear *i1 = new NeuronLinear(); //f(x) = x
NeuronLinear *i2 = new NeuronLinear(); //f(x) = x
NeuronLinear *i3 = new NeuronLinear( ); //f(x) = x + 1
NeuronLinear *i3 = new NeuronLinear( ); //f(x) = x
/* Output neurons */
NeuronLinear *o1 = new NeuronLinear( ); //f(x) = x + 1
NeuronLinear *o2 = new NeuronLinear( ); //f(x) = x + 1
NeuronLinear *o1 = new NeuronLinear( ); //f(x) = x
NeuronLinear *o2 = new NeuronLinear( ); //f(x) = x
......@@ -57,11 +59,6 @@ int main() {
size_t idx4 = net.add_neuron(i3, BIAS_TYPE::NEXT_BIAS);
size_t idx5 = net.add_neuron(o2, BIAS_TYPE::NEXT_BIAS);
std::vector<double> *bv = net.get_parameter_ptr_biases();
for(size_t i = 0; i < 3; ++i){
bv->at(i) = 1.0;
}
/* Adding connections */
net.add_connection_simple(idx1, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 0
net.add_connection_simple(idx2, idx3, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT); // weight index 1
......@@ -91,40 +88,50 @@ int main() {
subnet_01_input_neurons.push_back(idx1);
subnet_01_input_neurons.push_back(idx2);
subnet_01_output_neurons.push_back(idx3);
NeuralNetwork *subnet_01 = net.get_subnet(subnet_01_input_neurons, subnet_01_output_neurons);
NeuralNetwork *subnet_01 = net.get_subnet( subnet_01_input_neurons, subnet_01_output_neurons );
subnet_02_input_neurons.push_back(idx4);
subnet_02_output_neurons.push_back(idx5);
NeuralNetwork *subnet_02 = net.get_subnet(subnet_02_input_neurons, subnet_02_output_neurons);
NeuralNetwork *subnet_02 = net.get_subnet( subnet_02_input_neurons, subnet_02_output_neurons );
/* COMPLEX ERROR FUNCTION SPECIFICATION */
MSE mse_01(subnet_01, &ds_01);
MSE mse_02(subnet_02, &ds_02);
if(subnet_01 && subnet_02){
/* COMPLEX ERROR FUNCTION SPECIFICATION */
MSE mse_01(subnet_01, &ds_01);
MSE mse_02(subnet_02, &ds_02);
ErrorSum mse_sum;
mse_sum.add_error_function( &mse_01 );
mse_sum.add_error_function( &mse_02 );
ErrorSum mse_sum;
mse_sum.add_error_function( &mse_01 );
mse_sum.add_error_function( &mse_02 );
/* TRAINING METHOD SETUP */
unsigned int max_iters = 50;
/* TRAINING METHOD SETUP */
std::vector<double> domain_bounds = {-10.0, 10.0, -10.0, 10.0,-10.0, 10.0, -10.0, 10.0,-10.0, 10.0, -10.0, 10.0,-10.0, 10.0, -10.0, 10.0,-10.0, 10.0, -10.0, 10.0,-10.0, 10.0, -10.0, 10.0,-10.0, 10.0, -10.0, 10.0};
ParticleSwarm swarm_01(&mse_sum, &domain_bounds);
/* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
* terminating criterion is met */
double gamma = 0.5;
//must encapsulate each of the partial error functions
double domain_bounds[4] = {-800.0, 800.0, -800.0, 800.0};
/* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
* terminating criterion is met ('n' is the total number of particles) */
double epsilon = 0.02;
double delta = 0.9;
swarm_01.optimize(gamma, epsilon, delta);
double c1 = 0.5, c2 = 1.5, w = 0.8;
unsigned int n_particles = 100;
// printf("mse2: %d\n", mse_02.get_dimension());
ParticleSwarm swarm_01(&mse_sum, domain_bounds, c1, c2, w, n_particles, max_iters);
}
else{
std::cout << "We apologize, this example is unfinished as we are in the process of developing methods for efficient subnetwork definition" << std::endl;
}
swarm_01.optimize(0.5, 0.02, 0.9);
if(subnet_01){
delete subnet_01;
subnet_01 = nullptr;
}
// double weights[2] = {0, -0.25};
// printf("evaluation of error at (x, y) = (%f, %f): %f\n", weights[0], weights[1], mse_01.eval(weights));
if(subnet_02){
delete subnet_02;
subnet_02 = nullptr;
}
delete subnet_02;
delete subnet_01;
return 0;
}
\ No newline at end of file
......@@ -318,6 +318,9 @@ double calculate_gradient( std::vector<double> &data_points, size_t n_inner_neur
void test_analytical_gradient_y(std::vector<double> &guess, double accuracy, size_t n_inner_neurons, size_t train_size, double d1_s, double d1_e,
size_t test_size, double ts, double te) {
std::cout << "Finding a solution via a Gradient Descent method with adaptive step-length" << std::endl;
std::cout << "********************************************************************************************************************************************" <<std::endl;
/* SETUP OF THE TRAINING DATA */
std::vector<double> inp, out;
......@@ -362,13 +365,13 @@ void test_analytical_gradient_y(std::vector<double> &guess, double accuracy, siz
for (i = 0; i < n_inner_neurons; ++i) {
wi = (*params_current)[3 * i];
ai = (*params_current)[3 * i + 1];
bi = (*params_current)[3 * i + 2];
printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", (int)(i + 1), wi, bi, ai);
}
// for (i = 0; i < n_inner_neurons; ++i) {
// wi = (*params_current)[3 * i];
// ai = (*params_current)[3 * i + 1];
// bi = (*params_current)[3 * i + 2];
//
// printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", (int)(i + 1), wi, bi, ai);
// }
gamma = 1.0;
double prev_val, val = 0.0, c = 2.0;
......@@ -395,6 +398,8 @@ void test_analytical_gradient_y(std::vector<double> &guess, double accuracy, siz
}
else{
/* norm of the gradient calculation */
sk = 0.0;
......@@ -449,10 +454,12 @@ void test_analytical_gradient_y(std::vector<double> &guess, double accuracy, siz
ai = (*params_current)[3 * i + 1];
bi = (*params_current)[3 * i + 2];
printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", (int)(i + 1), wi, bi, ai);
printf("Path %3d. w%d = %15.8f, b%d = %15.8f, a%d = %15.8f\n", (int)(i + 1), (int)(i + 1), wi, (int)(i + 1), bi, (int)(i + 1), ai);
}
std::cout << "********************************************************************************************************************************************" <<std::endl;
printf("\n--------------------------------------------------\ntest output for gnuplot\n--------------------------------------------------\n");
// data_export_gradient(params_current);
// if(total_error < 1e-3 || true){
// /* ISOTROPIC TEST SET */
// frac = (te - ts) / (test_size - 1);
......@@ -463,27 +470,6 @@ void test_analytical_gradient_y(std::vector<double> &guess, double accuracy, siz
// }
// }
/* error analysis */
double referential_error = 0.0;
mem = eval_approx_df(0.0, n_inner_neurons, *params_current);
referential_error += (mem - 1.0) * (mem - 1.0);
mem = eval_approx_f(0.0, n_inner_neurons, *params_current);
referential_error += (mem - 1.0) * (mem - 1.0);
frac = 1.0 / train_size;
for(j = 0; j < data_points.size(); ++j){
// xj = ds.get_data()->at(j).first[0];
xj = data_points[i];
mem = 4.0 * eval_approx_f(xj, n_inner_neurons, *params_current) + 4.0 * eval_approx_df(xj, n_inner_neurons, *params_current) + eval_approx_ddf(xj, n_inner_neurons, *params_current);
referential_error += mem * mem * frac;
}
printf("Total error (as used in the NN example): %10.8f\n", referential_error);
delete gradient_current;
delete gradient_prev;
delete params_current;
......@@ -494,6 +480,9 @@ void test_analytical_gradient_y(std::vector<double> &guess, double accuracy, siz
void test_ode(double accuracy, size_t n_inner_neurons, size_t train_size, double ds, double de, size_t n_test_points, double ts, double te, size_t max_iters, size_t n_particles){
std::cout << "Finding a solution via the Particle Swarm Optimization" << std::endl;
std::cout << "********************************************************************************************************************************************" <<std::endl;
/* SOLVER SETUP */
size_t n_inputs = 1;
size_t n_equations = 3;
......@@ -575,62 +564,66 @@ void test_ode(double accuracy, size_t n_inner_neurons, size_t train_size, double
std::uniform_real_distribution<double> dist(-10.0, 10.0);
std::vector<double> input(1);
for( size_t testi = 0; testi < 50; ++testi ){
double test_error_eq1 = 0.0, total_error = 0.0;
for(size_t i = 0; i < params.size(); ++i){
params[i] = dist(gen);
}
for(size_t i = 0; i < n_inner_neurons; ++i){
params_analytical[3 * i] = params[i];
params_analytical[3 * i + 1] = params[n_inner_neurons + i];
params_analytical[3 * i + 2] = params[2 * n_inner_neurons + i];
}
for(auto d: *ds_00.get_data()){
input = d.first;
double x = input[0];
double analytical_value_f = eval_approx_f(x, n_inner_neurons, params_analytical);
double analytical_value_df = eval_approx_df(x, n_inner_neurons, params_analytical);
double analytical_value_ddf = eval_approx_ddf(x, n_inner_neurons, params_analytical);
double de_solver_value_eq1 = solver_01.eval_equation(0, &params, input);
double analytical_value_eq1 = 4 * analytical_value_f + 4 * analytical_value_df + analytical_value_ddf;
test_error_eq1 += (de_solver_value_eq1 - analytical_value_eq1) * (de_solver_value_eq1 - analytical_value_eq1);
}
input[0] = 0.0;
double de_solver_value_eq2 = solver_01.eval_equation(1, &params, input);
double analytical_value_eq2 = eval_approx_f(0.0, n_inner_neurons, params_analytical);
double test_error_eq2 = (de_solver_value_eq2 - analytical_value_eq2) * (de_solver_value_eq2 - analytical_value_eq2);
double de_solver_value_eq3 = solver_01.eval_equation(2, &params, input);
double analytical_value_eq3 = eval_approx_df(0.0, n_inner_neurons, params_analytical);
double test_error_eq3 = (de_solver_value_eq3 - analytical_value_eq3) * (de_solver_value_eq3 - analytical_value_eq3);
double total_error_de_solver = solver_01.eval_total_error(params);
double total_error_analytical = eval_error_function(params_analytical, n_inner_neurons, test_points);
printf("\tRepresentation test %6d, error of eq1: %10.8f, error of eq2: %10.8f, error of eq3: %10.8f, total error: %10.8f\n", (int)testi, std::sqrt(test_error_eq1), std::sqrt(test_error_eq2), std::sqrt(test_error_eq3), (total_error_analytical - total_error_de_solver) * (total_error_analytical - total_error_de_solver));
}
// for( size_t testi = 0; testi < 50; ++testi ){
// double test_error_eq1 = 0.0, total_error = 0.0;
// for(size_t i = 0; i < params.size(); ++i){
// params[i] = dist(gen);
// }
// for(size_t i = 0; i < n_inner_neurons; ++i){
// params_analytical[3 * i] = params[i];
// params_analytical[3 * i + 1] = params[n_inner_neurons + i];
// params_analytical[3 * i + 2] = params[2 * n_inner_neurons + i];
// }
//
// for(auto d: *ds_00.get_data()){
// input = d.first;
// double x = input[0];
//
// double analytical_value_f = eval_approx_f(x, n_inner_neurons, params_analytical);
// double analytical_value_df = eval_approx_df(x, n_inner_neurons, params_analytical);
// double analytical_value_ddf = eval_approx_ddf(x, n_inner_neurons, params_analytical);
//
// double de_solver_value_eq1 = solver_01.eval_equation(0, &params, input);
// double analytical_value_eq1 = 4 * analytical_value_f + 4 * analytical_value_df + analytical_value_ddf;
// test_error_eq1 += (de_solver_value_eq1 - analytical_value_eq1) * (de_solver_value_eq1 - analytical_value_eq1);
//
// }
// input[0] = 0.0;
// double de_solver_value_eq2 = solver_01.eval_equation(1, &params, input);
// double analytical_value_eq2 = eval_approx_f(0.0, n_inner_neurons, params_analytical);
// double test_error_eq2 = (de_solver_value_eq2 - analytical_value_eq2) * (de_solver_value_eq2 - analytical_value_eq2);
//
// double de_solver_value_eq3 = solver_01.eval_equation(2, &params, input);
// double analytical_value_eq3 = eval_approx_df(0.0, n_inner_neurons, params_analytical);
// double test_error_eq3 = (de_solver_value_eq3 - analytical_value_eq3) * (de_solver_value_eq3 - analytical_value_eq3);
//
// double total_error_de_solver = solver_01.eval_total_error(params);
//
// double total_error_analytical = eval_error_function(params_analytical, n_inner_neurons, test_points);
//
// printf("\tRepresentation test %6d, error of eq1: %10.8f, error of eq2: %10.8f, error of eq3: %10.8f, total error: %10.8f\n", (int)testi, std::sqrt(test_error_eq1), std::sqrt(test_error_eq2), std::sqrt(test_error_eq3), (total_error_analytical - total_error_de_solver) * (total_error_analytical - total_error_de_solver));
// }
/* PARTICLE SWARM TRAINING METHOD SETUP */
//must encapsulate each of the partial error functions
double *domain_bounds = new double[ 6 * n_inner_neurons ];
std::vector<double> domain_bounds(6 * n_inner_neurons);