// NOTE(review): "Newer"/"Older" pager links are repository-viewer residue from
// the scrape, not source code — remove once the original file is restored.
/**
* Example solving the following ODE:
*
* g(t) = (d^2/d^t)y(t) + 4 (d/dt)y(t) + 4y(t) = 0, for t in [0, 4]
* y(0) = 1
* (d/dt)y(0) = 1
*
* -------------------------------------------
* Analytical solution: e^(-2x) * (3x + 1)
* NN representation: sum over [a_i * (1 + e^(-x * w_i + b_i))^(-1)]
* -------------------------------------------
* Optimal NN setting with biases (2 inner neurons)

 *
* Path 1. w = -1.66009975, b = -0.40767447, a = 2.46457042
* Path 2. w = -4.38622765, b = 2.75707816, a = -8.04752347
* @author Michal Kravčenko
* @date 17.7.18 -
*/
#include "4neuro.h"
/* Analytical solution of the ODE: y(x) = e^(-2x) * (3x + 1) */
double eval_f(double x){
    return std::exp(-2.0 * x) * (3.0 * x + 1.0);
}

/* First derivative of the analytical solution: y'(x) = e^(-2x) * (1 - 6x) */
double eval_df(double x){
    return std::exp(-2.0 * x) * (1.0 - 6.0 * x);
}

/* Second derivative of the analytical solution: y''(x) = 4 e^(-2x) * (3x - 2) */
double eval_ddf(double x){
    return 4.0 * std::exp(-2.0 * x) * (3.0 * x - 2.0);
}
/**
 * Evaluates the NN approximation y(x) = sum_i [ a_i / (1 + e^(b_i - w_i x)) ].
 * @param x evaluation point
 * @param n_inner_neurons number of hidden neurons
 * @param parameters packed as [w_0, a_0, b_0, w_1, a_1, b_1, ...]
 * @return approximate function value at x
 */
double eval_approx_f(double x, size_t n_inner_neurons, std::vector<double> &parameters){
    double value = 0.0, wi, ai, bi, ei, ei1;
    for(size_t i = 0; i < n_inner_neurons; ++i){
        wi = parameters[3 * i];
        ai = parameters[3 * i + 1];
        bi = parameters[3 * i + 2];

        /* std::exp is faster and clearer than std::pow(l4n::E, ...) */
        ei = std::exp(bi - wi * x);
        ei1 = ei + 1.0;
        value += ai / (ei1);
    }
    return value;
}
/**
 * First derivative of the NN approximation:
 * y'(x) = sum_i [ a_i w_i e^(b_i - w_i x) / (e^(b_i - w_i x) + 1)^2 ].
 * @param parameters packed as [w_0, a_0, b_0, ...]
 */
double eval_approx_df(double x, size_t n_inner_neurons, std::vector<double> &parameters){
    double value = 0.0, wi, ai, bi, ei, ei1;
    for(size_t i = 0; i < n_inner_neurons; ++i){
        wi = parameters[3 * i];
        ai = parameters[3 * i + 1];
        bi = parameters[3 * i + 2];

        ei = std::exp(bi - wi * x);
        ei1 = ei + 1.0;
        value += ai * wi * ei / (ei1 * ei1);
    }
    return value;
}
/**
 * Second derivative of the NN approximation:
 * y''(x) = sum_i [ -a_i w_i^2 e^(b_i) e^(w_i x) (e^(w_i x) - e^(b_i)) / (e^(b_i) + e^(w_i x))^3 ].
 * @param parameters packed as [w_0, a_0, b_0, ...]
 */
double eval_approx_ddf(double x, size_t n_inner_neurons, std::vector<double> &parameters){
    double value = 0.0, wi, ai, bi, ewx, eb;
    for(size_t i = 0; i < n_inner_neurons; ++i){
        wi = parameters[3 * i];
        ai = parameters[3 * i + 1];
        bi = parameters[3 * i + 2];

        eb = std::exp(bi);
        ewx = std::exp(wi * x);
        value += -(ai * wi * wi * eb * ewx * (ewx - eb)) / ((eb + ewx) * (eb + ewx) * (eb + ewx));
    }
    return value;
}
/* NN partial derivative (w_i): (a_i x e^(b_i - w_i x)) / (e^(b_i - w_i x) + 1)^2 */
double eval_approx_dw_f(double x, size_t neuron_idx, std::vector<double> &parameters){
    double wi, ai, bi, ei, ei1;
    wi = parameters[3 * neuron_idx];
    ai = parameters[3 * neuron_idx + 1];
    bi = parameters[3 * neuron_idx + 2];

    ei = std::exp(bi - wi * x);
    ei1 = ei + 1.0;
    return (ai * x * ei) / (ei1 * ei1);
}
/* dNN partial derivative (w_i):
 * -(a w x e^(b - w x))/(e^(b - w x) + 1)^2
 * + (2 a w x e^(2b - 2 w x))/(e^(b - w x) + 1)^3
 * + (a e^(b - w x))/(e^(b - w x) + 1)^2 */
double eval_approx_dw_df(double x, size_t neuron_idx, std::vector<double> &parameters){
    double wi, ai, bi, ei, ei1;
    wi = parameters[3 * neuron_idx];
    ai = parameters[3 * neuron_idx + 1];
    bi = parameters[3 * neuron_idx + 2];

    ei = std::exp(bi - wi * x);
    ei1 = ei + 1.0;
    return -(ai * wi * x * ei) / (ei1 * ei1) + (2.0 * ai * wi * x * ei * ei) / (ei1 * ei1 * ei1) + (ai * ei) / (ei1 * ei1);
}
/* ddNN partial derivative (w_i):
 * -(a w^2 x e^(b + 2 w x))/(e^b + e^(w x))^3
 * - (a w^2 x e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
 * + (3 a w^2 x e^(b + 2 w x) (e^(w x) - e^b))/(e^b + e^(w x))^4
 * - (2 a w e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3 */
double eval_approx_dw_ddf(double x, size_t neuron_idx, std::vector<double> &parameters){
    double wi, ai, bi, eb, ewx;
    wi = parameters[3 * neuron_idx];
    ai = parameters[3 * neuron_idx + 1];
    bi = parameters[3 * neuron_idx + 2];

    eb = std::exp(bi);
    ewx = std::exp(wi * x);
    return -(ai*wi*wi* x * eb*ewx*ewx)/((eb + ewx)*(eb + ewx)*(eb + ewx)) - (ai*wi*wi*x*eb*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx)) + (3*ai*wi*wi*x*eb*ewx*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx)*(eb + ewx)) - (2*ai*wi*eb*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
}
/* NN partial derivative (a_i): 1 / (1 + e^(b_i - w_i x)) */
double eval_approx_da_f(double x, size_t neuron_idx, std::vector<double> &parameters){
    double wi, bi, ei, ei1;
    wi = parameters[3 * neuron_idx];
    bi = parameters[3 * neuron_idx + 2];

    ei = std::exp(bi - wi * x);
    ei1 = ei + 1.0;
    return 1.0 / ei1;
}
/* dNN partial derivative (a_i): (w e^(b - w x)) / (e^(b - w x) + 1)^2 */
double eval_approx_da_df(double x, size_t neuron_idx, std::vector<double> &parameters){
    double wi, bi, ei, ei1;
    wi = parameters[3 * neuron_idx];
    bi = parameters[3 * neuron_idx + 2];

    ei = std::exp(bi - wi * x);
    ei1 = ei + 1.0;
    return (wi * ei) / (ei1 * ei1);
}
/* ddNN partial derivative (a_i): -(w^2 e^(b + w x) (e^(w x) - e^b)) / (e^b + e^(w x))^3 */
double eval_approx_da_ddf(double x, size_t neuron_idx, std::vector<double> &parameters){
    double wi, bi, eip, ewx, eb;
    wi = parameters[3 * neuron_idx];
    bi = parameters[3 * neuron_idx + 2];

    eip = std::exp(bi + wi * x);
    eb = std::exp(bi);
    ewx = std::exp(wi * x);
    return -(wi * wi * eip * (ewx - eb)) / ((eb + ewx) * (eb + ewx) * (eb + ewx));
}
/* NN partial derivative (b_i): -(a_i e^(b_i - w_i x)) / (e^(b_i - w_i x) + 1)^2 */
double eval_approx_db_f(double x, size_t neuron_idx, std::vector<double> &parameters){
    double wi, bi, ei, ai, ei1;
    wi = parameters[3 * neuron_idx];
    ai = parameters[3 * neuron_idx + 1];
    bi = parameters[3 * neuron_idx + 2];

    ei = std::exp(bi - wi * x);
    ei1 = ei + 1.0;
    return -(ai * ei) / (ei1 * ei1);
}
/* dNN partial derivative (b_i): (a w e^(b + w x) (e^(w x) - e^b)) / (e^b + e^(w x))^3 */
double eval_approx_db_df(double x, size_t neuron_idx, std::vector<double> &parameters){
    double wi, bi, ai, ewx, eb;
    wi = parameters[3 * neuron_idx];
    ai = parameters[3 * neuron_idx + 1];
    bi = parameters[3 * neuron_idx + 2];

    eb = std::exp(bi);
    ewx = std::exp(wi * x);
    return (ai * wi * eb * ewx * (ewx - eb)) / ((eb + ewx) * (eb + ewx) * (eb + ewx));
}
/* ddNN partial derivative (b_i):
 * -(a w^2 e^(b + w x) (-4 e^(b + w x) + e^(2b) + e^(2 w x))) / (e^b + e^(w x))^4 */
double eval_approx_db_ddf(double x, size_t neuron_idx, std::vector<double> &parameters){
    double wi, bi, ai, ewx, eb;
    wi = parameters[3 * neuron_idx];
    ai = parameters[3 * neuron_idx + 1];
    bi = parameters[3 * neuron_idx + 2];

    eb = std::exp(bi);
    ewx = std::exp(wi * x);
    return -(ai * wi * wi * eb * ewx * (-4.0 * eb * ewx + eb * eb + ewx * ewx)) / ((eb + ewx) * (eb + ewx) * (eb + ewx) * (eb + ewx));
}
/**
 * Total error of the NN approximation: mean squared residual of the governing
 * ODE y'' + 4y' + 4y = 0 over test_points, plus the squared errors of the two
 * boundary conditions y(0) = 1 and y'(0) = 1.
 * @param parameters packed as [w_0, a_0, b_0, ...]
 * @param n_inner_neurons number of hidden neurons
 * @param test_points abscissas at which the ODE residual is sampled
 */
double eval_error_function(std::vector<double> &parameters, size_t n_inner_neurons, std::vector<double> test_points){
    double output = 0.0, approx;
    /* guard against an empty test set (the original computed 1.0 / 0 here) */
    double frac = test_points.empty() ? 0.0 : 1.0 / (test_points.size());
    for(auto x: test_points){
        /* governing equation residual */
        approx = 4.0 * eval_approx_f(x, n_inner_neurons, parameters) + 4.0 * eval_approx_df(x, n_inner_neurons, parameters) + eval_approx_ddf(x, n_inner_neurons, parameters);
        output += (0.0 - approx) * (0.0 - approx) * frac;
    }
    /* BC: y(0) = 1 */
    approx = eval_approx_f(0.0, n_inner_neurons, parameters);
    output += (1.0 - approx) * (1.0 - approx);
    /* BC: y'(0) = 1 */
    approx = eval_approx_df(0.0, n_inner_neurons, parameters);
    output += (1.0 - approx) * (1.0 - approx);
    return output;
}
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
/**
 * Simple adaptive step-length heuristic for gradient descent.
 * Damps gamma slightly whenever the error increased, then accelerates or
 * decelerates according to how the gradient norm evolved between iterations.
 * @param gamma          step length, updated in place
 * @param val, prev_val  current and previous error values
 * @param sk             norm of the gradient difference (movement indicator)
 * @param grad_norm, grad_norm_prev  current and previous gradient norms
 */
void eval_step_size_simple(double &gamma, double val, double prev_val, double sk, double grad_norm, double grad_norm_prev){
    /* the error went up -> shrink the step a little */
    if(val > prev_val){
        gamma *= 0.99999;
    }

    const bool tiny_move   = (sk <= 1e-3);
    const bool flatter_now = (grad_norm < grad_norm_prev);

    if(tiny_move || flatter_now){
        /* movement on a line / new slope is less steep -> speed up */
        gamma *= 1.0005;
    }
    else{
        /* steeper slope -> slow down; identical norms -> damp harder */
        gamma /= (grad_norm > grad_norm_prev) ? 1.0005 : 1.005;
    }
    // gamma *= 0.999999;
}
/**
 * Adaptive step-length rule: the contraction constant c drifts with the error
 * trend, and gamma is rescaled by c^(1 - 2*beta) times the gradient-norm
 * ratio raised to the power 1/c.
 * @param gamma  step length, updated in place
 * @param beta   angle-derived factor between consecutive gradients
 * @param c      adaptive contraction constant, updated in place
 * @param fi, fim  current and previous error values
 */
void eval_step_size_mk( double &gamma, double beta, double &c, double grad_norm_prev, double grad_norm, double fi, double fim ){
    if( fi > fim ){
        /* error increased -> relax the constant */
        c /= 1.0000005;
    }
    else if( fi < fim ){
        /* error decreased -> tighten the constant */
        c *= 1.0000005;
    }

    const double shape_term = std::pow( c, 1.0 - 2.0 * beta );
    const double norm_term  = std::pow( grad_norm_prev / grad_norm, 1.0 / c );
    gamma *= shape_term * norm_term;
}
/**
 * Accumulates into *gradient the gradient of the composite error with respect
 * to the packed parameter vector [w_0, a_0, b_0, w_1, ...], and returns the
 * error value itself.  The error is the sum of:
 *   e1 = (1 - y(0))^2                      (Dirichlet BC)
 *   e2 = (1 - y'(0))^2                     (Neumann BC)
 *   e3 = mean over data_points of (y'' + 4y' + 4y)^2   (governing ODE)
 * Each "-= derror * dNN/dp" line adds the true partial derivative d(e)/dp,
 * because derror = 2*mem and d(mem)/dp = -dNN/dp for every term.
 * NOTE(review): *gradient is accumulated into, not reset — the caller is
 * expected to zero it before each call (see the std::fill at the call site).
 */
double calculate_gradient( std::vector<double> &data_points, size_t n_inner_neurons, std::vector<double> *parameters, std::vector<double> *gradient ){
size_t i, j;
double x, mem, derror, total_error, approx;
size_t train_size = data_points.size();
/* error boundary condition: y(0) = 1 => e1 = (1 - y(0))^2 */
x = 0.0;
mem = (1.0 - eval_approx_f(x, n_inner_neurons, *parameters));
derror = 2.0 * mem;
total_error = mem * mem;
for(i = 0; i < n_inner_neurons; ++i){
/* chain rule: d(e1)/dp = -2 (1 - y(0)) * dNN(0)/dp */
(*gradient)[3 * i] -= derror * eval_approx_dw_f(x, i, *parameters);
(*gradient)[3 * i + 1] -= derror * eval_approx_da_f(x, i, *parameters);
(*gradient)[3 * i + 2] -= derror * eval_approx_db_f(x, i, *parameters);
}
/* error boundary condition: y'(0) = 1 => e2 = (1 - y'(0))^2 */
mem = (1.0 - eval_approx_df(x, n_inner_neurons, *parameters));
derror = 2.0 * mem;
total_error += mem * mem;
for(i = 0; i < n_inner_neurons; ++i){
/* chain rule: d(e2)/dp = -2 (1 - y'(0)) * dNN'(0)/dp */
(*gradient)[3 * i] -= derror * eval_approx_dw_df(x, i, *parameters);
(*gradient)[3 * i + 1] -= derror * eval_approx_da_df(x, i, *parameters);
(*gradient)[3 * i + 2] -= derror * eval_approx_db_df(x, i, *parameters);
}
for(j = 0; j < data_points.size(); ++j){
x = data_points[j];
/* error of the governing equation: y''(x) + 4y'(x) + 4y(x) = 0 => e3 = 1/n * (0 - y''(x) - 4y'(x) - 4y(x))^2 */
approx= eval_approx_ddf(x, n_inner_neurons, *parameters) + 4.0 * eval_approx_df(x, n_inner_neurons, *parameters) + 4.0 * eval_approx_f(x, n_inner_neurons, *parameters);
mem = 0.0 - approx;
derror = 2.0 * mem / train_size;
for(i = 0; i < n_inner_neurons; ++i){
/* residual derivative combines the ddNN, dNN and NN partials with the ODE coefficients */
(*gradient)[3 * i] -= derror * (eval_approx_dw_ddf(x, i, *parameters) + 4.0 * eval_approx_dw_df(x, i, *parameters) + 4.0 * eval_approx_dw_f(x, i, *parameters));
(*gradient)[3 * i + 1] -= derror * (eval_approx_da_ddf(x, i, *parameters) + 4.0 * eval_approx_da_df(x, i, *parameters) + 4.0 * eval_approx_da_f(x, i, *parameters));
(*gradient)[3 * i + 2] -= derror * (eval_approx_db_ddf(x, i, *parameters) + 4.0 * eval_approx_db_df(x, i, *parameters) + 4.0 * eval_approx_db_f(x, i, *parameters));
}
total_error += mem * mem / train_size;
}
return total_error;
}
void test_analytical_gradient_y(std::vector<double> &guess, double accuracy, size_t n_inner_neurons, size_t train_size, double d1_s, double d1_e,

Michal Kravcenko
committed
std::cout << "Finding a solution via a Gradient Descent method with adaptive step-length" << std::endl;
std::cout << "********************************************************************************************************************************************" <<std::endl;
/* SETUP OF THE TRAINING DATA */
std::vector<double> inp, out;

Michal Kravcenko
committed
double grad_norm = accuracy * 10.0, mem, ai, bi, wi, error, derror, approx, xj, gamma, total_error, sk, sy, sx, sg, beta;
double grad_norm_prev = grad_norm;
size_t i, j, iter_idx = 0;

Michal Kravcenko
committed
frac = (d1_e - d1_s) / (train_size - 1);
for(unsigned int i = 0; i < train_size; ++i){
data_points[i] = frac * i;
}
// /* CHEBYSCHEV TRAIN SET */
// alpha = PI / (train_size );
// frac = 0.5 * (d1_e - d1_s);
// for(i = 0; i < train_size; ++i){
// x = (std::cos(PI - alpha * i) + 1.0) * frac + d1_s;
// data_points[i] = x;
Martin Beseda
committed
// l4n::DataSet ds(0.0, 4.0, train_size, 0.0);
std::vector<double> *gradient_current = new std::vector<double>(3 * n_inner_neurons);
std::vector<double> *gradient_prev = new std::vector<double>(3 * n_inner_neurons);
std::vector<double> *params_current = new std::vector<double>(guess);
std::vector<double> *params_prev = new std::vector<double>(guess);
std::vector<double> *conjugate_direction_current = new std::vector<double>(3 * n_inner_neurons);
std::vector<double> *conjugate_direction_prev = new std::vector<double>(3 * n_inner_neurons);
std::vector<double> *ptr_mem;
std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
std::fill(gradient_prev->begin(), gradient_prev->end(), 0.0);
std::fill(conjugate_direction_current->begin(), conjugate_direction_current->end(), 0.0);
std::fill(conjugate_direction_prev->begin(), conjugate_direction_prev->end(), 0.0);

Michal Kravcenko
committed
// for (i = 0; i < n_inner_neurons; ++i) {
// wi = (*params_current)[3 * i];
// ai = (*params_current)[3 * i + 1];
// bi = (*params_current)[3 * i + 2];
//
// printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", (int)(i + 1), wi, bi, ai);
// }
double prev_val, val = 0.0, c = 2.0;
prev_val = val;
grad_norm_prev = grad_norm;
/* reset of the current gradient */
std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
val = calculate_gradient( data_points, n_inner_neurons, params_current, gradient_current );
grad_norm = 0.0;
for(auto v: *gradient_current){
grad_norm += v * v;
grad_norm = std::sqrt(grad_norm);
/* Update of the parameters */
/* step length calculation */
if(iter_idx < 10 || iter_idx % 100 == 0){

Michal Kravcenko
committed
/* norm of the gradient calculation */
sk = 0.0;
for(i = 0; i < gradient_current->size(); ++i){
sx = (*gradient_current)[i] - (*gradient_prev)[i];
sk += sx * sx;
}
sk = std::sqrt(sk);
/* angle between two consecutive gradients */
double beta = 0.0, sx = 0.0;
for(i = 0; i < gradient_current->size(); ++i){
sx += (gradient_current->at( i ) * gradient_prev->at( i ));
}
sx /= grad_norm * grad_norm_prev;
Martin Beseda
committed
beta = std::sqrt(std::acos( sx ) / l4n::PI);
// eval_step_size_simple( gamma, val, prev_val, sk, grad_norm, grad_norm_prev );
eval_step_size_mk( gamma, beta, c, grad_norm_prev, grad_norm, val, prev_val );
for(i = 0; i < gradient_current->size(); ++i){
(*params_prev)[i] = (*params_current)[i] - gamma * (*gradient_current)[i];
}
/* switcheroo */
ptr_mem = gradient_prev;
gradient_prev = gradient_current;
gradient_current = ptr_mem;
ptr_mem = params_prev;
params_prev = params_current;
params_current = ptr_mem;
if(iter_idx % 1 == 0){
printf("Iteration %12d. Step size: %15.8f, C: %15.8f, Gradient norm: %15.8f. Total error: %10.8f\r", (int)iter_idx, gamma, c, grad_norm, val);
printf("Iteration %12d. Step size: %15.8f, C: %15.8f, Gradient norm: %15.8f. Total error: %10.8f\r\n", (int)iter_idx, gamma, c, grad_norm, val);
for (i = 0; i < n_inner_neurons; ++i) {
wi = (*params_current)[3 * i];
ai = (*params_current)[3 * i + 1];
bi = (*params_current)[3 * i + 2];

Michal Kravcenko
committed
printf("Path %3d. w%d = %15.8f, b%d = %15.8f, a%d = %15.8f\n", (int)(i + 1), (int)(i + 1), wi, (int)(i + 1), bi, (int)(i + 1), ai);

Michal Kravcenko
committed
std::cout << "********************************************************************************************************************************************" <<std::endl;

Michal Kravcenko
committed
// data_export_gradient(params_current);
// if(total_error < 1e-3 || true){
// /* ISOTROPIC TEST SET */
// frac = (te - ts) / (test_size - 1);
// for(j = 0; j < test_size; ++j){
// xj = frac * j + ts;
//
// std::cout << j + 1 << " " << xj << " " << eval_f(xj) << " " << eval_approx_f(xj, n_inner_neurons, *params_current) << " " << eval_df(xj) << " " << eval_approx_df(xj, n_inner_neurons, *params_current) << " " << eval_ddf(xj) << " " << eval_approx_ddf(xj, n_inner_neurons, *params_current) << std::endl;
// }
// }
delete gradient_current;
delete gradient_prev;
delete params_current;
delete params_prev;
delete conjugate_direction_current;
delete conjugate_direction_prev;
void test_ode(double accuracy, size_t n_inner_neurons, size_t train_size, double ds, double de, size_t n_test_points, double ts, double te, size_t max_iters, size_t n_particles){

Michal Kravcenko
committed
std::cout << "Finding a solution via the Particle Swarm Optimization" << std::endl;
std::cout << "********************************************************************************************************************************************" <<std::endl;

Michal Kravcenko
committed
/* SOLVER SETUP */
Martin Beseda
committed
l4n::DESolver solver_01( n_equations, n_inputs, n_inner_neurons );

Michal Kravcenko
committed
/* SETUP OF THE EQUATIONS */
Martin Beseda
committed
l4n::MultiIndex alpha_0( n_inputs );
l4n::MultiIndex alpha_1( n_inputs );
l4n::MultiIndex alpha_2( n_inputs );

Michal Kravcenko
committed
alpha_2.set_partial_derivative(0, 2);
alpha_1.set_partial_derivative(0, 1);
/* the governing differential equation */

Michal Kravcenko
committed
solver_01.add_to_differential_equation( 0, alpha_2, "1.0" );
solver_01.add_to_differential_equation( 0, alpha_1, "4.0" );
solver_01.add_to_differential_equation( 0, alpha_0, "4.0" );

Michal Kravcenko
committed
/* dirichlet boundary condition */

Michal Kravcenko
committed
solver_01.add_to_differential_equation( 1, alpha_0, "1.0" );

Michal Kravcenko
committed
/* neumann boundary condition */

Michal Kravcenko
committed
solver_01.add_to_differential_equation( 2, alpha_1, "1.0" );

Michal Kravcenko
committed
/* SETUP OF THE TRAINING DATA */
std::vector<double> inp, out;

Michal Kravcenko
committed
double d1_s = ds, d1_e = de, frac;

Michal Kravcenko
committed
/* TRAIN DATA FOR THE GOVERNING DE */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_g;

Michal Kravcenko
committed
std::vector<double> test_points(train_size);

Michal Kravcenko
committed
/* ISOTROPIC TRAIN SET */
frac = (d1_e - d1_s) / (train_size - 1);
for(unsigned int i = 0; i < train_size; ++i){
inp = {frac * i};
out = {0.0};
data_vec_g.emplace_back(std::make_pair(inp, out));

Michal Kravcenko
committed
test_points[i] = inp[0];
}
/* CHEBYSCHEV TRAIN SET */
// alpha = PI / (train_size - 1);
// frac = 0.5 * (d1_e - d1_s);
// for(unsigned int i = 0; i < train_size; ++i){
// inp = {(std::cos(alpha * i) + 1.0) * frac + d1_s};
// out = {0.0};
// data_vec_g.emplace_back(std::make_pair(inp, out));

Michal Kravcenko
committed
//
// test_points[i] = inp[0];
Martin Beseda
committed
l4n::DataSet ds_00(&data_vec_g);
/* TRAIN DATA FOR DIRICHLET BC */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_y;
inp = {0.0};
out = {1.0};
data_vec_y.emplace_back(std::make_pair(inp, out));
Martin Beseda
committed
l4n::DataSet ds_01(&data_vec_y);
/* TRAIN DATA FOR NEUMANN BC */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_dy;
inp = {0.0};
out = {1.0};
data_vec_dy.emplace_back(std::make_pair(inp, out));
Martin Beseda
committed
l4n::DataSet ds_02(&data_vec_dy);

Michal Kravcenko
committed
/* Placing the conditions into the solver */
Martin Beseda
committed
solver_01.set_error_function( 0, l4n::ErrorFunctionType::ErrorFuncMSE, &ds_00 );
solver_01.set_error_function( 1, l4n::ErrorFunctionType::ErrorFuncMSE, &ds_01 );
solver_01.set_error_function( 2, l4n::ErrorFunctionType::ErrorFuncMSE, &ds_02 );

Michal Kravcenko
committed
size_t total_dim = (2 + n_inputs) * n_inner_neurons;
std::vector<double> params(total_dim), params_analytical(total_dim);

Michal Kravcenko
committed
std::random_device seeder;
std::mt19937 gen(seeder());
std::uniform_real_distribution<double> dist(-10.0, 10.0);
std::vector<double> input(1);

Michal Kravcenko
committed
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
// for( size_t testi = 0; testi < 50; ++testi ){
// double test_error_eq1 = 0.0, total_error = 0.0;
// for(size_t i = 0; i < params.size(); ++i){
// params[i] = dist(gen);
// }
// for(size_t i = 0; i < n_inner_neurons; ++i){
// params_analytical[3 * i] = params[i];
// params_analytical[3 * i + 1] = params[n_inner_neurons + i];
// params_analytical[3 * i + 2] = params[2 * n_inner_neurons + i];
// }
//
// for(auto d: *ds_00.get_data()){
// input = d.first;
// double x = input[0];
//
// double analytical_value_f = eval_approx_f(x, n_inner_neurons, params_analytical);
// double analytical_value_df = eval_approx_df(x, n_inner_neurons, params_analytical);
// double analytical_value_ddf = eval_approx_ddf(x, n_inner_neurons, params_analytical);
//
// double de_solver_value_eq1 = solver_01.eval_equation(0, ¶ms, input);
// double analytical_value_eq1 = 4 * analytical_value_f + 4 * analytical_value_df + analytical_value_ddf;
// test_error_eq1 += (de_solver_value_eq1 - analytical_value_eq1) * (de_solver_value_eq1 - analytical_value_eq1);
//
// }
// input[0] = 0.0;
// double de_solver_value_eq2 = solver_01.eval_equation(1, ¶ms, input);
// double analytical_value_eq2 = eval_approx_f(0.0, n_inner_neurons, params_analytical);
// double test_error_eq2 = (de_solver_value_eq2 - analytical_value_eq2) * (de_solver_value_eq2 - analytical_value_eq2);
//
// double de_solver_value_eq3 = solver_01.eval_equation(2, ¶ms, input);
// double analytical_value_eq3 = eval_approx_df(0.0, n_inner_neurons, params_analytical);
// double test_error_eq3 = (de_solver_value_eq3 - analytical_value_eq3) * (de_solver_value_eq3 - analytical_value_eq3);
//
// double total_error_de_solver = solver_01.eval_total_error(params);
//
// double total_error_analytical = eval_error_function(params_analytical, n_inner_neurons, test_points);
//
// printf("\tRepresentation test %6d, error of eq1: %10.8f, error of eq2: %10.8f, error of eq3: %10.8f, total error: %10.8f\n", (int)testi, std::sqrt(test_error_eq1), std::sqrt(test_error_eq2), std::sqrt(test_error_eq3), (total_error_analytical - total_error_de_solver) * (total_error_analytical - total_error_de_solver));
// }

Michal Kravcenko
committed

Michal Kravcenko
committed
/* PARTICLE SWARM TRAINING METHOD SETUP */
//must encapsulate each of the partial error functions

Michal Kravcenko
committed
std::vector<double> domain_bounds(2 * total_dim);
for(unsigned int i = 0; i < total_dim; ++i){

Michal Kravcenko
committed
domain_bounds[2 * i] = -10.0;
domain_bounds[2 * i + 1] = 10.0;

Michal Kravcenko
committed
double c1 = 1.7, c2 = 1.7, w = 0.7;
/* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
* terminating criterion is met */
double gamma = 0.5;

Michal Kravcenko
committed

Michal Kravcenko
committed
/* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
* terminating criterion is met ('n' is the total number of particles) */
double epsilon = 0.02;
double delta = 0.9;
solver_01.solve_via_particle_swarm( &domain_bounds, c1, c2, w, n_particles, max_iters, gamma, epsilon, delta );

Michal Kravcenko
committed
Martin Beseda
committed
l4n::NeuralNetwork *solution = solver_01.get_solution( alpha_0 );
l4n::NeuralNetwork *solution_d = solver_01.get_solution( alpha_1 );
l4n::NeuralNetwork *solution_dd = solver_01.get_solution( alpha_2 );

Michal Kravcenko
committed

Michal Kravcenko
committed
std::vector<double> parameters(total_dim);//w1, a1, b1, w2, a2, b2, ... , wm, am, bm
std::vector<double> *weight_params = solution->get_parameter_ptr_weights();
std::vector<double> *biases_params = solution->get_parameter_ptr_biases();
for(size_t i = 0; i < n_inner_neurons; ++i){
parameters[3 * i] = weight_params->at(i);
parameters[3 * i + 1] = weight_params->at(i + n_inner_neurons);
parameters[3 * i + 2] = biases_params->at(i);

Michal Kravcenko
committed
printf("Path %3d. w%d = %15.8f, b%d = %15.8f, a%d = %15.8f\n", (int)(i + 1), (int)(i + 1), parameters[3 * i], (int)(i + 1), parameters[3 * i + 2], (int)(i + 1), parameters[3 * i + 1]);

Michal Kravcenko
committed
/* ISOTROPIC TEST SET FOR BOUNDARY CONDITIONS */
/* first boundary condition & its error */
std::ofstream ofs("data_1d_ode1.txt", std::ofstream::out);
printf("Exporting files 'data_1d_ode1.txt': %7.3f%%\r", 0.0);
frac = (te - ts) / (n_test_points - 1);
for(size_t i = 0; i < n_test_points; ++i){
double x = frac * i + ts;
inp[0] = x;
solution->eval_single(inp, out);
double F = out[0];
solution_d->eval_single( inp, out);
double DF = out[0];
solution_dd->eval_single( inp, out);
double DDF = out[0];
Martin Beseda
committed
ofs << i + 1 << " " << x << " " << std::pow(l4n::E, -2*x) * (3*x + 1)<< " " << F << " " << std::pow(l4n::E, -2*x) * (1 - 6*x)<< " " << DF << " " << 4 * std::pow(l4n::E, -2*x) * (3*x - 2)<< " " << DDF << std::endl;

Michal Kravcenko
committed
printf("Exporting files 'data_1d_ode1.txt': %7.3f%%\r", (100.0 * i) / (n_test_points - 1));
std::cout.flush();
}
printf("Exporting files 'data_1d_ode1.txt': %7.3f%%\n", 100.0);
std::cout.flush();
ofs.close();

Michal Kravcenko
committed
std::cout << "********************************************************************************************************************************************" <<std::endl;

Michal Kravcenko
committed
// data_export_particle(parameters);
// std::vector<double> output(1);
// double x;
// for(unsigned int i = 0; i < n_test_points; ++i){
//
// x = i * ((d1_e - d1_s) / (n_test_points - 1)) + d1_s;
// input[0] = x;
//
// solution->eval_single(input, output);
//
Martin Beseda
committed
// std::cout << i + 1 << " " << x << " " << std::pow(l4n::E, -2*x) * (3*x + 1)<< " " << output[0] << " " << std::pow(l4n::E, -2*x) * (1 - 6*x)<< " " << eval_approx_df(x, n_inner_neurons, parameters) << " " << 4 * std::pow(l4n::E, -2*x) * (3*x - 2)<< " " << eval_approx_ddf(x, n_inner_neurons, parameters) << std::endl;

Michal Kravcenko
committed
// }

Michal Kravcenko
committed
std::cout << "Running lib4neuro Ordinary Differential Equation example 1" << std::endl;
std::cout << "********************************************************************************************************************************************" <<std::endl;
std::cout << " Governing equation: y''(x) + 4y'(x) + 4y(x) = 0.0, for x in [0, 4]" << std::endl;
std::cout << "Dirichlet boundary condition: y(0.0) = 1.0" << std::endl;
std::cout << " Neumann boundary condition: y'(0.0) = 1.0" << std::endl;
std::cout << "********************************************************************************************************************************************" <<std::endl;
std::cout << "Expressing solution as y(x) = sum over [a_i / (1 + exp(bi - wxi*x ))], i in [1, n], where n is the number of hidden neurons" <<std::endl;
std::cout << "********************************************************************************************************************************************" <<std::endl;

Michal Kravcenko
committed
unsigned int n_inner_neurons = 2;
unsigned int train_size = 10;

Michal Kravcenko
committed
double ds = 0.0;
double de = 4.0;

Michal Kravcenko
committed
unsigned int test_size = 300;
double ts = ds;
double te = de + 2;

Michal Kravcenko
committed
size_t particle_swarm_max_iters = 1000;
size_t n_particles = 100;
test_ode(accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te, particle_swarm_max_iters, n_particles);
// std::vector<double> init_guess = {0.35088209, -0.23738505, 0.14160885, 3.72785473, -6.45758308, 1.73769138};

Michal Kravcenko
committed
// std::vector<double> init_guess(3 * n_inner_neurons);
//
// std::random_device seeder;
// std::mt19937 gen(seeder());
// std::uniform_real_distribution<double> dist(-1.0, 1.0);
// for(unsigned int i = 0; i < init_guess.size(); ++i){
// init_guess[i] = dist(gen);
// }

Michal Kravcenko
committed
//TODO, sometimes produces nans in combinations with extremely high number of iterations, INVESTIGATE
// test_analytical_gradient_y(init_guess, accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te);