/**
* Example solving the following ODE:
*
* (d^2/dt^2)y(t) + 4 (d/dt)y(t) + 4y(t) = 0, for t in [0, 4]
* y(0) = 1
* (d/dt)y(0) = 1
*
* -------------------------------------------
* Analytical solution: y(t) = e^(-2t) * (3t + 1)
* NN representation: sum over [a_i * (1 + e^(-x * w_i + b_i))^(-1)]
* -------------------------------------------
* Optimal NN setting without biases (2 inner neurons)
* Path 1. w = -6.35706416, b = 0.00000000, a = -1.05305639
* Path 2. w = -1.55399893, b = 0.00000000, a = 3.07464411
* -------------------------------------------
* Optimal NN setting with biases (2 inner neurons)
* Path 1. w = 6.75296220, b = -1.63419516, a = 1.71242130
* Path 2. w = 1.86917877, b = 1.09972747, a = -1.70757578
* @author Michal Kravčenko
* @date 17.7.18 -
*/
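/*
 * Derivation of the analytical solution (for reference): the characteristic
 * polynomial of y'' + 4y' + 4y = 0 is r^2 + 4r + 4 = (r + 2)^2, i.e. a double
 * root r = -2, so the general solution is y(t) = (C1 + C2 t) e^(-2t).
 * y(0) = 1 gives C1 = 1; from y'(t) = (C2 - 2 C1 - 2 C2 t) e^(-2t) the
 * condition y'(0) = 1 gives C2 - 2 = 1, i.e. C2 = 3, hence
 * y(t) = e^(-2t) (3t + 1) as stated above.
 */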

#include "Solvers/DESolver.h"
double eval_f(double x){
return std::pow(E, -2.0 * x) * (3.0 * x + 1.0);
}
double eval_df(double x){
return std::pow(E, -2.0 * x) * (1.0 - 6.0 * x);
}
double eval_ddf(double x){
return 4.0 * std::pow(E, -2.0 * x) * (3.0 * x - 2.0);
}
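/*
 * Minimal sanity check (a sketch, not part of the original example): the
 * closed-form solution above should make the residual y'' + 4y' + 4y vanish
 * on the whole interval. The helper name and the sample count are
 * illustrative only.
 */
bool check_analytical_solution(size_t n_samples = 41){
    for(size_t i = 0; i < n_samples; ++i){
        double x = 4.0 * i / (n_samples - 1);
        /* residual of the governing equation at x */
        double residual = eval_ddf(x) + 4.0 * eval_df(x) + 4.0 * eval_f(x);
        if(std::abs(residual) > 1e-10){
            return false;
        }
    }
    return true;
}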
double eval_approx_f(double x, size_t n_inner_neurons, std::vector<double> &parameters){
double value= 0.0, wi, ai, bi, ei, ei1;
for(size_t i = 0; i < n_inner_neurons; ++i){
wi = parameters[3 * i];
ai = parameters[3 * i + 1];
bi = parameters[3 * i + 2];
ei = std::pow(E, bi - wi * x);
ei1 = ei + 1.0;
value += ai / (ei1);
}
return value;
}
double eval_approx_df(double x, size_t n_inner_neurons, std::vector<double> &parameters){
double value= 0.0, wi, ai, bi, ei, ei1;
for(size_t i = 0; i < n_inner_neurons; ++i){
wi = parameters[3 * i];
ai = parameters[3 * i + 1];
bi = parameters[3 * i + 2];
ei = std::pow(E, bi - wi * x);
ei1 = ei + 1.0;
value += ai * wi * ei / (ei1 * ei1);
}
return value;
}
double eval_approx_ddf(double x, size_t n_inner_neurons, std::vector<double> &parameters){
double value= 0.0, wi, ai, bi, ewx, eb;
for(size_t i = 0; i < n_inner_neurons; ++i){
wi = parameters[3 * i];
ai = parameters[3 * i + 1];
bi = parameters[3 * i + 2];
eb = std::pow(E, bi);
ewx = std::pow(E, wi * x);
value += -(ai*wi*wi*eb*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
}
return value;
}
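/*
 * Debug-only sketch (not in the original file): compares the closed-form first
 * and second derivatives of the network output with central finite differences
 * of eval_approx_f. The helper name and the step size h are illustrative.
 */
void check_approx_derivatives(double x, size_t n_inner_neurons, std::vector<double> &parameters){
    const double h = 1e-5;
    double fp = eval_approx_f(x + h, n_inner_neurons, parameters);
    double fm = eval_approx_f(x - h, n_inner_neurons, parameters);
    double f0 = eval_approx_f(x, n_inner_neurons, parameters);
    printf("d/dx   analytic: %15.8f, finite difference: %15.8f\n",
           eval_approx_df(x, n_inner_neurons, parameters), (fp - fm) / (2.0 * h));
    printf("d2/dx2 analytic: %15.8f, finite difference: %15.8f\n",
           eval_approx_ddf(x, n_inner_neurons, parameters), (fp - 2.0 * f0 + fm) / (h * h));
}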
//NN partial derivative (wi): (ai * x * e^(bi - wi * x)) * (e^(bi - wi * x) + 1)^(-2)
double eval_approx_dw_f(double x, size_t neuron_idx, std::vector<double> &parameters){
double wi, ai, bi, ei, ei1;
wi = parameters[3 * neuron_idx];
ai = parameters[3 * neuron_idx + 1];
bi = parameters[3 * neuron_idx + 2];
ei = std::pow(E, bi - wi * x);
ei1 = ei + 1.0;
return (ai * x * ei) / (ei1 * ei1);
}
//dNN partial derivative (wi): -(a w x e^(b - w x))/(e^(b - w x) + 1)^2 + (2 a w x e^(2 b - 2 w x))/(e^(b - w x) + 1)^3 + (a e^(b - w x))/(e^(b - w x) + 1)^2
double eval_approx_dw_df(double x, size_t neuron_idx, std::vector<double> &parameters){
double wi, ai, bi, ei, ei1;
wi = parameters[3 * neuron_idx];
ai = parameters[3 * neuron_idx + 1];
bi = parameters[3 * neuron_idx + 2];
ei = std::pow(E, bi - wi * x);
ei1 = ei + 1.0;
return -(ai * wi * x * ei)/(ei1 * ei1) + (2.0*ai*wi*x*ei*ei)/(ei1 * ei1 * ei1) + (ai* ei)/(ei1 * ei1);
}
//ddNN partial derivative (wi): -(a w^2 x e^(b + 2 w x))/(e^b + e^(w x))^3 - (a w^2 x e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3 + (3 a w^2 x e^(b + 2 w x) (e^(w x) - e^b))/(e^b + e^(w x))^4 - (2 a w e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
double eval_approx_dw_ddf(double x, size_t neuron_idx, std::vector<double> &parameters){
double wi, ai, bi, eb, ewx;
wi = parameters[3 * neuron_idx];
ai = parameters[3 * neuron_idx + 1];
bi = parameters[3 * neuron_idx + 2];
eb = std::pow(E, bi);
ewx = std::pow(E, wi * x);
return -(ai*wi*wi* x * eb*ewx*ewx)/((eb + ewx)*(eb + ewx)*(eb + ewx)) - (ai*wi*wi*x*eb*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx)) + (3*ai*wi*wi*x*eb*ewx*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx)*(eb + ewx)) - (2*ai*wi*eb*ewx*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
}
//NN partial derivative (ai): (1 + e^(-x * wi + bi))^(-1)
double eval_approx_da_f(double x, size_t neuron_idx, std::vector<double> &parameters){
double wi, bi, ei, ei1;
wi = parameters[3 * neuron_idx];
bi = parameters[3 * neuron_idx + 2];
ei = std::pow(E, bi - wi * x);
ei1 = ei + 1.0;
return 1.0 / ei1;
}
//dNN partial derivative (ai): (w e^(b - w x))/(e^(b - w x) + 1)^2
double eval_approx_da_df(double x, size_t neuron_idx, std::vector<double> &parameters){
double wi, bi, ei, ei1;
wi = parameters[3 * neuron_idx];
bi = parameters[3 * neuron_idx + 2];
ei = std::pow(E, bi - wi * x);
ei1 = ei + 1.0;
return (wi*ei)/(ei1 * ei1);
}
//ddNN partial derivative (ai): -(w^2 e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
double eval_approx_da_ddf(double x, size_t neuron_idx, std::vector<double> &parameters){
double wi, bi, eip, ewx, eb;
wi = parameters[3 * neuron_idx];
bi = parameters[3 * neuron_idx + 2];
eip = std::pow(E, bi + wi * x);
eb = std::pow(E, bi);
ewx = std::pow(E, wi * x);
return -(wi*wi*eip*(ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
}
//NN partial derivative (bi): -(ai * e^(bi - wi * x)) * (e^(bi - wi * x) + 1)^(-2)
double eval_approx_db_f(double x, size_t neuron_idx, std::vector<double> &parameters){
double wi, bi, ei, ai, ei1;
wi = parameters[3 * neuron_idx];
ai = parameters[3 * neuron_idx + 1];
bi = parameters[3 * neuron_idx + 2];
ei = std::pow(E, bi - wi * x);
ei1 = ei + 1.0;
return -(ai * ei)/(ei1 * ei1);
}
//dNN partial derivative (bi): (a w e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
double eval_approx_db_df(double x, size_t neuron_idx, std::vector<double> &parameters){
double wi, bi, ai, ewx, eb;
wi = parameters[3 * neuron_idx];
ai = parameters[3 * neuron_idx + 1];
bi = parameters[3 * neuron_idx + 2];
eb = std::pow(E, bi);
ewx = std::pow(E, wi*x);
return (ai* wi* eb*ewx* (ewx - eb))/((eb + ewx)*(eb + ewx)*(eb + ewx));
}
//ddNN partial derivative (bi): -(a w^2 e^(b + w x) (-4 e^(b + w x) + e^(2 b) + e^(2 w x)))/(e^b + e^(w x))^4
double eval_approx_db_ddf(double x, size_t neuron_idx, std::vector<double> &parameters){
double wi, bi, ai, ewx, eb;
wi = parameters[3 * neuron_idx];
ai = parameters[3 * neuron_idx + 1];
bi = parameters[3 * neuron_idx + 2];
eb = std::pow(E, bi);
ewx = std::pow(E, wi*x);
return -(ai* wi*wi* eb*ewx* (-4.0* eb*ewx + eb*eb + ewx*ewx))/((eb +ewx)*(eb +ewx)*(eb +ewx)*(eb +ewx));
}
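/*
 * Optional gradient check (a sketch, not part of the original example): the
 * partial derivatives of the network output with respect to w_i, a_i, b_i can
 * be verified against central finite differences in the parameter vector.
 * The helper name and the step size h are illustrative.
 */
void check_parameter_partials(double x, size_t neuron_idx, size_t n_inner_neurons, std::vector<double> &parameters){
    const double h = 1e-6;
    /* analytic partials in the same order as the parameter layout: w, a, b */
    double analytic[3] = {
        eval_approx_dw_f(x, neuron_idx, parameters),
        eval_approx_da_f(x, neuron_idx, parameters),
        eval_approx_db_f(x, neuron_idx, parameters)
    };
    for(size_t k = 0; k < 3; ++k){
        size_t idx = 3 * neuron_idx + k;
        double saved = parameters[idx];
        parameters[idx] = saved + h;
        double fp = eval_approx_f(x, n_inner_neurons, parameters);
        parameters[idx] = saved - h;
        double fm = eval_approx_f(x, n_inner_neurons, parameters);
        parameters[idx] = saved;
        printf("partial %d: analytic %15.8f, finite difference %15.8f\n",
               (int)k, analytic[k], (fp - fm) / (2.0 * h));
    }
}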
double eval_error_function(std::vector<double> &parameters, size_t n_inner_neurons, std::vector<double> test_points){
double output = 0.0, approx, frac = 1.0 / (test_points.size());
for(auto x: test_points){
/* governing equation */
approx = 4.0 * eval_approx_f(x, n_inner_neurons, parameters) + 4.0 * eval_approx_df(x, n_inner_neurons, parameters) + eval_approx_ddf(x, n_inner_neurons, parameters);
output += (0.0 - approx) * (0.0 - approx) * frac;
}
/* BC */
approx = eval_approx_f(0.0, n_inner_neurons, parameters);
output += (1.0 - approx) * (1.0 - approx);
approx = eval_approx_df(0.0, n_inner_neurons, parameters);
output += (1.0 - approx) * (1.0 - approx);
return output;
}
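/*
 * Illustrative usage only (not called anywhere in this file): evaluates the
 * error functional at the "optimal NN setting with biases" listed in the
 * header comment. The parameter layout is {w1, a1, b1, w2, a2, b2}; the
 * helper name and the 10-point uniform grid on [0, 4] are assumptions.
 */
void print_error_of_documented_optimum(){
    std::vector<double> optimum = { 6.75296220,  1.71242130, -1.63419516,
                                    1.86917877, -1.70757578,  1.09972747 };
    std::vector<double> grid(10);
    for(size_t i = 0; i < grid.size(); ++i){
        grid[i] = 4.0 * (double)i / (grid.size() - 1);
    }
    printf("Error at the documented optimum: %10.8f\n",
           eval_error_function(optimum, 2, grid));
}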
void test_analytical_gradient_y(std::vector<double> &guess, double accuracy, size_t n_inner_neurons, size_t train_size, bool opti_w, bool opti_b, double d1_s, double d1_e,
size_t test_size, double ts, double te) {
/* SETUP OF THE TRAINING DATA */
std::vector<double> inp, out;
double frac, alpha, x;
double grad_norm = accuracy * 10.0, mem, ai, bi, wi, error, derror, approx, xj, gamma, total_error, sk, sy, sx, beta;
double grad_norm_prev = grad_norm;
size_t i, j, iter_idx = 0;
/* Chebyshev-like distribution of the training points over [d1_s, d1_e] */
std::vector<double> data_points(train_size);
alpha = PI / (train_size - 1);
frac = 0.5 * (d1_e - d1_s);
for(i = 0; i < train_size; ++i){
x = (std::cos(PI - alpha * i) + 1.0) * frac + d1_s;
data_points[i] = x;
}
std::vector<double> *gradient_current = new std::vector<double>(3 * n_inner_neurons);
std::vector<double> *gradient_prev = new std::vector<double>(3 * n_inner_neurons);
std::vector<double> *params_current = new std::vector<double>(guess);
std::vector<double> *params_prev = new std::vector<double>(guess);
std::vector<double> *conjugate_direction_current = new std::vector<double>(3 * n_inner_neurons);
std::vector<double> *conjugate_direction_prev = new std::vector<double>(3 * n_inner_neurons);
std::vector<double> *ptr_mem;
std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
std::fill(gradient_prev->begin(), gradient_prev->end(), 0.0);
std::fill(conjugate_direction_current->begin(), conjugate_direction_current->end(), 0.0);
std::fill(conjugate_direction_prev->begin(), conjugate_direction_prev->end(), 0.0);
for (i = 0; i < n_inner_neurons; ++i) {
wi = (*params_current)[3 * i];
ai = (*params_current)[3 * i + 1];
bi = (*params_current)[3 * i + 2];
printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", (int)(i + 1), wi, bi, ai);
}
gamma = 1.0;
while( grad_norm > accuracy) {
iter_idx++;
/* current gradient */
std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
/* error boundary condition: y(0) = 1 => e1 = (1 - y(0))^2 */
xj = 0.0;
mem = (1.0 - eval_approx_f(xj, n_inner_neurons, *params_current));
derror = 2.0 * mem;
total_error = mem * mem;
for(i = 0; i < n_inner_neurons; ++i){
if(opti_w){
(*gradient_current)[3 * i] -= derror * eval_approx_dw_f(xj, i, *params_current);
(*gradient_current)[3 * i + 1] -= derror * eval_approx_da_f(xj, i, *params_current);
}
if(opti_b){
(*gradient_current)[3 * i + 2] -= derror * eval_approx_db_f(xj, i, *params_current);
}
}
// for(auto e: *gradient_current){
// printf("[%10.8f]", e);
// }
// printf("\n");
/* error boundary condition: y'(0) = 1 => e2 = (1 - y'(0))^2 */
mem = (1.0 - eval_approx_df(xj, n_inner_neurons, *params_current));
derror = 2.0 * mem;
total_error += mem * mem;
for(i = 0; i < n_inner_neurons; ++i){
if(opti_w){
(*gradient_current)[3 * i] -= derror * eval_approx_dw_df(xj, i, *params_current);
(*gradient_current)[3 * i + 1] -= derror * eval_approx_da_df(xj, i, *params_current);
}
if(opti_b){
(*gradient_current)[3 * i + 2] -= derror * eval_approx_db_df(xj, i, *params_current);
}
}
// for(auto e: *gradient_current){
// printf("[%10.8f]", e);
// }
// printf("\n");
for(j = 0; j < data_points.size(); ++j){
xj = data_points[j];
/* error of the governing equation: y''(x) + 4y'(x) + 4y(x) = 0 => e3 = 1/n * (0 - y''(x) - 4y'(x) - 4y(x))^2 */
approx= eval_approx_ddf(xj, n_inner_neurons, *params_current) + 4.0 * eval_approx_df(xj, n_inner_neurons, *params_current) + 4.0 * eval_approx_f(xj, n_inner_neurons, *params_current);
mem = 0.0 - approx;
error = 2.0 * mem / train_size;
for(i = 0; i < n_inner_neurons; ++i){
if(opti_w){
(*gradient_current)[3 * i] -= error * (eval_approx_dw_ddf(xj, i, *params_current) + 4.0 * eval_approx_dw_df(xj, i, *params_current) + 4.0 * eval_approx_dw_f(xj, i, *params_current));
(*gradient_current)[3 * i + 1] -= error * (eval_approx_da_ddf(xj, i, *params_current) + 4.0 * eval_approx_da_df(xj, i, *params_current) + 4.0 * eval_approx_da_f(xj, i, *params_current));
}
if(opti_b){
(*gradient_current)[3 * i + 2] -= error * (eval_approx_db_ddf(xj, i, *params_current) + 4.0 * eval_approx_db_df(xj, i, *params_current) + 4.0 * eval_approx_db_f(xj, i, *params_current));
}
}
total_error += mem * mem / train_size;
}
// /* error of the unknown function: y(x) = e^(-2x)*(3x+1) => e3 = 1/n * (e^(-2x)*(3x+1) - y(x))^2 */
// for(j = 0; j < data_points.size(); ++j) {
// xj = data_points[i];
// approx= eval_approx_f(xj, n_inner_neurons, *params_current);
// mem = (eval_f(xj) - approx);
// error = 2.0 * mem / train_size;
// for(i = 0; i < n_inner_neurons; ++i){
// if(opti_w){
// (*gradient_current)[3 * i] -= error * (eval_approx_dw_f(xj, i, *params_current));
// (*gradient_current)[3 * i + 1] -= error * (eval_approx_da_f(xj, i, *params_current));
// }
// if(opti_b){
// (*gradient_current)[3 * i + 2] -= error * (eval_approx_db_f(xj, i, *params_current));
// }
// }
// total_error += mem * mem / train_size;
// }
// /* error of the unknown function: y'(x) = e^(-2x)*(1-6x) => e3 = 1/n * (e^(-2x)*(1-6x) - y'(x))^2 */
// for(j = 0; j < data_points.size(); ++j) {
// xj = data_points[i];
// approx= eval_approx_df(xj, n_inner_neurons, *params_current);
// mem = (eval_df(xj) - approx);
// error = 2.0 * mem / train_size;
// for(i = 0; i < n_inner_neurons; ++i){
// if(opti_w){
// (*gradient_current)[3 * i] -= error * (eval_approx_dw_df(xj, i, *params_current));
// (*gradient_current)[3 * i + 1] -= error * (eval_approx_da_df(xj, i, *params_current));
// }
// if(opti_b){
// (*gradient_current)[3 * i + 2] -= error * (eval_approx_db_df(xj, i, *params_current));
// }
// }
// total_error += mem * mem / train_size;
// }
// /* error of the unknown function: y''(x) = 4e^(-2x)*(3x-2) => e3 = 4/n * (e^(-2x)*(3x-2) - y''(x))^2 */
// for(j = 0; j < data_points.size(); ++j) {
// xj = data_points[i];
// approx= eval_approx_ddf(xj, n_inner_neurons, *params_current);
// mem = (eval_ddf(xj) - approx);
// error = 2.0 * mem / train_size;
// for(i = 0; i < n_inner_neurons; ++i){
// if(opti_w){
// (*gradient_current)[3 * i] -= error * (eval_approx_dw_ddf(xj, i, *params_current));
// (*gradient_current)[3 * i + 1] -= error * (eval_approx_da_ddf(xj, i, *params_current));
// }
// if(opti_b){
// (*gradient_current)[3 * i + 2] -= error * (eval_approx_db_ddf(xj, i, *params_current));
// }
// }
//// printf("x: %f -> error: %f\n", xj, error);
// total_error += mem * mem / train_size;
// }
// for(auto e: *gradient_current){
// printf("[%10.8f]", e);
// }
// printf("\n");
/* conjugate direction coefficient (Polak-Ribiere): <-grad_curr, -grad_curr + grad_prev> / <-grad_prev, -grad_prev >*/
/* Update of the parameters */
if(iter_idx < 10 || iter_idx % 1 == 0){ /* note: 'iter_idx % 1 == 0' is always true, so the conjugate-gradient branch below is never taken and the search direction is always the negative gradient */
for(i = 0; i < conjugate_direction_current->size(); ++i){
(*conjugate_direction_current)[i] = - (*gradient_current)[i];
}
}
else{
/* conjugate gradient */
sk = sy = 0.0;
for(i = 0; i < conjugate_direction_current->size(); ++i){
sk += (*gradient_current)[i] * ((*gradient_current)[i] - (*gradient_prev)[i]);
sy += (*gradient_prev)[i] * (*gradient_prev)[i];
}
beta = std::max(0.0, sk / sy);
/* update of the conjugate direction */
for(i = 0; i < conjugate_direction_current->size(); ++i){
(*conjugate_direction_current)[i] = beta * (*conjugate_direction_prev)[i] - (*gradient_current)[i];
}
}
/* step length calculation */
if(iter_idx < 10){
/* fixed step length */
gamma = 0.000001;
}
else{
// /* Barzilai-Borwein */
// sk = sy = 0.0;
//
// for(i = 0; i < gradient_current->size(); ++i){
// sx = (*params_current)[i] - (*params_prev)[i];
// sg = (*conjugate_direction_current)[i] - (*conjugate_direction_prev)[i];
//
// sk += sx * sg;
// sy += sg * sg;
// }
//
// gamma = -sk / sy;
// /* Line search */
//
// gamma = line_search(10.0, conjugate_direction, *params_current, *gradient_current, n_inner_neurons, ds_00);
}
// for(auto e: conjugate_direction){
// printf("[%10.8f]", e);
// }
// printf("\n");
//
// for(auto e: *params_current){
// printf("[%10.8f]", e);
// }
// printf("\n");
/* norm of the gradient calculation */
grad_norm_prev = grad_norm;
/* adaptive step-length */
sk = 0.0;
for(i = 0; i < gradient_current->size(); ++i){
sx = (*gradient_current)[i] - (*gradient_prev)[i];
sk += sx * sx;
}
sk = std::sqrt(sk);
if(sk <= 1e-3 || grad_norm < grad_norm_prev){
/* movement on a line */
/* new slope is less steep, speed up */
gamma *= 1.0005;
}
else if(grad_norm > grad_norm_prev){
/* new slope is more steep, slow down*/
gamma /= 1.0005;
}
else{
gamma /= 1.005;
}
grad_norm = 0.0;
for(auto v: *gradient_current){
grad_norm += v * v;
}
grad_norm = std::sqrt(grad_norm);
for(i = 0; i < gradient_current->size(); ++i){
// (*params_prev)[i] = (*params_current)[i] - gamma * (*gradient_current)[i];
(*params_prev)[i] = (*params_current)[i] + gamma * (*conjugate_direction_current)[i];
}
// printf("\n");
/* switcheroo */
ptr_mem = gradient_prev;
gradient_prev = gradient_current;
gradient_current = ptr_mem;
ptr_mem = params_prev;
params_prev = params_current;
params_current = ptr_mem;
ptr_mem = conjugate_direction_prev;
conjugate_direction_prev = conjugate_direction_current;
conjugate_direction_current = ptr_mem;
// for (i = 0; i < n_inner_neurons; ++i) {
// wi = (*params_current)[3 * i];
// ai = (*params_current)[3 * i + 1];
// bi = (*params_current)[3 * i + 2];
//
// printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", i + 1, wi, bi, ai);
// }
if(iter_idx % 100 == 0){
printf("Iteration %12d. Step size: %15.8f, Gradient norm: %15.8f. Total error: %10.8f (%10.8f)\n", (int)iter_idx, gamma, grad_norm, total_error, eval_error_function(*params_prev, n_inner_neurons, data_points));
// for (i = 0; i < n_inner_neurons; ++i) {
// wi = (*params_current)[3 * i];
// ai = (*params_current)[3 * i + 1];
// bi = (*params_current)[3 * i + 2];
//
// printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", i + 1, wi, bi, ai);
// }
// std::cout << "-----------------------------" << std::endl;
std::cout.flush();
}
printf("Iteration %12d. Step size: %15.8f, Gradient norm: %15.8f. Total error: %10.8f\r\n",(int) iter_idx, gamma, grad_norm, eval_error_function(*params_current, n_inner_neurons, data_points));
std::cout.flush();
for (i = 0; i < n_inner_neurons; ++i) {
wi = (*params_current)[3 * i];
ai = (*params_current)[3 * i + 1];
bi = (*params_current)[3 * i + 2];
printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", (int)(i + 1), wi, bi, ai);
}
printf("\n--------------------------------------------------\ntest output for gnuplot\n--------------------------------------------------\n");
if(total_error < 1e-3 || true){
/* ISOTROPIC TEST SET */
frac = (te - ts) / (test_size - 1);
for(j = 0; j < test_size; ++j){
xj = frac * j + ts;
std::cout << j + 1 << " " << xj << " " << eval_f(xj) << " " << eval_approx_f(xj, n_inner_neurons, *params_current) << " " << eval_df(xj) << " " << eval_approx_df(xj, n_inner_neurons, *params_current) << " " << eval_ddf(xj) << " " << eval_approx_ddf(xj, n_inner_neurons, *params_current) << std::endl;
}
}
/* error analysis */
double referential_error = 0.0;
mem = eval_approx_df(0.0, n_inner_neurons, *params_current);
referential_error += (mem - 1.0) * (mem - 1.0);
mem = eval_approx_f(0.0, n_inner_neurons, *params_current);
referential_error += (mem - 1.0) * (mem - 1.0);
frac = 1.0 / train_size;
for(j = 0; j < data_points.size(); ++j){
xj = data_points[j];
mem = 4.0 * eval_approx_f(xj, n_inner_neurons, *params_current) + 4.0 * eval_approx_df(xj, n_inner_neurons, *params_current) + eval_approx_ddf(xj, n_inner_neurons, *params_current);
referential_error += mem * mem * frac;
printf("Total error (as used in the NN example): %10.8f\n", referential_error);
delete gradient_current;
delete gradient_prev;
delete params_current;
delete params_prev;
delete conjugate_direction_current;
delete conjugate_direction_prev;
}

void test_odr(size_t n_inner_neurons){
size_t n_inputs = 1;
size_t n_equations = 3;
DESolver solver_01( n_equations, n_inputs, n_inner_neurons );

/* SETUP OF THE EQUATIONS */
MultiIndex alpha_0( n_inputs );
MultiIndex alpha_1( n_inputs );
MultiIndex alpha_2( n_inputs );
alpha_2.set_partial_derivative(0, 2);
alpha_1.set_partial_derivative(0, 1);
/* the governing differential equation */
solver_01.add_to_differential_equation( 0, alpha_2, 1.0 );
solver_01.add_to_differential_equation( 0, alpha_1, 4.0 );
solver_01.add_to_differential_equation( 0, alpha_0, 4.0 );
/* dirichlet boundary condition */
solver_01.add_to_differential_equation( 1, alpha_0, 1.0 );
/* neumann boundary condition */
solver_01.add_to_differential_equation( 2, alpha_1, 1.0 );
/* SETUP OF THE TRAINING DATA */
std::vector<double> inp, out;

/* TRAIN DATA FOR THE GOVERNING DE */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_g;
/* ISOTROPIC TRAIN SET */
frac = (d1_e - d1_s) / (train_size - 1);
for(unsigned int i = 0; i < train_size; ++i){
inp = {frac * i};
out = {0.0};
data_vec_g.emplace_back(std::make_pair(inp, out));
}
/* CHEBYSCHEV TRAIN SET */
// alpha = PI / (train_size - 1);
// frac = 0.5 * (d1_e - d1_s);
// for(unsigned int i = 0; i < train_size; ++i){
// inp = {(std::cos(alpha * i) + 1.0) * frac + d1_s};
// out = {0.0};
// data_vec_g.emplace_back(std::make_pair(inp, out));
// }

DataSet ds_00(&data_vec_g);
/* TRAIN DATA FOR DIRICHLET BC */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_y;
inp = {0.0};
out = {1.0};
data_vec_y.emplace_back(std::make_pair(inp, out));

DataSet ds_01(&data_vec_y);
/* TRAIN DATA FOR NEUMANN BC */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_dy;
inp = {0.0};
out = {1.0};
data_vec_dy.emplace_back(std::make_pair(inp, out));

DataSet ds_02(&data_vec_dy);

/* Placing the conditions into the solver */
solver_01.set_error_function( 0, ErrorFunctionType::ErrorFuncMSE, &ds_00 );
solver_01.set_error_function( 1, ErrorFunctionType::ErrorFuncMSE, &ds_01 );
solver_01.set_error_function( 2, ErrorFunctionType::ErrorFuncMSE, &ds_02 );

/* PARTICLE SWARM TRAINING METHOD SETUP */
//must encapsulate each of the partial error functions
double *domain_bounds = new double[ 6 * n_inner_neurons ];
for(unsigned int i = 0; i < 3 * n_inner_neurons; ++i){
domain_bounds[2 * i] = -800.0;
domain_bounds[2 * i + 1] = 800.0;
}

double gamma = 0.5, epsilon = 0.02, delta = 0.9;

solver_01.solve_via_particle_swarm( domain_bounds, c1, c2, w, n_particles, max_iters, gamma, epsilon, delta );

NeuralNetwork *solution = solver_01.get_solution();
std::vector<double> parameters(3 * n_inner_neurons);//w1, a1, b1, w2, a2, b2, ... , wm, am, bm
std::vector<double> *weight_params = solution->get_parameter_ptr_weights();
std::vector<double> *biases_params = solution->get_parameter_ptr_biases();
for(size_t i = 0; i < n_inner_neurons; ++i){
parameters[3 * i] = weight_params->at(i);
parameters[3 * i + 1] = weight_params->at(i + n_inner_neurons);
parameters[3 * i + 2] = biases_params->at(i);
}
for(unsigned int i = 0; i < n_test_points; ++i){
x = i * ((d1_e - d1_s) / (n_test_points - 1)) + d1_s;
input[0] = x;
std::cout << i + 1 << " " << x << " " << std::pow(E, -2*x) * (3*x + 1)<< " " << output[0] << " " << std::pow(E, -2*x) * (1 - 6*x)<< " " << eval_approx_df(x, n_inner_neurons, parameters) << " " << 4 * std::pow(E, -2*x) * (3*x - 2)<< " " << eval_approx_ddf(x, n_inner_neurons, parameters) << std::endl;
}
}
// bool optimize_weights = true;
// bool optimize_biases = true;
// unsigned int train_size = 150;
// double accuracy = 1e-5;
// double ds = 0.0;
// double de = 4.0;
//
// unsigned int test_size = 300;
// double ts = ds;
// double te = de;
//
//// std::vector<double> init_guess = {0.35088209, -0.23738505, 0.14160885, 3.72785473, -6.45758308, 1.73769138};
// std::vector<double> init_guess(3 * n_inner_neurons);
//
// std::random_device seeder;
// std::mt19937 gen(seeder());
// std::uniform_real_distribution<double> dist(-1.0, 1.0);
// for(unsigned int i = 0; i < 3 * n_inner_neurons; ++i){
// init_guess[i] = dist(gen);
// init_guess[i] = dist(gen);
// }
// if(!optimize_biases){
// for(unsigned int i = 0; i < n_inner_neurons; ++i){
// init_guess[3 * i + 2] = 0.0;
// }
// }
// if(!optimize_weights){
// for(unsigned int i = 0; i < n_inner_neurons; ++i){
// init_guess[3 * i] = 0.0;
// init_guess[3 * i + 1] = 0.0;
// }
// }
//
// test_analytical_gradient_y(init_guess, accuracy, n_inner_neurons, train_size, optimize_weights, optimize_biases, ds, de, test_size, ts, te);