Newer
Older
/**
* Example solving the following ODE:
*
* g(t) = (d^2/dt^2)y(t) + 4 (d/dt)y(t) + 4y(t) = 0, for t in [0, 4]
* y(0) = 1
* (d/dt)y(0) = 1
*
* -------------------------------------------
* Analytical solution: e^(-2x) * (3x + 1)
* NN representation: sum over [a_i * (1 + e^(-x * w_i + b_i))^(-1)]
* -------------------------------------------
* Optimal NN setting with biases (2 inner neurons)
 *
* Path 1. w = -1.66009975, b = -0.40767447, a = 2.46457042
* Path 2. w = -4.38622765, b = 2.75707816, a = -8.04752347
* @author Michal Kravčenko
* @date 17.7.18 -
*/

Michal Kravcenko
committed
#include "Solvers/DESolver.h"
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
/** Analytical solution of the ODE: y(x) = e^(-2x) * (3x + 1). */
double eval_f(double x){
    /* std::exp is clearer, faster and more precise than std::pow(E, ...) */
    return std::exp(-2.0 * x) * (3.0 * x + 1.0);
}
/** First derivative of the analytical solution: y'(x) = e^(-2x) * (1 - 6x). */
double eval_df(double x){
    /* std::exp instead of std::pow(E, ...) */
    return std::exp(-2.0 * x) * (1.0 - 6.0 * x);
}
/** Second derivative of the analytical solution: y''(x) = 4 e^(-2x) * (3x - 2). */
double eval_ddf(double x){
    /* std::exp instead of std::pow(E, ...) */
    return 4.0 * std::exp(-2.0 * x) * (3.0 * x - 2.0);
}
/**
 * Neural-network approximation of y(x): sum_i a_i / (1 + e^(b_i - w_i * x)).
 * @param x evaluation point
 * @param n_inner_neurons number of inner neurons (parameter triplets)
 * @param parameters flat layout [w_0, a_0, b_0, w_1, a_1, b_1, ...]
 * @return approximated y(x)
 */
double eval_approx_f(double x, size_t n_inner_neurons, const std::vector<double> &parameters){
    double value = 0.0;
    for(size_t i = 0; i < n_inner_neurons; ++i){
        const double wi = parameters[3 * i];
        const double ai = parameters[3 * i + 1];
        const double bi = parameters[3 * i + 2];
        const double ei = std::exp(bi - wi * x); /* e^(b - w x) */
        value += ai / (ei + 1.0);
    }
    return value;
}
/**
 * First derivative of the NN approximation w.r.t. x:
 * sum_i a_i * w_i * e^(b_i - w_i x) / (1 + e^(b_i - w_i x))^2.
 * @param parameters flat layout [w_0, a_0, b_0, ...]
 */
double eval_approx_df(double x, size_t n_inner_neurons, const std::vector<double> &parameters){
    double value = 0.0;
    for(size_t i = 0; i < n_inner_neurons; ++i){
        const double wi = parameters[3 * i];
        const double ai = parameters[3 * i + 1];
        const double bi = parameters[3 * i + 2];
        const double ei = std::exp(bi - wi * x);
        const double ei1 = ei + 1.0;
        value += ai * wi * ei / (ei1 * ei1);
    }
    return value;
}
/**
 * Second derivative of the NN approximation w.r.t. x:
 * sum_i -a_i w_i^2 e^(b_i) e^(w_i x) (e^(w_i x) - e^(b_i)) / (e^(b_i) + e^(w_i x))^3.
 * @param parameters flat layout [w_0, a_0, b_0, ...]
 */
double eval_approx_ddf(double x, size_t n_inner_neurons, const std::vector<double> &parameters){
    double value = 0.0;
    for(size_t i = 0; i < n_inner_neurons; ++i){
        const double wi = parameters[3 * i];
        const double ai = parameters[3 * i + 1];
        const double bi = parameters[3 * i + 2];
        const double eb = std::exp(bi);       /* e^b   */
        const double ewx = std::exp(wi * x);  /* e^(wx) */
        const double s = eb + ewx;
        value += -(ai * wi * wi * eb * ewx * (ewx - eb)) / (s * s * s);
    }
    return value;
}
//NN partial derivative (wi): (ai * x * e^(bi - wi * x)) * (e^(bi - wi * x) + 1)^(-2)
/** Partial derivative of the NN output w.r.t. weight w_i of neuron `neuron_idx`. */
double eval_approx_dw_f(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi = parameters[3 * neuron_idx];
    const double ai = parameters[3 * neuron_idx + 1];
    const double bi = parameters[3 * neuron_idx + 2];
    const double ei = std::exp(bi - wi * x);
    const double ei1 = ei + 1.0;
    return (ai * x * ei) / (ei1 * ei1);
}
//dNN partial derivative (wi): -(a w x e^(b - w x))/(e^(b - w x) + 1)^2 + (2 a w x e^(2 b - 2 w x))/(e^(b - w x) + 1)^3 + (a e^(b - w x))/(e^(b - w x) + 1)^2
/** Partial derivative of the NN first derivative w.r.t. weight w_i. */
double eval_approx_dw_df(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi = parameters[3 * neuron_idx];
    const double ai = parameters[3 * neuron_idx + 1];
    const double bi = parameters[3 * neuron_idx + 2];
    const double ei = std::exp(bi - wi * x);
    const double ei1 = ei + 1.0;
    return -(ai * wi * x * ei) / (ei1 * ei1)
           + (2.0 * ai * wi * x * ei * ei) / (ei1 * ei1 * ei1)
           + (ai * ei) / (ei1 * ei1);
}
//ddNN partial derivative (wi): -(a w^2 x e^(b + 2 w x))/(e^b + e^(w x))^3 - (a w^2 x e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3 + (3 a w^2 x e^(b + 2 w x) (e^(w x) - e^b))/(e^b + e^(w x))^4 - (2 a w e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
/** Partial derivative of the NN second derivative w.r.t. weight w_i. */
double eval_approx_dw_ddf(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi = parameters[3 * neuron_idx];
    const double ai = parameters[3 * neuron_idx + 1];
    const double bi = parameters[3 * neuron_idx + 2];
    const double eb = std::exp(bi);
    const double ewx = std::exp(wi * x);
    /* named sub-expressions keep the four-term formula above readable */
    const double s = eb + ewx;
    const double s3 = s * s * s;
    const double s4 = s3 * s;
    const double d = ewx - eb; /* e^(w x) - e^b */
    return -(ai * wi * wi * x * eb * ewx * ewx) / s3
           - (ai * wi * wi * x * eb * ewx * d) / s3
           + (3.0 * ai * wi * wi * x * eb * ewx * ewx * d) / s4
           - (2.0 * ai * wi * eb * ewx * d) / s3;
}
//NN partial derivative (ai): (1 + e^(-x * wi + bi))^(-1)
/** Partial derivative of the NN output w.r.t. linear coefficient a_i. */
double eval_approx_da_f(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi = parameters[3 * neuron_idx];
    const double bi = parameters[3 * neuron_idx + 2];
    const double ei = std::exp(bi - wi * x);
    return 1.0 / (ei + 1.0);
}
//dNN partial derivative (ai): (w e^(b - w x))/(e^(b - w x) + 1)^2
/** Partial derivative of the NN first derivative w.r.t. linear coefficient a_i. */
double eval_approx_da_df(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi = parameters[3 * neuron_idx];
    const double bi = parameters[3 * neuron_idx + 2];
    const double ei = std::exp(bi - wi * x);
    const double ei1 = ei + 1.0;
    return (wi * ei) / (ei1 * ei1);
}
//ddNN partial derivative (ai): -(w^2 e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
/** Partial derivative of the NN second derivative w.r.t. linear coefficient a_i. */
double eval_approx_da_ddf(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi = parameters[3 * neuron_idx];
    const double bi = parameters[3 * neuron_idx + 2];
    const double eip = std::exp(bi + wi * x); /* e^(b + w x) */
    const double eb = std::exp(bi);
    const double ewx = std::exp(wi * x);
    const double s = eb + ewx;
    return -(wi * wi * eip * (ewx - eb)) / (s * s * s);
}
//NN partial derivative (bi): -(ai * e^(bi - wi * x)) * (e^(bi - wi * x) + 1)^(-2)
/** Partial derivative of the NN output w.r.t. bias b_i. */
double eval_approx_db_f(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi = parameters[3 * neuron_idx];
    const double ai = parameters[3 * neuron_idx + 1];
    const double bi = parameters[3 * neuron_idx + 2];
    const double ei = std::exp(bi - wi * x);
    const double ei1 = ei + 1.0;
    return -(ai * ei) / (ei1 * ei1);
}
//dNN partial derivative (bi): (a w e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
/** Partial derivative of the NN first derivative w.r.t. bias b_i. */
double eval_approx_db_df(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi = parameters[3 * neuron_idx];
    const double ai = parameters[3 * neuron_idx + 1];
    const double bi = parameters[3 * neuron_idx + 2];
    const double eb = std::exp(bi);
    const double ewx = std::exp(wi * x);
    const double s = eb + ewx;
    return (ai * wi * eb * ewx * (ewx - eb)) / (s * s * s);
}
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
//ddNN partial derivative (bi): -(a w^2 e^(b + w x) (-4 e^(b + w x) + e^(2 b) + e^(2 w x)))/(e^b + e^(w x))^4
/** Partial derivative of the NN second derivative w.r.t. bias b_i. */
double eval_approx_db_ddf(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi = parameters[3 * neuron_idx];
    const double ai = parameters[3 * neuron_idx + 1];
    const double bi = parameters[3 * neuron_idx + 2];
    const double eb = std::exp(bi);
    const double ewx = std::exp(wi * x);
    const double s = eb + ewx;
    return -(ai * wi * wi * eb * ewx * (-4.0 * eb * ewx + eb * eb + ewx * ewx)) / (s * s * s * s);
}
/**
 * Total error of the NN approximation of the ODE y'' + 4y' + 4y = 0:
 * mean squared residual of the governing equation over `test_points`
 * plus the squared errors of both boundary conditions y(0) = 1, y'(0) = 1.
 * @param parameters flat layout [w_0, a_0, b_0, ...]
 * @param n_inner_neurons number of inner neurons
 * @param test_points collocation points for the governing equation
 */
double eval_error_function(std::vector<double> &parameters, size_t n_inner_neurons, const std::vector<double> &test_points){
    double output = 0.0;
    if(!test_points.empty()){
        /* guard avoids division by zero for an empty collocation set */
        const double frac = 1.0 / test_points.size();
        for(const double x: test_points){
            /* governing equation residual */
            const double approx = 4.0 * eval_approx_f(x, n_inner_neurons, parameters)
                                + 4.0 * eval_approx_df(x, n_inner_neurons, parameters)
                                + eval_approx_ddf(x, n_inner_neurons, parameters);
            output += approx * approx * frac;
        }
    }
    /* BC: y(0) = 1 */
    double approx = eval_approx_f(0.0, n_inner_neurons, parameters);
    output += (1.0 - approx) * (1.0 - approx);
    /* BC: y'(0) = 1 */
    approx = eval_approx_df(0.0, n_inner_neurons, parameters);
    output += (1.0 - approx) * (1.0 - approx);
    return output;
}
void test_analytical_gradient_y(std::vector<double> &guess, double accuracy, size_t n_inner_neurons, size_t train_size, bool opti_w, bool opti_b, double d1_s, double d1_e,
size_t test_size, double ts, double te) {
/* SETUP OF THE TRAINING DATA */
std::vector<double> inp, out;

Michal Kravcenko
committed
double grad_norm = accuracy * 10.0, mem, ai, bi, wi, error, derror, approx, xj, gamma, total_error, sk, sy, sx, sg, beta;
double grad_norm_prev = grad_norm;
size_t i, j, iter_idx = 0;

Michal Kravcenko
committed
frac = (d1_e - d1_s) / (train_size - 1);
for(unsigned int i = 0; i < train_size; ++i){
data_points[i] = frac * i;
}
// /* CHEBYSCHEV TRAIN SET */
// alpha = PI / (train_size );
// frac = 0.5 * (d1_e - d1_s);
// for(i = 0; i < train_size; ++i){
// x = (std::cos(PI - alpha * i) + 1.0) * frac + d1_s;
// data_points[i] = x;

Michal Kravcenko
committed
// DataSet ds(0.0, 4.0, train_size, 0.0);
std::vector<double> *gradient_current = new std::vector<double>(3 * n_inner_neurons);
std::vector<double> *gradient_prev = new std::vector<double>(3 * n_inner_neurons);
std::vector<double> *params_current = new std::vector<double>(guess);
std::vector<double> *params_prev = new std::vector<double>(guess);
std::vector<double> *conjugate_direction_current = new std::vector<double>(3 * n_inner_neurons);
std::vector<double> *conjugate_direction_prev = new std::vector<double>(3 * n_inner_neurons);
std::vector<double> *ptr_mem;
std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
std::fill(gradient_prev->begin(), gradient_prev->end(), 0.0);
std::fill(conjugate_direction_current->begin(), conjugate_direction_current->end(), 0.0);
std::fill(conjugate_direction_prev->begin(), conjugate_direction_prev->end(), 0.0);
for (i = 0; i < n_inner_neurons; ++i) {
wi = (*params_current)[3 * i];
ai = (*params_current)[3 * i + 1];
bi = (*params_current)[3 * i + 2];
printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", (int)(i + 1), wi, bi, ai);
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
gamma = 1.0;
while( grad_norm > accuracy) {
iter_idx++;
/* current gradient */
std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
/* error boundary condition: y(0) = 1 => e1 = (1 - y(0))^2 */
xj = 0.0;
mem = (1.0 - eval_approx_f(xj, n_inner_neurons, *params_current));
derror = 2.0 * mem;
total_error = mem * mem;
for(i = 0; i < n_inner_neurons; ++i){
if(opti_w){
(*gradient_current)[3 * i] -= derror * eval_approx_dw_f(xj, i, *params_current);
(*gradient_current)[3 * i + 1] -= derror * eval_approx_da_f(xj, i, *params_current);
}
if(opti_b){
(*gradient_current)[3 * i + 2] -= derror * eval_approx_db_f(xj, i, *params_current);
}
}
// for(auto e: *gradient_current){
// printf("[%10.8f]", e);
// }
// printf("\n");
/* error boundary condition: y'(0) = 1 => e2 = (1 - y'(0))^2 */
mem = (1.0 - eval_approx_df(xj, n_inner_neurons, *params_current));
derror = 2.0 * mem;
total_error += mem * mem;
for(i = 0; i < n_inner_neurons; ++i){
if(opti_w){
(*gradient_current)[3 * i] -= derror * eval_approx_dw_df(xj, i, *params_current);
(*gradient_current)[3 * i + 1] -= derror * eval_approx_da_df(xj, i, *params_current);
}
if(opti_b){
(*gradient_current)[3 * i + 2] -= derror * eval_approx_db_df(xj, i, *params_current);
}
}
// for(auto e: *gradient_current){
// printf("[%10.8f]", e);
// }
// printf("\n");
for(j = 0; j < data_points.size(); ++j){

Michal Kravcenko
committed
xj = data_points[j];
// xj = ds.get_data()->at(j).first[0];
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
/* error of the governing equation: y''(x) + 4y'(x) + 4y(x) = 0 => e3 = 1/n * (0 - y''(x) - 4y'(x) - 4y(x))^2 */
approx= eval_approx_ddf(xj, n_inner_neurons, *params_current) + 4.0 * eval_approx_df(xj, n_inner_neurons, *params_current) + 4.0 * eval_approx_f(xj, n_inner_neurons, *params_current);
mem = 0.0 - approx;
error = 2.0 * mem / train_size;
for(i = 0; i < n_inner_neurons; ++i){
if(opti_w){
(*gradient_current)[3 * i] -= error * (eval_approx_dw_ddf(xj, i, *params_current) + 4.0 * eval_approx_dw_df(xj, i, *params_current) + 4.0 * eval_approx_dw_f(xj, i, *params_current));
(*gradient_current)[3 * i + 1] -= error * (eval_approx_da_ddf(xj, i, *params_current) + 4.0 * eval_approx_da_df(xj, i, *params_current) + 4.0 * eval_approx_da_f(xj, i, *params_current));
}
if(opti_b){
(*gradient_current)[3 * i + 2] -= error * (eval_approx_db_ddf(xj, i, *params_current) + 4.0 * eval_approx_db_df(xj, i, *params_current) + 4.0 * eval_approx_db_f(xj, i, *params_current));
}
}
total_error += mem * mem / train_size;
}
// for(auto e: *gradient_current){
// printf("[%10.8f]", e);
// }
// printf("\n");
/* conjugate direction coefficient (Polak-Ribiere): <-grad_curr, -grad_curr + grad_prev> / <-grad_prev, -grad_prev >*/
/* Update of the parameters */
if(iter_idx < 10 || iter_idx % 1 == 0){
for(i = 0; i < conjugate_direction_current->size(); ++i){
(*conjugate_direction_current)[i] = - (*gradient_current)[i];
}
}
else{
/* conjugate gradient */
sk = sy = 0.0;
for(i = 0; i < conjugate_direction_current->size(); ++i){
sk += (*gradient_current)[i] * ((*gradient_current)[i] - (*gradient_prev)[i]);
sy += (*gradient_prev)[i] * (*gradient_prev)[i];
}
beta = std::max(0.0, sk / sy);
/* update of the conjugate direction */
for(i = 0; i < conjugate_direction_current->size(); ++i){
(*conjugate_direction_current)[i] = beta * (*conjugate_direction_prev)[i] - (*gradient_current)[i];
}
}
/* step length calculation */
if(iter_idx < 10){
/* fixed step length */
gamma = 0.000001;
}
else{
// /* Barzilai-Borwein */
// sk = sy = 0.0;
//
// for(i = 0; i < gradient_current->size(); ++i){
// sx = (*params_current)[i] - (*params_prev)[i];
// sg = (*conjugate_direction_current)[i] - (*conjugate_direction_prev)[i];
//
// sk += sx * sg;
// sy += sg * sg;
// }
//
// gamma = -sk / sy;
// /* Line search */
//
// gamma = line_search(10.0, conjugate_direction, *params_current, *gradient_current, n_inner_neurons, ds_00);
}
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
// for(auto e: conjugate_direction){
// printf("[%10.8f]", e);
// }
// printf("\n");
//
// for(auto e: *params_current){
// printf("[%10.8f]", e);
// }
// printf("\n");
/* norm of the gradient calculation */
grad_norm_prev = grad_norm;
/* adaptive step-length */
sk = 0.0;
for(i = 0; i < gradient_current->size(); ++i){
sx = (*gradient_current)[i] - (*gradient_prev)[i];
sk += sx * sx;
}
sk = std::sqrt(sk);
if(sk <= 1e-3 || grad_norm < grad_norm_prev){
/* movement on a line */
/* new slope is less steep, speed up */
gamma *= 1.0005;
}
else if(grad_norm > grad_norm_prev){
/* new slope is more steep, slow down*/
gamma /= 1.0005;
}
else{
gamma /= 1.005;
}
grad_norm = 0.0;
for(auto v: *gradient_current){
grad_norm += v * v;
}
grad_norm = std::sqrt(grad_norm);
for(i = 0; i < gradient_current->size(); ++i){
// (*params_prev)[i] = (*params_current)[i] - gamma * (*gradient_current)[i];
(*params_prev)[i] = (*params_current)[i] + gamma * (*conjugate_direction_current)[i];
}
// printf("\n");
/* switcheroo */
ptr_mem = gradient_prev;
gradient_prev = gradient_current;
gradient_current = ptr_mem;
ptr_mem = params_prev;
params_prev = params_current;
params_current = ptr_mem;
ptr_mem = conjugate_direction_prev;
conjugate_direction_prev = conjugate_direction_current;
conjugate_direction_current = ptr_mem;
// for (i = 0; i < n_inner_neurons; ++i) {
// wi = (*params_current)[3 * i];
// ai = (*params_current)[3 * i + 1];
// bi = (*params_current)[3 * i + 2];
//
// printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", i + 1, wi, bi, ai);
// }
if(iter_idx % 100 == 0){

Michal Kravcenko
committed
printf("Iteration %12d. Step size: %15.8f, Gradient norm: %15.8f. Total error: %10.8f (%10.8f)\r", (int)iter_idx, gamma, grad_norm, total_error, eval_error_function(*params_prev, n_inner_neurons, data_points));
// for (i = 0; i < n_inner_neurons; ++i) {
// wi = (*params_current)[3 * i];
// ai = (*params_current)[3 * i + 1];
// bi = (*params_current)[3 * i + 2];
//
// printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", i + 1, wi, bi, ai);
// }
// std::cout << "-----------------------------" << std::endl;
std::cout.flush();
}
printf("Iteration %12d. Step size: %15.8f, Gradient norm: %15.8f. Total error: %10.8f\r\n",(int) iter_idx, gamma, grad_norm, eval_error_function(*params_current, n_inner_neurons, data_points));
std::cout.flush();
for (i = 0; i < n_inner_neurons; ++i) {
wi = (*params_current)[3 * i];
ai = (*params_current)[3 * i + 1];
bi = (*params_current)[3 * i + 2];
printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", (int)(i + 1), wi, bi, ai);
}
printf("\n--------------------------------------------------\ntest output for gnuplot\n--------------------------------------------------\n");
if(total_error < 1e-3 || true){
/* ISOTROPIC TEST SET */
frac = (te - ts) / (test_size - 1);
for(j = 0; j < test_size; ++j){
xj = frac * j + ts;
std::cout << j + 1 << " " << xj << " " << eval_f(xj) << " " << eval_approx_f(xj, n_inner_neurons, *params_current) << " " << eval_df(xj) << " " << eval_approx_df(xj, n_inner_neurons, *params_current) << " " << eval_ddf(xj) << " " << eval_approx_ddf(xj, n_inner_neurons, *params_current) << std::endl;
}
}
/* error analysis */
double referential_error = 0.0;
mem = eval_approx_df(0.0, n_inner_neurons, *params_current);
referential_error += (mem - 1.0) * (mem - 1.0);
mem = eval_approx_f(0.0, n_inner_neurons, *params_current);
referential_error += (mem - 1.0) * (mem - 1.0);
frac = 1.0 / train_size;
for(j = 0; j < data_points.size(); ++j){

Michal Kravcenko
committed
// xj = ds.get_data()->at(j).first[0];
xj = data_points[i];
mem = 4.0 * eval_approx_f(xj, n_inner_neurons, *params_current) + 4.0 * eval_approx_df(xj, n_inner_neurons, *params_current) + eval_approx_ddf(xj, n_inner_neurons, *params_current);
referential_error += mem * mem * frac;
printf("Total error (as used in the NN example): %10.8f\n", referential_error);
delete gradient_current;
delete gradient_prev;
delete params_current;
delete params_prev;
delete conjugate_direction_current;
delete conjugate_direction_prev;

Michal Kravcenko
committed
void test_odr(double accuracy, size_t n_inner_neurons, size_t train_size, double ds, double de, size_t n_test_points, double ts, double te, size_t max_iters, size_t n_particles){

Michal Kravcenko
committed
/* SOLVER SETUP */
size_t n_inputs = 1;
size_t n_equations = 3;
DESolver solver_01( n_equations, n_inputs, n_inner_neurons );

Michal Kravcenko
committed
/* SETUP OF THE EQUATIONS */
MultiIndex alpha_0( n_inputs );
MultiIndex alpha_1( n_inputs );
MultiIndex alpha_2( n_inputs );
alpha_2.set_partial_derivative(0, 2);
alpha_1.set_partial_derivative(0, 1);
/* the governing differential equation */
solver_01.add_to_differential_equation( 0, alpha_2, 1.0 );
solver_01.add_to_differential_equation( 0, alpha_1, 4.0 );
solver_01.add_to_differential_equation( 0, alpha_0, 4.0 );
/* dirichlet boundary condition */
solver_01.add_to_differential_equation( 1, alpha_0, 1.0 );
/* neumann boundary condition */
solver_01.add_to_differential_equation( 2, alpha_1, 1.0 );
/* SETUP OF THE TRAINING DATA */
std::vector<double> inp, out;

Michal Kravcenko
committed
double d1_s = ds, d1_e = de, frac;

Michal Kravcenko
committed
/* TRAIN DATA FOR THE GOVERNING DE */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_g;

Michal Kravcenko
committed
/* ISOTROPIC TRAIN SET */
frac = (d1_e - d1_s) / (train_size - 1);
for(unsigned int i = 0; i < train_size; ++i){
inp = {frac * i};
out = {0.0};
data_vec_g.emplace_back(std::make_pair(inp, out));
}
/* CHEBYSCHEV TRAIN SET */
// alpha = PI / (train_size - 1);
// frac = 0.5 * (d1_e - d1_s);
// for(unsigned int i = 0; i < train_size; ++i){
// inp = {(std::cos(alpha * i) + 1.0) * frac + d1_s};
// out = {0.0};
// data_vec_g.emplace_back(std::make_pair(inp, out));
// }

Michal Kravcenko
committed
DataSet ds_00(&data_vec_g);
/* TRAIN DATA FOR DIRICHLET BC */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_y;
inp = {0.0};
out = {1.0};
data_vec_y.emplace_back(std::make_pair(inp, out));

Michal Kravcenko
committed
DataSet ds_01(&data_vec_y);
/* TRAIN DATA FOR NEUMANN BC */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_dy;
inp = {0.0};
out = {1.0};
data_vec_dy.emplace_back(std::make_pair(inp, out));

Michal Kravcenko
committed
DataSet ds_02(&data_vec_dy);

Michal Kravcenko
committed
/* Placing the conditions into the solver */
solver_01.set_error_function( 0, ErrorFunctionType::ErrorFuncMSE, &ds_00 );
solver_01.set_error_function( 1, ErrorFunctionType::ErrorFuncMSE, &ds_01 );
solver_01.set_error_function( 2, ErrorFunctionType::ErrorFuncMSE, &ds_02 );

Michal Kravcenko
committed
/* PARTICLE SWARM TRAINING METHOD SETUP */
//must encapsulate each of the partial error functions
double *domain_bounds = new double[ 6 * n_inner_neurons ];
for(unsigned int i = 0; i < 3 * n_inner_neurons; ++i){
domain_bounds[2 * i] = -800.0;
domain_bounds[2 * i + 1] = 800.0;
}

Michal Kravcenko
committed
double c1 = 0.05, c2 = 0.0, w = 0.3;

Michal Kravcenko
committed
double gamma = 0.5, epsilon = 0.0000000000002, delta = 1.1;

Michal Kravcenko
committed

Michal Kravcenko
committed
solver_01.solve_via_particle_swarm( domain_bounds, c1, c2, w, n_particles, max_iters, gamma, epsilon, delta );

Michal Kravcenko
committed
NeuralNetwork *solution = solver_01.get_solution();
std::vector<double> parameters(3 * n_inner_neurons);//w1, a1, b1, w2, a2, b2, ... , wm, am, bm
std::vector<double> *weight_params = solution->get_parameter_ptr_weights();
std::vector<double> *biases_params = solution->get_parameter_ptr_biases();
for(size_t i = 0; i < n_inner_neurons; ++i){
parameters[3 * i] = weight_params->at(i);
parameters[3 * i + 1] = weight_params->at(i + n_inner_neurons);
parameters[3 * i + 2] = biases_params->at(i);
}
for(unsigned int i = 0; i < n_test_points; ++i){
x = i * ((d1_e - d1_s) / (n_test_points - 1)) + d1_s;
input[0] = x;
std::cout << i + 1 << " " << x << " " << std::pow(E, -2*x) * (3*x + 1)<< " " << output[0] << " " << std::pow(E, -2*x) * (1 - 6*x)<< " " << eval_approx_df(x, n_inner_neurons, parameters) << " " << 4 * std::pow(E, -2*x) * (3*x - 2)<< " " << eval_approx_ddf(x, n_inner_neurons, parameters) << std::endl;

Michal Kravcenko
committed
unsigned int n_inner_neurons = 2;
unsigned int train_size = 200;
double accuracy = 1e-4;
double ds = 0.0;
double de = 4.0;

Michal Kravcenko
committed
unsigned int test_size = 300;
double ts = ds;
double te = de + 2;
size_t particle_swarm_max_iters = 1000;
size_t n_particles = 10;
test_odr(accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te, particle_swarm_max_iters, n_particles);
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
// bool optimize_weights = true;
// bool optimize_biases = true;
//
//// std::vector<double> init_guess = {0.35088209, -0.23738505, 0.14160885, 3.72785473, -6.45758308, 1.73769138};
// std::vector<double> init_guess(3 * n_inner_neurons);
//
// std::random_device seeder;
// std::mt19937 gen(seeder());
// std::uniform_real_distribution<double> dist(-1.0, 1.0);
// for(unsigned int i = 0; i < 3 * n_inner_neurons; ++i){
// init_guess[i] = dist(gen);
// init_guess[i] = dist(gen);
// }
// if(!optimize_biases){
// for(unsigned int i = 0; i < n_inner_neurons; ++i){
// init_guess[3 * i + 2] = 0.0;
// }
// }
// if(!optimize_weights){
// for(unsigned int i = 0; i < n_inner_neurons; ++i){
// init_guess[3 * i] = 0.0;
// init_guess[3 * i + 1] = 0.0;
// }
// }
//
// test_analytical_gradient_y(init_guess, accuracy, n_inner_neurons, train_size, optimize_weights, optimize_biases, ds, de, test_size, ts, te);