Newer
Older
/**
* Example solving the following ODE:
*
* g(t) = (d^2/d^t)y(t) + 4 (d/dt)y(t) + 4y(t) = 0, for t in [0, 4]
* y(0) = 1
* (d/dt)y(0) = 1
*
* -------------------------------------------
* Analytical solution: e^(-2x) * (3x + 1)
* NN representation: sum over [a_i * (1 + e^(-x * w_i + b_i))^(-1)]
* -------------------------------------------
* Optimal NN setting with biases (2 inner neurons)

Michal Kravcenko
committed
* Path 1. w = -1.66009975, b = -0.40767447, a = 2.46457042
* Path 2. w = -4.38622765, b = 2.75707816, a = -8.04752347
* @author Michal Kravčenko
* @date 17.7.18 -
*/

Michal Kravcenko
committed
#include "Solvers/DESolver.h"
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
/**
 * Analytical solution of the ODE: y(x) = e^(-2x) * (3x + 1).
 * @param x evaluation point
 * @return exact solution value y(x)
 */
double eval_f(double x){
    /* std::exp is used instead of std::pow(E, ...): no dependency on a
     * project-defined E constant, and it is faster and more accurate */
    return std::exp(-2.0 * x) * (3.0 * x + 1.0);
}
/**
 * First derivative of the analytical solution: y'(x) = e^(-2x) * (1 - 6x).
 * @param x evaluation point
 * @return exact derivative value y'(x)
 */
double eval_df(double x){
    /* std::exp replaces std::pow(E, ...) — no external E constant needed */
    return std::exp(-2.0 * x) * (1.0 - 6.0 * x);
}
/**
 * Second derivative of the analytical solution: y''(x) = 4 e^(-2x) * (3x - 2).
 * @param x evaluation point
 * @return exact second-derivative value y''(x)
 */
double eval_ddf(double x){
    /* std::exp replaces std::pow(E, ...) — no external E constant needed */
    return 4.0 * std::exp(-2.0 * x) * (3.0 * x - 2.0);
}
/**
 * Evaluates the NN approximation of y(x):
 *   sum_i a_i / (1 + e^(b_i - w_i * x)).
 * Parameters are packed per neuron as [w_i, a_i, b_i].
 * @param x              evaluation point
 * @param n_inner_neurons number of inner neurons
 * @param parameters     packed parameter vector, size >= 3 * n_inner_neurons
 * @return network output at x
 */
double eval_approx_f(double x, size_t n_inner_neurons, const std::vector<double> &parameters){
    double value = 0.0;
    for(size_t i = 0; i < n_inner_neurons; ++i){
        const double wi = parameters[3 * i];
        const double ai = parameters[3 * i + 1];
        const double bi = parameters[3 * i + 2];
        /* logistic unit; std::exp replaces std::pow(E, ...) */
        value += ai / (std::exp(bi - wi * x) + 1.0);
    }
    return value;
}
/**
 * First derivative (w.r.t. x) of the NN approximation:
 *   sum_i a_i * w_i * e^(b_i - w_i x) / (1 + e^(b_i - w_i x))^2.
 * Parameters are packed per neuron as [w_i, a_i, b_i].
 * @return d/dx of the network output at x
 */
double eval_approx_df(double x, size_t n_inner_neurons, const std::vector<double> &parameters){
    double value = 0.0;
    for(size_t i = 0; i < n_inner_neurons; ++i){
        const double wi  = parameters[3 * i];
        const double ai  = parameters[3 * i + 1];
        const double bi  = parameters[3 * i + 2];
        const double ei  = std::exp(bi - wi * x);
        const double ei1 = ei + 1.0;
        value += ai * wi * ei / (ei1 * ei1);
    }
    return value;
}
/**
 * Second derivative (w.r.t. x) of the NN approximation. Algebraically equal to
 * the e^b / e^(wx) formulation, but written in terms of ei = e^(b_i - w_i x):
 *   sum_i a_i * w_i^2 * ei * (ei - 1) / (1 + ei)^3.
 * This matches the sibling eval_approx_* functions and avoids overflow of
 * e^(w_i x) for large w_i * x.
 * @return d^2/dx^2 of the network output at x
 */
double eval_approx_ddf(double x, size_t n_inner_neurons, const std::vector<double> &parameters){
    double value = 0.0;
    for(size_t i = 0; i < n_inner_neurons; ++i){
        const double wi  = parameters[3 * i];
        const double ai  = parameters[3 * i + 1];
        const double bi  = parameters[3 * i + 2];
        const double ei  = std::exp(bi - wi * x);
        const double ei1 = ei + 1.0;
        value += ai * wi * wi * ei * (ei - 1.0) / (ei1 * ei1 * ei1);
    }
    return value;
}
//NN partial derivative (wi): (ai * x * e^(bi - wi * x)) * (e^(bi - wi * x) + 1)^(-2)
/**
 * Partial derivative of the network output w.r.t. the weight w_i of one neuron.
 * @param neuron_idx index of the neuron whose weight is differentiated
 */
double eval_approx_dw_f(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi  = parameters[3 * neuron_idx];
    const double ai  = parameters[3 * neuron_idx + 1];
    const double bi  = parameters[3 * neuron_idx + 2];
    const double ei  = std::exp(bi - wi * x);
    const double ei1 = ei + 1.0;
    return (ai * x * ei) / (ei1 * ei1);
}
//dNN partial derivative (wi): -(a w x e^(b - w x))/(e^(b - w x) + 1)^2 + (2 a w x e^(2 b - 2 w x))/(e^(b - w x) + 1)^3 + (a e^(b - w x))/(e^(b - w x) + 1)^2
/**
 * Partial derivative of the network's first x-derivative w.r.t. the weight w_i.
 */
double eval_approx_dw_df(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi  = parameters[3 * neuron_idx];
    const double ai  = parameters[3 * neuron_idx + 1];
    const double bi  = parameters[3 * neuron_idx + 2];
    const double ei  = std::exp(bi - wi * x);
    const double ei1 = ei + 1.0;
    return -(ai * wi * x * ei) / (ei1 * ei1)
           + (2.0 * ai * wi * x * ei * ei) / (ei1 * ei1 * ei1)
           + (ai * ei) / (ei1 * ei1);
}
//ddNN partial derivative (wi): -(a w^2 x e^(b + 2 w x))/(e^b + e^(w x))^3 - (a w^2 x e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3 + (3 a w^2 x e^(b + 2 w x) (e^(w x) - e^b))/(e^b + e^(w x))^4 - (2 a w e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
/**
 * Partial derivative of the network's second x-derivative w.r.t. the weight w_i.
 * Common sub-expressions (e^b, e^(wx), powers of their sum) are factored out
 * so each is computed once.
 */
double eval_approx_dw_ddf(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi  = parameters[3 * neuron_idx];
    const double ai  = parameters[3 * neuron_idx + 1];
    const double bi  = parameters[3 * neuron_idx + 2];
    const double eb  = std::exp(bi);
    const double ewx = std::exp(wi * x);
    const double s   = eb + ewx;
    const double s3  = s * s * s;
    const double s4  = s3 * s;
    return -(ai * wi * wi * x * eb * ewx * ewx) / s3
           - (ai * wi * wi * x * eb * ewx * (ewx - eb)) / s3
           + (3.0 * ai * wi * wi * x * eb * ewx * ewx * (ewx - eb)) / s4
           - (2.0 * ai * wi * eb * ewx * (ewx - eb)) / s3;
}
//NN partial derivative (ai): (1 + e^(-x * wi + bi))^(-1)
/**
 * Partial derivative of the network output w.r.t. the output weight a_i.
 * Independent of a_i itself — it is just the sigmoid activation.
 */
double eval_approx_da_f(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi = parameters[3 * neuron_idx];
    const double bi = parameters[3 * neuron_idx + 2];
    return 1.0 / (std::exp(bi - wi * x) + 1.0);
}
//dNN partial derivative (ai): (w e^(b - w x))/(e^(b - w x) + 1)^2
/**
 * Partial derivative of the network's first x-derivative w.r.t. a_i.
 */
double eval_approx_da_df(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi  = parameters[3 * neuron_idx];
    const double bi  = parameters[3 * neuron_idx + 2];
    const double ei  = std::exp(bi - wi * x);
    const double ei1 = ei + 1.0;
    return (wi * ei) / (ei1 * ei1);
}
//ddNN partial derivative (ai): -(w^2 e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
/**
 * Partial derivative of the network's second x-derivative w.r.t. a_i.
 * Rewritten in terms of ei = e^(b - w x):  w^2 * ei * (ei - 1) / (1 + ei)^3,
 * which is algebraically identical but avoids overflow of e^(w x) and the
 * redundant third exponential of the original formulation.
 */
double eval_approx_da_ddf(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi  = parameters[3 * neuron_idx];
    const double bi  = parameters[3 * neuron_idx + 2];
    const double ei  = std::exp(bi - wi * x);
    const double ei1 = ei + 1.0;
    return wi * wi * ei * (ei - 1.0) / (ei1 * ei1 * ei1);
}
//NN partial derivative (bi): -(ai * e^(bi - wi * x)) * (e^(bi - wi * x) + 1)^(-2)
/**
 * Partial derivative of the network output w.r.t. the bias b_i.
 */
double eval_approx_db_f(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi  = parameters[3 * neuron_idx];
    const double ai  = parameters[3 * neuron_idx + 1];
    const double bi  = parameters[3 * neuron_idx + 2];
    const double ei  = std::exp(bi - wi * x);
    const double ei1 = ei + 1.0;
    return -(ai * ei) / (ei1 * ei1);
}
//dNN partial derivative (bi): (a w e^(b + w x) (e^(w x) - e^b))/(e^b + e^(w x))^3
/**
 * Partial derivative of the network's first x-derivative w.r.t. the bias b_i.
 * Rewritten in terms of ei = e^(b - w x):  a * w * ei * (1 - ei) / (1 + ei)^3,
 * algebraically identical to the comment's formula but overflow-safe for
 * large w * x.
 */
double eval_approx_db_df(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi  = parameters[3 * neuron_idx];
    const double ai  = parameters[3 * neuron_idx + 1];
    const double bi  = parameters[3 * neuron_idx + 2];
    const double ei  = std::exp(bi - wi * x);
    const double ei1 = ei + 1.0;
    return ai * wi * ei * (1.0 - ei) / (ei1 * ei1 * ei1);
}
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
//ddNN partial derivative (bi): -(a w^2 e^(b + w x) (-4 e^(b + w x) + e^(2 b) + e^(2 w x)))/(e^b + e^(w x))^4
/**
 * Partial derivative of the network's second x-derivative w.r.t. the bias b_i.
 * Rewritten in terms of ei = e^(b - w x):
 *   -a * w^2 * ei * (ei^2 - 4 ei + 1) / (1 + ei)^4,
 * algebraically identical to the comment's formula but overflow-safe.
 */
double eval_approx_db_ddf(double x, size_t neuron_idx, const std::vector<double> &parameters){
    const double wi  = parameters[3 * neuron_idx];
    const double ai  = parameters[3 * neuron_idx + 1];
    const double bi  = parameters[3 * neuron_idx + 2];
    const double ei  = std::exp(bi - wi * x);
    const double ei1 = ei + 1.0;
    return -(ai * wi * wi * ei * (ei * ei - 4.0 * ei + 1.0)) / (ei1 * ei1 * ei1 * ei1);
}
/**
 * Total training error of the NN approximation:
 * mean squared residual of the governing equation
 *   y'' + 4 y' + 4 y = 0 over the test points,
 * plus the squared errors of the two boundary conditions y(0) = 1, y'(0) = 1.
 * @param parameters     packed [w_i, a_i, b_i] per neuron
 * @param n_inner_neurons number of inner neurons
 * @param test_points    collocation points (const ref: the original by-value
 *                       copy was an unnecessary allocation per call)
 * @return total error value
 */
double eval_error_function(const std::vector<double> &parameters, size_t n_inner_neurons, const std::vector<double> &test_points){
    double output = 0.0;
    const double frac = 1.0 / test_points.size();
    for(auto x: test_points){
        /* residual of the governing equation at x */
        double approx = 4.0 * eval_approx_f(x, n_inner_neurons, parameters)
                      + 4.0 * eval_approx_df(x, n_inner_neurons, parameters)
                      + eval_approx_ddf(x, n_inner_neurons, parameters);
        output += (0.0 - approx) * (0.0 - approx) * frac;
    }
    /* Dirichlet BC: y(0) = 1 */
    double approx = eval_approx_f(0.0, n_inner_neurons, parameters);
    output += (1.0 - approx) * (1.0 - approx);
    /* Neumann BC: y'(0) = 1 */
    approx = eval_approx_df(0.0, n_inner_neurons, parameters);
    output += (1.0 - approx) * (1.0 - approx);
    return output;
}
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
/**
 * Simple heuristic adaptation of the gradient-descent step length.
 * @param gamma          step length, updated in place
 * @param val            current error value
 * @param prev_val       previous error value
 * @param sk             norm of the gradient difference between iterations
 * @param grad_norm      current gradient norm
 * @param grad_norm_prev previous gradient norm
 */
void eval_step_size_simple(double &gamma, double val, double prev_val, double sk, double grad_norm, double grad_norm_prev){
    /* shrink slightly whenever the error increased */
    if(val > prev_val){
        gamma *= 0.99999;
    }

    const bool tiny_gradient_change = (sk <= 1e-3);
    const bool slope_decreased      = (grad_norm < grad_norm_prev);

    if(tiny_gradient_change || slope_decreased){
        /* movement on a line / new slope is less steep: speed up */
        gamma *= 1.0005;
    }
    else if(grad_norm > grad_norm_prev){
        /* new slope is more steep: slow down */
        gamma /= 1.0005;
    }
    else{
        /* slope unchanged but gradient moved noticeably: damp harder */
        gamma /= 1.005;
    }
    // gamma *= 0.999999;
}
/**
 * Adaptive step-length update with a self-tuning damping constant c.
 * @param gamma          step length, updated in place
 * @param beta           angle-based factor derived from consecutive gradients
 * @param c              damping constant, updated in place
 * @param grad_norm_prev previous gradient norm
 * @param grad_norm      current gradient norm
 * @param fi             current error value
 * @param fim            previous error value
 */
void eval_step_size_mk( double &gamma, double beta, double &c, double grad_norm_prev, double grad_norm, double fi, double fim ){
    /* nudge the damping constant according to the error trend */
    if( fi > fim ){
        c /= 1.0000005;
    }
    else if( fi < fim ){
        c *= 1.0000005;
    }
    const double angle_factor = std::pow( c, 1.0 - 2.0 * beta );
    const double slope_factor = std::pow( grad_norm_prev / grad_norm, 1.0 / c );
    gamma *= angle_factor * slope_factor;
}
/**
 * Accumulates the gradient of the total training error into *gradient and
 * returns the current total error value.
 *
 * The error is e1 + e2 + e3:
 *   e1 = (1 - y(0))^2                          (Dirichlet BC, y(0) = 1)
 *   e2 = (1 - y'(0))^2                         (Neumann BC, y'(0) = 1)
 *   e3 = 1/n * sum (y'' + 4y' + 4y)^2          (governing-equation residual)
 *
 * Parameters are packed per neuron as [w_i, a_i, b_i].
 * NOTE: contributions are ACCUMULATED into *gradient — the caller is expected
 * to zero the vector beforehand (the caller in this file does std::fill(0.0)).
 * Sign convention: mem = target - approx, derror = 2*mem, and
 * d(mem^2)/dp = -derror * d(approx)/dp, hence the `-=` below adds the true
 * partial derivative of the error.
 */
double calculate_gradient( std::vector<double> &data_points, size_t n_inner_neurons, std::vector<double> *parameters, std::vector<double> *gradient ){
size_t i, j;
double x, mem, derror, total_error, approx;
size_t train_size = data_points.size();
/* error boundary condition: y(0) = 1 => e1 = (1 - y(0))^2 */
x = 0.0;
mem = (1.0 - eval_approx_f(x, n_inner_neurons, *parameters));
derror = 2.0 * mem;
total_error = mem * mem;
for(i = 0; i < n_inner_neurons; ++i){
/* per-neuron partials of y(0) w.r.t. w_i, a_i, b_i */
(*gradient)[3 * i] -= derror * eval_approx_dw_f(x, i, *parameters);
(*gradient)[3 * i + 1] -= derror * eval_approx_da_f(x, i, *parameters);
(*gradient)[3 * i + 2] -= derror * eval_approx_db_f(x, i, *parameters);
}
/* error boundary condition: y'(0) = 1 => e2 = (1 - y'(0))^2 */
mem = (1.0 - eval_approx_df(x, n_inner_neurons, *parameters));
derror = 2.0 * mem;
total_error += mem * mem;
for(i = 0; i < n_inner_neurons; ++i){
/* per-neuron partials of y'(0) w.r.t. w_i, a_i, b_i */
(*gradient)[3 * i] -= derror * eval_approx_dw_df(x, i, *parameters);
(*gradient)[3 * i + 1] -= derror * eval_approx_da_df(x, i, *parameters);
(*gradient)[3 * i + 2] -= derror * eval_approx_db_df(x, i, *parameters);
}
for(j = 0; j < data_points.size(); ++j){
x = data_points[j];
/* error of the governing equation: y''(x) + 4y'(x) + 4y(x) = 0 => e3 = 1/n * (0 - y''(x) - 4y'(x) - 4y(x))^2 */
approx= eval_approx_ddf(x, n_inner_neurons, *parameters) + 4.0 * eval_approx_df(x, n_inner_neurons, *parameters) + 4.0 * eval_approx_f(x, n_inner_neurons, *parameters);
mem = 0.0 - approx;
/* residual term is averaged over the train set, so its derivative is too */
derror = 2.0 * mem / train_size;
for(i = 0; i < n_inner_neurons; ++i){
/* chain rule through the residual y'' + 4y' + 4y for each parameter kind */
(*gradient)[3 * i] -= derror * (eval_approx_dw_ddf(x, i, *parameters) + 4.0 * eval_approx_dw_df(x, i, *parameters) + 4.0 * eval_approx_dw_f(x, i, *parameters));
(*gradient)[3 * i + 1] -= derror * (eval_approx_da_ddf(x, i, *parameters) + 4.0 * eval_approx_da_df(x, i, *parameters) + 4.0 * eval_approx_da_f(x, i, *parameters));
(*gradient)[3 * i + 2] -= derror * (eval_approx_db_ddf(x, i, *parameters) + 4.0 * eval_approx_db_df(x, i, *parameters) + 4.0 * eval_approx_db_f(x, i, *parameters));
}
total_error += mem * mem / train_size;
}
return total_error;
}
void test_analytical_gradient_y(std::vector<double> &guess, double accuracy, size_t n_inner_neurons, size_t train_size, double d1_s, double d1_e,
/* SETUP OF THE TRAINING DATA */
std::vector<double> inp, out;

Michal Kravcenko
committed
double grad_norm = accuracy * 10.0, mem, ai, bi, wi, error, derror, approx, xj, gamma, total_error, sk, sy, sx, sg, beta;
double grad_norm_prev = grad_norm;
size_t i, j, iter_idx = 0;

Michal Kravcenko
committed
frac = (d1_e - d1_s) / (train_size - 1);
for(unsigned int i = 0; i < train_size; ++i){
data_points[i] = frac * i;
}
// /* CHEBYSCHEV TRAIN SET */
// alpha = PI / (train_size );
// frac = 0.5 * (d1_e - d1_s);
// for(i = 0; i < train_size; ++i){
// x = (std::cos(PI - alpha * i) + 1.0) * frac + d1_s;
// data_points[i] = x;

Michal Kravcenko
committed
// DataSet ds(0.0, 4.0, train_size, 0.0);
std::vector<double> *gradient_current = new std::vector<double>(3 * n_inner_neurons);
std::vector<double> *gradient_prev = new std::vector<double>(3 * n_inner_neurons);
std::vector<double> *params_current = new std::vector<double>(guess);
std::vector<double> *params_prev = new std::vector<double>(guess);
std::vector<double> *conjugate_direction_current = new std::vector<double>(3 * n_inner_neurons);
std::vector<double> *conjugate_direction_prev = new std::vector<double>(3 * n_inner_neurons);
std::vector<double> *ptr_mem;
std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
std::fill(gradient_prev->begin(), gradient_prev->end(), 0.0);
std::fill(conjugate_direction_current->begin(), conjugate_direction_current->end(), 0.0);
std::fill(conjugate_direction_prev->begin(), conjugate_direction_prev->end(), 0.0);
for (i = 0; i < n_inner_neurons; ++i) {
wi = (*params_current)[3 * i];
ai = (*params_current)[3 * i + 1];
bi = (*params_current)[3 * i + 2];
printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", (int)(i + 1), wi, bi, ai);
double prev_val, val = 0.0, c = 2.0;
prev_val = val;
grad_norm_prev = grad_norm;
/* reset of the current gradient */
std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
val = calculate_gradient( data_points, n_inner_neurons, params_current, gradient_current );
grad_norm = 0.0;
for(auto v: *gradient_current){
grad_norm += v * v;
grad_norm = std::sqrt(grad_norm);
/* Update of the parameters */
/* step length calculation */
if(iter_idx < 10 || iter_idx % 100 == 0){
/* norm of the gradient calculation */
sk = 0.0;
for(i = 0; i < gradient_current->size(); ++i){
sx = (*gradient_current)[i] - (*gradient_prev)[i];
sk += sx * sx;
}
sk = std::sqrt(sk);
/* angle between two consecutive gradients */
double beta = 0.0, sx = 0.0;
for(i = 0; i < gradient_current->size(); ++i){
sx += (gradient_current->at( i ) * gradient_prev->at( i ));
}
sx /= grad_norm * grad_norm_prev;
beta = std::sqrt(std::acos( sx ) / PI);
// eval_step_size_simple( gamma, val, prev_val, sk, grad_norm, grad_norm_prev );
eval_step_size_mk( gamma, beta, c, grad_norm_prev, grad_norm, val, prev_val );
for(i = 0; i < gradient_current->size(); ++i){
(*params_prev)[i] = (*params_current)[i] - gamma * (*gradient_current)[i];
}
/* switcheroo */
ptr_mem = gradient_prev;
gradient_prev = gradient_current;
gradient_current = ptr_mem;
ptr_mem = params_prev;
params_prev = params_current;
params_current = ptr_mem;
if(iter_idx % 1 == 0){
printf("Iteration %12d. Step size: %15.8f, C: %15.8f, Gradient norm: %15.8f. Total error: %10.8f\r", (int)iter_idx, gamma, c, grad_norm, val);
printf("Iteration %12d. Step size: %15.8f, C: %15.8f, Gradient norm: %15.8f. Total error: %10.8f\r\n", (int)iter_idx, gamma, c, grad_norm, val);
for (i = 0; i < n_inner_neurons; ++i) {
wi = (*params_current)[3 * i];
ai = (*params_current)[3 * i + 1];
bi = (*params_current)[3 * i + 2];
printf("Path %3d. w = %15.8f, b = %15.8f, a = %15.8f\n", (int)(i + 1), wi, bi, ai);
}
printf("\n--------------------------------------------------\ntest output for gnuplot\n--------------------------------------------------\n");
if(total_error < 1e-3 || true){
/* ISOTROPIC TEST SET */
frac = (te - ts) / (test_size - 1);
for(j = 0; j < test_size; ++j){
xj = frac * j + ts;
std::cout << j + 1 << " " << xj << " " << eval_f(xj) << " " << eval_approx_f(xj, n_inner_neurons, *params_current) << " " << eval_df(xj) << " " << eval_approx_df(xj, n_inner_neurons, *params_current) << " " << eval_ddf(xj) << " " << eval_approx_ddf(xj, n_inner_neurons, *params_current) << std::endl;
}
}
/* error analysis */
double referential_error = 0.0;
mem = eval_approx_df(0.0, n_inner_neurons, *params_current);
referential_error += (mem - 1.0) * (mem - 1.0);
mem = eval_approx_f(0.0, n_inner_neurons, *params_current);
referential_error += (mem - 1.0) * (mem - 1.0);
frac = 1.0 / train_size;
for(j = 0; j < data_points.size(); ++j){

Michal Kravcenko
committed
// xj = ds.get_data()->at(j).first[0];
xj = data_points[i];
mem = 4.0 * eval_approx_f(xj, n_inner_neurons, *params_current) + 4.0 * eval_approx_df(xj, n_inner_neurons, *params_current) + eval_approx_ddf(xj, n_inner_neurons, *params_current);
referential_error += mem * mem * frac;
printf("Total error (as used in the NN example): %10.8f\n", referential_error);
delete gradient_current;
delete gradient_prev;
delete params_current;
delete params_prev;
delete conjugate_direction_current;
delete conjugate_direction_prev;
void test_ode(double accuracy, size_t n_inner_neurons, size_t train_size, double ds, double de, size_t n_test_points, double ts, double te, size_t max_iters, size_t n_particles){

Michal Kravcenko
committed
/* SOLVER SETUP */
size_t n_inputs = 1;
size_t n_equations = 3;
DESolver solver_01( n_equations, n_inputs, n_inner_neurons );

Michal Kravcenko
committed
/* SETUP OF THE EQUATIONS */
MultiIndex alpha_0( n_inputs );
MultiIndex alpha_1( n_inputs );
MultiIndex alpha_2( n_inputs );
alpha_2.set_partial_derivative(0, 2);
alpha_1.set_partial_derivative(0, 1);
/* the governing differential equation */
solver_01.add_to_differential_equation( 0, alpha_2, 1.0 );
solver_01.add_to_differential_equation( 0, alpha_1, 4.0 );
solver_01.add_to_differential_equation( 0, alpha_0, 4.0 );
/* dirichlet boundary condition */
solver_01.add_to_differential_equation( 1, alpha_0, 1.0 );
/* neumann boundary condition */
solver_01.add_to_differential_equation( 2, alpha_1, 1.0 );
/* SETUP OF THE TRAINING DATA */
std::vector<double> inp, out;

Michal Kravcenko
committed
double d1_s = ds, d1_e = de, frac;

Michal Kravcenko
committed
/* TRAIN DATA FOR THE GOVERNING DE */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_g;

Michal Kravcenko
committed
std::vector<double> test_points(train_size);

Michal Kravcenko
committed
/* ISOTROPIC TRAIN SET */
frac = (d1_e - d1_s) / (train_size - 1);
for(unsigned int i = 0; i < train_size; ++i){
inp = {frac * i};
out = {0.0};
data_vec_g.emplace_back(std::make_pair(inp, out));

Michal Kravcenko
committed
test_points[i] = inp[0];
}
/* CHEBYSCHEV TRAIN SET */
// alpha = PI / (train_size - 1);
// frac = 0.5 * (d1_e - d1_s);
// for(unsigned int i = 0; i < train_size; ++i){
// inp = {(std::cos(alpha * i) + 1.0) * frac + d1_s};
// out = {0.0};
// data_vec_g.emplace_back(std::make_pair(inp, out));

Michal Kravcenko
committed
//
// test_points[i] = inp[0];

Michal Kravcenko
committed
DataSet ds_00(&data_vec_g);
/* TRAIN DATA FOR DIRICHLET BC */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_y;
inp = {0.0};
out = {1.0};
data_vec_y.emplace_back(std::make_pair(inp, out));

Michal Kravcenko
committed
DataSet ds_01(&data_vec_y);
/* TRAIN DATA FOR NEUMANN BC */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_dy;
inp = {0.0};
out = {1.0};
data_vec_dy.emplace_back(std::make_pair(inp, out));

Michal Kravcenko
committed
DataSet ds_02(&data_vec_dy);

Michal Kravcenko
committed
/* Placing the conditions into the solver */
solver_01.set_error_function( 0, ErrorFunctionType::ErrorFuncMSE, &ds_00 );
solver_01.set_error_function( 1, ErrorFunctionType::ErrorFuncMSE, &ds_01 );
solver_01.set_error_function( 2, ErrorFunctionType::ErrorFuncMSE, &ds_02 );

Michal Kravcenko
committed
std::vector<double> params(3 * n_inner_neurons), params_analytical(3 * n_inner_neurons);
std::random_device seeder;
std::mt19937 gen(seeder());
std::uniform_real_distribution<double> dist(-10.0, 10.0);
std::vector<double> input(1);
for( size_t testi = 0; testi < 50; ++testi ){
double test_error_eq1 = 0.0, total_error = 0.0;
for(size_t i = 0; i < params.size(); ++i){
params[i] = dist(gen);
}
for(size_t i = 0; i < n_inner_neurons; ++i){
params_analytical[3 * i] = params[i];
params_analytical[3 * i + 1] = params[n_inner_neurons + i];
params_analytical[3 * i + 2] = params[2 * n_inner_neurons + i];
}
for(auto d: *ds_00.get_data()){
input = d.first;
double x = input[0];
double analytical_value_f = eval_approx_f(x, n_inner_neurons, params_analytical);
double analytical_value_df = eval_approx_df(x, n_inner_neurons, params_analytical);
double analytical_value_ddf = eval_approx_ddf(x, n_inner_neurons, params_analytical);
double de_solver_value_eq1 = solver_01.eval_equation(0, ¶ms, input);

Michal Kravcenko
committed
double analytical_value_eq1 = 4 * analytical_value_f + 4 * analytical_value_df + analytical_value_ddf;
test_error_eq1 += (de_solver_value_eq1 - analytical_value_eq1) * (de_solver_value_eq1 - analytical_value_eq1);
}
input[0] = 0.0;
double de_solver_value_eq2 = solver_01.eval_equation(1, ¶ms, input);

Michal Kravcenko
committed
double analytical_value_eq2 = eval_approx_f(0.0, n_inner_neurons, params_analytical);
double test_error_eq2 = (de_solver_value_eq2 - analytical_value_eq2) * (de_solver_value_eq2 - analytical_value_eq2);
double de_solver_value_eq3 = solver_01.eval_equation(2, ¶ms, input);

Michal Kravcenko
committed
double analytical_value_eq3 = eval_approx_df(0.0, n_inner_neurons, params_analytical);
double test_error_eq3 = (de_solver_value_eq3 - analytical_value_eq3) * (de_solver_value_eq3 - analytical_value_eq3);
double total_error_de_solver = solver_01.eval_total_error(params);
double total_error_analytical = eval_error_function(params_analytical, n_inner_neurons, test_points);
printf("\tRepresentation test %6d, error of eq1: %10.8f, error of eq2: %10.8f, error of eq3: %10.8f, total error: %10.8f\n", (int)testi, std::sqrt(test_error_eq1), std::sqrt(test_error_eq2), std::sqrt(test_error_eq3), (total_error_analytical - total_error_de_solver) * (total_error_analytical - total_error_de_solver));
}

Michal Kravcenko
committed
/* PARTICLE SWARM TRAINING METHOD SETUP */
//must encapsulate each of the partial error functions
double *domain_bounds = new double[ 6 * n_inner_neurons ];
for(unsigned int i = 0; i < 3 * n_inner_neurons; ++i){

Michal Kravcenko
committed
domain_bounds[2 * i] = -10.0;
domain_bounds[2 * i + 1] = 10.0;
double c1 = 1.7, c2 = 1.7, w = 0.7;

Michal Kravcenko
committed
double gamma = 0.5, epsilon = 0.02, delta = 1.0;

Michal Kravcenko
committed

Michal Kravcenko
committed
solver_01.solve_via_particle_swarm( domain_bounds, c1, c2, w, n_particles, max_iters, gamma, epsilon, delta );

Michal Kravcenko
committed
NeuralNetwork *solution = solver_01.get_solution( alpha_0 );
std::vector<double> parameters(3 * n_inner_neurons);//w1, a1, b1, w2, a2, b2, ... , wm, am, bm
std::vector<double> *weight_params = solution->get_parameter_ptr_weights();
std::vector<double> *biases_params = solution->get_parameter_ptr_biases();
for(size_t i = 0; i < n_inner_neurons; ++i){
parameters[3 * i] = weight_params->at(i);
parameters[3 * i + 1] = weight_params->at(i + n_inner_neurons);
parameters[3 * i + 2] = biases_params->at(i);
}

Michal Kravcenko
committed
std::vector<double> output(1);
for(unsigned int i = 0; i < n_test_points; ++i){
x = i * ((d1_e - d1_s) / (n_test_points - 1)) + d1_s;
input[0] = x;
std::cout << i + 1 << " " << x << " " << std::pow(E, -2*x) * (3*x + 1)<< " " << output[0] << " " << std::pow(E, -2*x) * (1 - 6*x)<< " " << eval_approx_df(x, n_inner_neurons, parameters) << " " << 4 * std::pow(E, -2*x) * (3*x - 2)<< " " << eval_approx_ddf(x, n_inner_neurons, parameters) << std::endl;

Michal Kravcenko
committed
unsigned int n_inner_neurons = 2;
unsigned int train_size = 10;

Michal Kravcenko
committed
double accuracy = 1e-4;
double ds = 0.0;
double de = 4.0;

Michal Kravcenko
committed
unsigned int test_size = 300;
double ts = ds;
double te = de + 2;
// size_t particle_swarm_max_iters = 1000;
// size_t n_particles = 100;
// test_ode(accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te, particle_swarm_max_iters, n_particles);
bool optimize_weights = true;
bool optimize_biases = true;
// std::vector<double> init_guess = {0.35088209, -0.23738505, 0.14160885, 3.72785473, -6.45758308, 1.73769138};
std::vector<double> init_guess(3 * n_inner_neurons);
std::random_device seeder;
std::mt19937 gen(seeder());
std::uniform_real_distribution<double> dist(-1.0, 1.0);
for(unsigned int i = 0; i < init_guess.size(); ++i){
init_guess[i] = dist(gen);
}
test_analytical_gradient_y(init_guess, accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te);