Commit 24948047 authored by Michal Kravcenko

Commit before merge

parent f220e8e6
@@ -327,23 +327,28 @@ void DESolver::solve_via_particle_swarm(double *domain_bounds, double c1, double
     DataSet *ds;
     /* DEFINITION OF THE PARTIAL ERROR FUNCTIONS */
-    std::vector<ErrorFunction*> error_functions( this->n_equations );
+    std::vector<ErrorFunction*> error_functions( 0 );
     for(size_t i = 0; i < this->n_equations; ++i ){
-        nn = this->differential_equations->at( i );
         ds = this->errors_functions_data_sets->at( i );
+        if(!ds){
+            //empty error function
+            continue;
+        }
+        nn = this->differential_equations->at( i );
         if( this->errors_functions_types->at( i ) == ErrorFunctionType::ErrorFuncMSE ){
-            error_functions[i] = new MSE( nn, ds );
+            error_functions.push_back(new MSE( nn, ds ));
         }
         else{
             //default
-            error_functions[i] = new MSE( nn, ds );
+            error_functions.push_back(new MSE( nn, ds ));
         }
     }
     /* DEFINITION OF THE GLOBAL ERROR FUNCTION */
     ErrorSum total_error;
-    for(size_t i = 0; i < this->n_equations; ++i ) {
+    for(size_t i = 0; i < error_functions.size(); ++i ) {
         total_error.add_error_function( error_functions[i], 1.0 );
     }
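A minimal standalone sketch (not part of this commit) of the container pattern the hunk above switches to: start from an empty vector and push_back only the equations that actually have a data set, so the later weighted sum iterates only over terms that were really created. ErrorTerm and the data-set handling below are illustrative stand-ins, not the library's ErrorFunction/MSE/ErrorSum API.

    #include <cstdio>
    #include <vector>

    // Illustrative stand-in for an ErrorFunction/MSE-like term.
    struct ErrorTerm {
        double value;
        double eval() const { return value; }
    };

    int main() {
        // One "data set" per equation; nullptr marks an equation without data.
        double d0 = 0.5, d2 = 1.5;
        std::vector<const double*> data_sets = { &d0, nullptr, &d2 };

        // Starting empty and push_back-ing avoids the pre-sized-vector variant,
        // where skipped equations would leave unassigned (null) entries behind.
        std::vector<ErrorTerm> error_terms;
        for (const double *ds : data_sets) {
            if (!ds) continue;                      // skip equations without a data set
            error_terms.push_back(ErrorTerm{ *ds });
        }

        // Weighted sum over the terms that were actually created (weight 1.0 each),
        // mirroring total_error.add_error_function(error_functions[i], 1.0).
        double total_error = 0.0;
        for (const ErrorTerm &t : error_terms) total_error += 1.0 * t.eval();

        std::printf("terms: %zu, total error: %f\n", error_terms.size(), total_error);
        return 0;
    }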
@@ -10,10 +10,9 @@
  * NN representation: sum over [a_i * (1 + e^(bi - x * w_ix - t * w_it))^(-1)]
  * -------------------------------------------
  * Optimal NN setting with biases (4 inner neurons)
- * Path 1. wx = 0.51954589, wt = -0.48780445, b = 0.35656955, a = 1.69279158
- * Path 2. wx = -1.24173503, wt = 1.13351300, b = 0.32528567, a = 1.69148458
- * Path 3. wx = 0.64754127, wt = 0.95758760, b = -0.95852707, a = 2.77877453
- * Path 4. wx = 1.65439557, wt = -0.31784248, b = -1.81237586, a = -3.96157108
+ * Path 1. wx = -2.08690916, wt = 0.64501935, b = 1.79041851, a = 2.92924903
+ * Path 2. wx = -1.89510969, wt = 1.15023222, b = -0.25186595, a = 0.85740615
+ * Path 3. wx = -0.70835142, wt = -0.72551198, b = 0.69366617, a = -2.65668753
  * @author Michal Kravčenko
  * @date 9.8.18
  */
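Written out, the representation described in the header comment (a sum of logistic-sigmoid units in x and t, with n inner neurons) reads:

    y(x, t) = \sum_{i=1}^{n} a_i \left( 1 + e^{\, b_i - w_{x,i} x - w_{t,i} t} \right)^{-1}
            = \sum_{i=1}^{n} \frac{a_i}{1 + e^{\, b_i - w_{x,i} x - w_{t,i} t}}

Each "Path i" line above lists one unit's (wx, wt, b, a) quadruple, i.e. (w_{x,i}, w_{t,i}, b_i, a_i).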
@@ -247,6 +246,14 @@ double eval_approx_db_yxx(double x, double t, size_t neuron_idx, std::vector<dou
     return (ai * wxi * wxi * eb * ebp) / (ei1 * ei1 * ei1) - (ai * wxi * wxi * ebp * (etx - eb)) / (ei1 * ei1 * ei1) + (3 * ai * wxi * wxi * eb * ebp * (etx - eb)) / (ei1 * ei1 * ei1 * ei1);
 }
+double get_step_size_simple(){
+}
+double get_step_size_mk(){
+}
 void solve_example_gradient(std::vector<double> &guess, double accuracy, size_t n_inner_neurons, size_t train_size, double ds, double de, size_t n_test_points, double ts, double te){
     /* SETUP OF THE TRAINING DATA */
     std::vector<double> inp, out;
@@ -375,14 +382,15 @@ void solve_example_gradient(std::vector<double> &guess, double accuracy, size_t
         val = total_error;
         /* Update of the parameters */
-        /* step length calculation */
-        if(iter_idx < 10){
-            /* fixed step length */
-            gamma = 0.1 * accuracy;
-        }
+        /* norm of the gradient calculation */
+        grad_norm_prev = grad_norm;
+        grad_norm = 0.0;
+        for(auto v: *gradient_current){
+            grad_norm += v * v;
+        }
+        grad_norm = std::sqrt(grad_norm);
         /* adaptive step-length */
         sk = 0.0;
         for(i = 0; i < gradient_current->size(); ++i){
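For reference, the quantity accumulated just above is the Euclidean norm of the current gradient; the body of the sk loop is cut off by the hunk boundary, but judging by the "Gradient change" label added to the progress output further down, it presumably accumulates the difference to the previous gradient:

    \text{grad\_norm} = \lVert \nabla E_k \rVert_2 = \sqrt{ \sum_j \left( \partial E / \partial p_j \right)^2 },
    \qquad s_k \approx \lVert \nabla E_k - \nabla E_{k-1} \rVert_2 \quad \text{(assumed)}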
@@ -391,6 +399,13 @@ void solve_example_gradient(std::vector<double> &guess, double accuracy, size_t
         }
         sk = std::sqrt(sk);
+        /* step length calculation */
+        if(iter_idx < 10){
+            /* fixed step length */
+            gamma = 0.1 * accuracy;
+        }
         if(val > prev_val){
             gamma *= 0.99999;
         }
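The two step-size helpers added earlier in this commit (get_step_size_simple, get_step_size_mk) are still empty stubs; the step-length policy actually in effect is the one visible in these hunks. Below is a hedged sketch of that policy factored into a standalone helper, assuming gamma is simply left unchanged when the error did not grow (the diff does not show that branch); it is not the commit's code, only an illustration.

    #include <cstddef>
    #include <cstdio>

    // Sketch only: mirrors the step-length logic shown in the diff, not the
    // (still empty) get_step_size_* helpers from this commit.
    double update_step_length(double gamma, std::size_t iter_idx, double accuracy,
                              double val, double prev_val) {
        if (iter_idx < 10) {
            /* fixed step length during the first iterations */
            return 0.1 * accuracy;
        }
        if (val > prev_val) {
            /* total error grew compared to the previous iteration: shrink slightly */
            gamma *= 0.99999;
        }
        return gamma;
    }

    int main() {
        double gamma = 0.0;
        gamma = update_step_length(gamma, 3, 1e-4, 0.5, 0.4);   // early: fixed step
        gamma = update_step_length(gamma, 50, 1e-4, 0.6, 0.5);  // error grew: shrink
        std::printf("gamma = %.10f\n", gamma);
        return 0;
    }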
@@ -409,11 +424,6 @@ void solve_example_gradient(std::vector<double> &guess, double accuracy, size_t
         }
         // gamma *= 0.999999;
-        grad_norm = 0.0;
-        for(auto v: *gradient_current){
-            grad_norm += v * v;
-        }
-        grad_norm = std::sqrt(grad_norm);
         // gamma = 0.000001;
@@ -432,7 +442,7 @@ void solve_example_gradient(std::vector<double> &guess, double accuracy, size_t
         if(iter_idx % 1 == 0){
-            printf("Iteration %12d. Step size: %15.8f, Gradient norm: %15.8f. Total error: %10.8f\r", (int)iter_idx, gamma, grad_norm, total_error);
+            printf("Iteration %12d. Step size: %15.8f, Gradient norm: %15.8f. Gradient change: %15.8f, Total error: %10.8f\r", (int)iter_idx, gamma, grad_norm, sk, total_error);
             std::cout.flush();
         }
     }
@@ -696,9 +706,9 @@ void solve_example_particle_swarm(double accuracy, size_t n_inner_neurons, size_
 int main() {
-    unsigned int n_inner_neurons = 4;
-    unsigned int train_size = 20;
-    double accuracy = 1e-4;
+    unsigned int n_inner_neurons = 3;
+    unsigned int train_size = 40;
+    double accuracy = 0.0005;
     double ds = 0.0;
     double de = 1.0;
@@ -706,21 +716,21 @@ int main() {
     double ts = ds;
     double te = de + 0;
-    size_t particle_swarm_max_iters = 100;
-    size_t n_particles = 200;
-    solve_example_particle_swarm(accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te, particle_swarm_max_iters, n_particles);
+//    size_t particle_swarm_max_iters = 100;
+//    size_t n_particles = 200;
+//    solve_example_particle_swarm(accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te, particle_swarm_max_iters, n_particles);
+//
-//    std::vector<double> init_guess(4 * n_inner_neurons);
-//    std::random_device seeder;
-//    std::mt19937 gen(seeder());
-//    std::uniform_real_distribution<double> dist(-1.0, 1.0);
-//    for(unsigned int i = 0; i < init_guess.size(); ++i){
-//        init_guess[i] = dist(gen);
-//    }
-//
-////    init_guess = {-0.21709230, -0.26189447, 0.77853923, 0.41091127, -0.44311897, -0.99036349, 0.84912023, -0.16920743};
-//    solve_example_gradient(init_guess, accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te);
+    std::vector<double> init_guess(4 * n_inner_neurons);
+    std::random_device seeder;
+    std::mt19937 gen(seeder());
+    std::uniform_real_distribution<double> dist(-1.0, 1.0);
+    for(unsigned int i = 0; i < init_guess.size(); ++i){
+        init_guess[i] = dist(gen);
+    }
+//    init_guess = {-0.21709230, -0.26189447, 0.77853923, 0.41091127, -0.44311897, -0.99036349, 0.84912023, -0.16920743};
+    solve_example_gradient(init_guess, accuracy, n_inner_neurons, train_size, ds, de, test_size, ts, te);
     return 0;
 }
\ No newline at end of file