From a47613d0f4f6f01d274f0e0a5c23d5ce644a0480 Mon Sep 17 00:00:00 2001
From: bes0030 <bes0030@login3.smc.salomon.it4i.cz>
Date: Mon, 2 Sep 2019 16:27:08 +0200
Subject: [PATCH] [WIP] Trying to find an optimal network configuration to
 fit the He4+ cluster
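
Widen the particle-swarm search bounds to [-150, 150], raise the swarm to
300 particles and 500 iterations, keep only two G2 symmetry functions for
helium, grow the hidden layer to 20 neurons, and run the particle swarm
(instead of LBMQ) before the gradient-descent refinement.

For orientation, this is the textbook PSO update that the tuned knobs
(presumably the inertia weight w, the cognitive/social factors c1 and c2,
and the per-weight domain bounds) control. It is only an illustrative
standalone sketch; the helper name and signature are made up here and
lib4neuro's own particle-swarm implementation is not shown:

  // Illustrative one-step PSO update (hypothetical helper, not library code).
  #include <algorithm>
  #include <random>
  #include <vector>

  void pso_step(std::vector<double>& x, std::vector<double>& v,
                const std::vector<double>& personal_best,
                const std::vector<double>& global_best,
                double lo, double hi,           // domain bounds, e.g. -150 / 150
                double w, double c1, double c2, // inertia, cognitive, social
                std::mt19937& rng) {
      std::uniform_real_distribution<double> u(0.0, 1.0);
      for (std::size_t i = 0; i < x.size(); ++i) {
          // Inertia plus attraction towards the particle's own best and the swarm's best.
          v[i] = w * v[i]
               + c1 * u(rng) * (personal_best[i] - x[i])
               + c2 * u(rng) * (global_best[i] - x[i]);
          // Keep the candidate weight/bias inside the search interval.
          x[i] = std::clamp(x[i] + v[i], lo, hi);
      }
  }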

---
 src/examples/dev_sandbox.cpp | 38 +++++++++++++++++-------------------
 1 file changed, 18 insertions(+), 20 deletions(-)
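
Note on the swarm's stopping rule: the comment above optimize_via_particle_swarm()
in the first hunk says one terminating criterion is met when the maximal particle
velocity of the previous step drops below 'gamma' times the current maximal
velocity. A minimal standalone sketch of that check (the names are hypothetical;
the actual lib4neuro bookkeeping is not shown):

  // Hypothetical check mirroring the gamma-based terminating criterion.
  bool velocity_criterion_met(double max_velocity_prev,
                              double max_velocity_curr,
                              double gamma) {
      // Previous-step maximal velocity < gamma * current maximal velocity.
      return max_velocity_prev < gamma * max_velocity_curr;
  }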

diff --git a/src/examples/dev_sandbox.cpp b/src/examples/dev_sandbox.cpp
index 97255099..624aab95 100644
--- a/src/examples/dev_sandbox.cpp
+++ b/src/examples/dev_sandbox.cpp
@@ -12,15 +12,15 @@ void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
     std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
 
     for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
-        domain_bounds[2 * i]     = -10;
-        domain_bounds[2 * i + 1] = 10;
+        domain_bounds[2 * i]     = -150;
+        domain_bounds[2 * i + 1] = 150;
     }
 
     double c1          = 1.7;
     double c2          = 1.7;
     double w           = 0.7;
-    size_t n_particles = 100;
-    size_t iter_max    = 30;
+    size_t n_particles = 300;
+    size_t iter_max    = 500;
 
     /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
      * terminating criterion is met */
@@ -60,7 +60,8 @@ double optimize_via_gradient_descent(l4n::NeuralNetwork& net,
         << "***********************************************************************************************************************"
         << std::endl;
     l4n::GradientDescentBB gd(1e-6,
-                              1000);
+                              1000,
+                              60000);
 
     gd.optimize(ef);
 
@@ -113,18 +114,18 @@ int main() {
         /* Specify cutoff functions */
 //        l4n::CutoffFunction1 cutoff1(10.1);
         l4n::CutoffFunction2 cutoff1(8);
-        l4n::CutoffFunction2 cutoff2(25);
+        //l4n::CutoffFunction2 cutoff2(25);
 //        l4n::CutoffFunction2 cutoff2(15.2);
 //        l4n::CutoffFunction2 cutoff4(10.3);
 //        l4n::CutoffFunction2 cutoff5(12.9);
 //        l4n::CutoffFunction2 cutoff6(11);
 
         /* Specify symmetry functions */
-        l4n::G1 sym_f1(&cutoff1);
+//        l4n::G1 sym_f1(&cutoff1);
         l4n::G2 sym_f2(&cutoff1, 2.09, 0.8);
         l4n::G2 sym_f3(&cutoff1, 0.01, 0.04);
-        l4n::G2 sym_f4(&cutoff2, 0.02, 0.04);
-        l4n::G2 sym_f5(&cutoff2, 2.09, 0.04);
+//        l4n::G2 sym_f4(&cutoff2, 0.02, 0.04);
+//        l4n::G2 sym_f5(&cutoff2, 2.09, 0.04);
 
 
 //        l4n::G3 sym_f4(&cutoff4, 0.3);
@@ -133,7 +134,7 @@ int main() {
 //        l4n::G4 sym_f7(&cutoff6, 0.5, true, 0.05);
 //        l4n::G4 sym_f8(&cutoff6, 0.5, false, 0.05);
 
-        std::vector<l4n::SymmetryFunction*> helium_sym_funcs = {&sym_f1, &sym_f2, &sym_f3, &sym_f4, &sym_f5}; //, &sym_f6, &sym_f7, &sym_f8};
+        std::vector<l4n::SymmetryFunction*> helium_sym_funcs = {&sym_f2, &sym_f3}; //, &sym_f4, &sym_f5}; //, &sym_f6, &sym_f7, &sym_f8};
 
         l4n::Element helium = l4n::Element("He",
                                            helium_sym_funcs);
@@ -141,7 +142,7 @@ int main() {
         elements[l4n::ELEMENT_SYMBOL::He] = &helium;
 
         /* Read data */
-        l4n::XYZReader reader("/home/martin/Desktop/HE4+T0.xyz", true);
+        l4n::XYZReader reader("/home/bes0030/HE4+T0.xyz", true);
         reader.read();
 
         std::cout << "Finished reading data" << std::endl;
@@ -150,7 +151,7 @@ int main() {
 
         /* Create a neural network */
         std::unordered_map<l4n::ELEMENT_SYMBOL, std::vector<unsigned int>> n_hidden_neurons;
-        n_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {10, 1};
+        n_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {20, 1};
 
         std::unordered_map<l4n::ELEMENT_SYMBOL, std::vector<l4n::NEURON_TYPE>> type_hidden_neurons;
         type_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LINEAR};
@@ -159,16 +160,13 @@ int main() {
 
         l4n::MSE mse(&net, ds.get());
 
-        std::cout << net.get_min_max_weight().first << " " << net.get_min_max_weight().second << std::endl;
 
         net.randomize_parameters();
-        // optimize_via_particle_swarm(net, mse);
-        double err1 = optimize_via_LBMQ(net, mse);
+        optimize_via_particle_swarm(net, mse);
+//        double err1 = optimize_via_LBMQ(net, mse);
         double err2 = optimize_via_gradient_descent(net, mse);
 		
-		if(err2 > 0.00001) {
-			throw std::runtime_error("Training was incorrect!");
-		}
+        std::cout << "Weights: " << net.get_min_max_weight().first << " " << net.get_min_max_weight().second << std::endl;
 
         /* Print fit comparison with real data */
         std::vector<double> output;
@@ -177,11 +175,11 @@ int main() {
         for(auto e : *ds->get_data()) {
             for(unsigned int i = 0; i < e.first.size(); i++) {
                 std::cout << e.first.at(i) << " ";
-                if(i % 5 == 4) {
+                if(i % 2 == 1) {
                     std::cout << std::endl;
                 }
             }
-            std::cout << e.second.at(0) << " ";
+            std::cout << "OUTS (DS, predict): " << e.second.at(0) << " ";
             net.eval_single(e.first, output);
             std::cout << output.at(0) << std::endl;
         }
-- 
GitLab