Skip to content
Snippets Groups Projects
dev_sandbox.cpp 6.29 KiB
Newer Older
  • Learn to ignore specific revisions
#include <cstdlib>
#include <exception>
#include <fstream>
#include <iostream>
#include <memory>
#include <unordered_map>
#include <vector>

#include <4neuro.h>
    
    void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
                                     l4n::ErrorFunction& ef) {
    
        /* TRAINING METHOD SETUP */
        std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));
    
        for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
            domain_bounds[2 * i]     = -10;
            domain_bounds[2 * i + 1] = 10;
        }
    
        double c1          = 1.7;
        double c2          = 1.7;
        double w           = 0.7;
        size_t n_particles = 100;
        size_t iter_max    = 30;
    
        /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
         * terminating criterion is met */
        double gamma = 0.5;
    
        /* if 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon', then the second
         * terminating criterion is met ('n' is the total number of particles) */
        double epsilon = 0.02;
        double delta   = 0.7;
    
        l4n::ParticleSwarm swarm_01(
            &domain_bounds,
            c1,
            c2,
            w,
            gamma,
            epsilon,
            delta,
            n_particles,
            iter_max
        );
        swarm_01.optimize(ef);
    
        net.copy_parameter_space(swarm_01.get_parameters());
    
        /* ERROR CALCULATION */
        std::cout << "Run finished! Error of the network[Particle swarm]: " << ef.eval(nullptr) << std::endl;
        std::cout
            << "***********************************************************************************************************************"
            << std::endl;
    }
    
    double optimize_via_gradient_descent(l4n::NeuralNetwork& net,
                                       l4n::ErrorFunction& ef) {
    
        std::cout
            << "***********************************************************************************************************************"
            << std::endl;
        l4n::GradientDescentBB gd(1e-6,
                                  1000);
    
        gd.optimize(ef);
    
        net.copy_parameter_space(gd.get_parameters());
    
        /* ERROR CALCULATION */
        double err = ef.eval(nullptr);
        std::cout << "Run finished! Error of the network[Gradient descent]: " << err << std::endl;
    
        /* Just for validation test purposes - NOT necessary for the example to work! */
        return err;
    }
    
    double optimize_via_LBMQ(l4n::NeuralNetwork& net,
                                       l4n::ErrorFunction& ef) {
    
    	size_t max_iterations = 10000;
    	size_t batch_size = 0;
    	double tolerance = 1e-6;
    	double tolerance_gradient = tolerance;
    	double tolerance_parameters = tolerance;
    	
        std::cout
            << "***********************************************************************************************************************"
            << std::endl;
        l4n::LevenbergMarquardt lm(
    						   max_iterations,
                               batch_size,
                               tolerance,
                               tolerance_gradient,
                               tolerance_parameters
    						   );
    
        lm.optimize(ef);
    
        net.copy_parameter_space(lm.get_parameters());
    
        /* ERROR CALCULATION */
        double err = ef.eval(nullptr);
        // std::cout << "Run finished! Error of the network[Levenberg-Marquardt]: " << err << std::endl;
    
        /* Just for validation test purposes - NOT necessary for the example to work! */
    	return err;
    }
    
    void print_into_file(const char * fn, std::shared_ptr<l4n::DataSet> &ds, l4n::NeuralNetwork &net){
    	std::ofstream outfile;
    	outfile.open(fn, std::ios::out );
    	
       std::vector<double> output;
       output.resize(1);
    
       for(auto e : *ds->get_data()) {
    	   for(auto inp_e : e.first) {
    		   outfile << inp_e << " ";
    	   }
    	   outfile << e.second.at(0) << " ";
    	   net.eval_single(e.first, output);
    	   outfile << output.at(0) << std::endl;
       }	
       
    	outfile.close();
    
    
    int main() {
    
        try{
    
            /* Specify cutoff functions */
            l4n::CutoffFunction1 cutoff1(10.1);
            l4n::CutoffFunction2 cutoff2(12.5);
            l4n::CutoffFunction2 cutoff3(15.2);
            l4n::CutoffFunction2 cutoff4(10.3);
            l4n::CutoffFunction2 cutoff5(12.9);
    
            /* Specify symmetry functions */
            l4n::G1 sym_f1(&cutoff1);
            l4n::G2 sym_f2(&cutoff2, 0.15, 0.75);
            l4n::G2 sym_f3(&cutoff3, 0.1, 0.2);
            l4n::G3 sym_f4(&cutoff4, 0.3);
            l4n::G4 sym_f5(&cutoff5, 0.05, true, 0.05);
            l4n::G4 sym_f6(&cutoff5, 0.05, false, 0.05);
    
            std::vector<l4n::SymmetryFunction*> helium_sym_funcs = {&sym_f1, &sym_f2, &sym_f3, &sym_f4, &sym_f5, &sym_f6};
    
            l4n::Element helium = l4n::Element("He",
                                               helium_sym_funcs);
            std::unordered_map<l4n::ELEMENT_SYMBOL, l4n::Element*> elements;
            elements[l4n::ELEMENT_SYMBOL::He] = &helium;
    
            /* Read data */
            l4n::XYZReader reader("../../data/HE21+T4.xyz");
            reader.read();
    
            std::cout << "Finished reading data" << std::endl;
    
            std::shared_ptr<l4n::DataSet> ds = reader.get_acsf_data_set(elements);
    
            /* Create a neural network */
            std::unordered_map<l4n::ELEMENT_SYMBOL, std::vector<unsigned int>> n_hidden_neurons;
            n_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {10};
    
            std::unordered_map<l4n::ELEMENT_SYMBOL, std::vector<l4n::NEURON_TYPE>> type_hidden_neurons;
            type_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {l4n::NEURON_TYPE::LOGISTIC};
    
            l4n::ACSFNeuralNetwork net(elements, *reader.get_element_list(), reader.contains_charge(), n_hidden_neurons, type_hidden_neurons);
    
            l4n::MSE mse(&net, ds.get());
    
            net.randomize_parameters();
            // optimize_via_particle_swarm(net, mse);
    		// double err1 = optimize_via_LBMQ(net, mse);
            double err2 = optimize_via_gradient_descent(net, mse);
    		
    		print_into_file("test_results_2k_BB.txt", ds, net);
    		
           /* Print fit comparison with real data */
           // std::vector<double> output;
           // output.resize(1);
    
           // for(auto e : *ds->get_data()) {
               // for(auto inp_e : e.first) {
                   // std::cout << inp_e << " ";
               // }
               // std::cout << e.second.at(0) << " ";
               // net.eval_single(e.first, output);
               // std::cout << output.at(0) << std::endl;
           // }
    
    	   if(err2 > 0.00001) {
    			throw std::runtime_error("Training was incorrect!");
    		}
    
    
        } catch (const std::exception& e) {
            std::cerr << e.what() << std::endl;
            exit(EXIT_FAILURE);
        }
    
        return 0;
    }