simulator.cpp
    //
    // Created by martin on 25.11.18.
    //
    
    /**
     * This file serves for testing of various examples, have fun!
     *
     * @author Michal Kravčenko
     * @date 14.6.18 -
     */
    
    #include <iostream>
    #include <cstdio>
    #include <fstream>
    #include <vector>
    #include <utility>
    #include <algorithm>
    #include <cassert>
    
    #include "4neuro.h"
    
    int main(int argc, char** argv){

        try {
            /* TO BE CHANGED BY USER: Read data from the file */
            l4n::CSVReader reader("/tmp/lib4neuro_pokus/simulator_input.txt", "\t", true);  // File, separator, skip 1st line
    
            reader.read();  // Read from the file
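
            /* A sketch of the expected input-file layout (the values and column names
             * are made up, not part of the original example): columns are tab-separated,
             * the first (header) line is skipped, and with the index vectors below
             * column 0 is used as the input and column 1 as the output:
             *
             *     x <TAB> y
             *     0.0 <TAB> 0.10
             *     0.5 <TAB> 0.48
             *     1.0 <TAB> 0.84
             */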
    
            /* TO BE CHANGED BY USER: Open file for writing */
    
            std::string filename = "simulator_output.txt";
            std::ofstream output_file(filename);
            if(!output_file.is_open()) {
                throw std::runtime_error("File '" + filename + "' can't be opened!");
            }
    
    
            /* TO BE CHANGED BY USER: Create data set for both the training and testing of the neural network */
            std::vector<unsigned int> inputs = { 0 };  // Multiple input columns are possible, e.g. {0,3}; column indices start from 0

            std::vector<unsigned int> outputs = { 1 };  // Multiple output columns are possible, e.g. {1,2}
    
            l4n::DataSet ds = reader.get_data_set(&inputs, &outputs);  // Creation of data-set for NN
            ds.normalize();  // Normalization of data to prevent numerical problems
    
    //        ds.print_data();  // Printing of data-set to check it
    
            /* Neural network construction */
    
            // TO BE CHANGED BY USER: Numbers of neurons in layers (including input and output layers)
            std::vector<unsigned int> neuron_numbers_in_layers = {1, 10, 1};
    
    
            // Creation of a fully connected feed-forward network with linear activation functions for the input and
            // output layers and the specified activation functions for the hidden ones
    
            // TO BE CHANGED BY USER: NEURON_TYPE
            std::vector<l4n::NEURON_TYPE> hidden_type_v = {l4n::NEURON_TYPE::LOGISTIC}; // hidden_type_v = {l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LINEAR}
            l4n::FullyConnectedFFN nn(&neuron_numbers_in_layers, &hidden_type_v);
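
            // A sketch of a deeper architecture (not part of the original example), assuming
            // one entry in hidden_type_v per hidden layer, which the {LOGISTIC, LINEAR} hint
            // above suggests. Kept commented out so it does not clash with the network
            // constructed above:
            //
            //     std::vector<unsigned int> deeper_layers = {1, 10, 10, 1};
            //     std::vector<l4n::NEURON_TYPE> deeper_types = {l4n::NEURON_TYPE::LOGISTIC,
            //                                                    l4n::NEURON_TYPE::LINEAR};
            //     l4n::FullyConnectedFFN nn_deeper(&deeper_layers, &deeper_types);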
    
            /* Error function */
    
            l4n::MSE mse(&nn, &ds);  // First parameter - neural network, second parameter - data-set
    
            /* Domain - important for Particle Swarm method */
    
            std::vector<double> domain_bounds(2 * (nn.get_n_weights() + nn.get_n_biases()));
    
            for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
                domain_bounds[2 * i] = -10;
                domain_bounds[2 * i + 1] = 10;
            }
    
    
            /* Training method */
    
    
            // Parameters
            // 1) domain_bounds Bounds for every optimized parameter (p1_lower, p1_upper, p2_lower, p2_upper...)
            // 2) c1 Cognitive parameter
            // 3) c2 Social parameter
            // 4) w Inertia weight
            // 5) gamma Threshold value for particle velocity - all particles must possess the same or slower velocity for the algorithm to end
            // 6) epsilon Radius of the cluster area (Euclidean distance)
            // 7) delta Fraction of particles (0-1) that have to be inside the cluster for the algorithm to stop
            // 8) n_particles Number of particles in the swarm
            // 9) iter_max Maximal number of iterations - optimization will stop after that, even if not converged
    
            //    BEGIN: TO BE (UN)COMMENTED AND CHANGED BY USER
            //    l4n::ParticleSwarm ps(&domain_bounds,
            //                          1.711897,
            //                          1.711897,
            //                          0.711897,
            //                          0.5,
            //                          20,
            //                          0.7,
            //                          600,
            //                          1000);
            //    END: TO BE (UN)COMMENTED AND CHANGED BY USER
    
    
            // Parameters
            // 1) Threshold for the successful ending of the optimization - deviation from minima
            // 2) Number of iterations after which the step size is reset to tolerance/10.0
            // 3) Maximal number of iterations - optimization will stop after that, even if not converged
    
            //    BEGIN: TO BE (UN)COMMENTED AND CHANGED BY USER
            l4n::GradientDescent gs(1e-4, 100, 200);
            //    END: TO BE (UN)COMMENTED AND CHANGED BY USER
    
    
            // Weight and bias randomization in the network according to the uniform distribution
            // Calling methods nn.randomize_weights() and nn.randomize_biases()
            nn.randomize_parameters();
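
            // Equivalently (a sketch based on the comment above), the two steps can be
            // called separately; kept commented out because randomize_parameters()
            // already performs both:
            //
            //     nn.randomize_weights();
            //     nn.randomize_biases();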
    
    
    //    BEGIN: TO BE (UN)COMMENTED AND CHANGED BY USER

    //        gs.optimize(mse);  // Network training

    //    END: TO BE (UN)COMMENTED AND CHANGED BY USER
    //
    //    Evaluate a given input
    //    BEGIN: TO BE (UN)COMMENTED AND CHANGED BY USER
    
    //        std::vector<double> i(ds.get_input_dim());
    //        std::vector<double> o(ds.get_output_dim());
    //        nn.eval_single(i, o);  // Evaluate network for one input and save the result into the output vector
    
    //    END: TO BE (UN)COMMENTED AND CHANGED BY USER
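
    //    A minimal sketch (not part of the original example) of evaluating one concrete,
    //    already-normalized input and printing the result; the value 0.5 is purely
    //    illustrative:
    //
    //        std::vector<double> in_single = { 0.5 };
    //        std::vector<double> out_single(ds.get_output_dim());
    //        nn.eval_single(in_single, out_single);
    //        std::cout << "Network output for 0.5: " << out_single[0] << std::endl;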
    
    
            /* Cross-validation */
    
    //    BEGIN: TO BE (UN)COMMENTED AND CHANGED BY USER
    
            l4n::CrossValidator cv(&gs, &mse);
    
            // Parameters:
            // 1) Number of data-set parts used for CV
            // 2) Number of tests performed
            // 3) Output stream to which the cross-validation results are written (here the opened output file)
            cv.run_k_fold_test(10, 3, &output_file);
    
    //    END: TO BE (UN)COMMENTED AND CHANGED BY USER
    
            /* Save network to the text file */
    
            nn.save_text("test_net.4n");
    
    
            /* Check of the saved network - print to STDOUT */
    
            std::cout << std::endl << "The original network info:" << std::endl;
    
            nn.write_stats();
            nn.write_weights();
            nn.write_biases();
    
            l4n::NeuralNetwork nn_loaded("test_net.4n");
    
    //      end: network loading
            std::cout << std::endl << "The loaded network info:" << std::endl;
    
            nn_loaded.write_stats();
            nn_loaded.write_weights();
            nn_loaded.write_biases();
    
            /* Check of the saved network - write to the file */
            output_file << std::endl << "The original network info:" << std::endl;
            nn.write_stats(&output_file);
            nn.write_weights(&output_file);
            nn.write_biases(&output_file);
    
            output_file << std::endl << "The loaded network info:" << std::endl;
            nn_loaded.write_stats(&output_file);
            nn_loaded.write_weights(&output_file);
            nn_loaded.write_biases(&output_file);
    
            /* Evaluate network on an arbitrary data-set and save results into the file */
    
            /* TO BE CHANGED BY USER: Read data from the file */
            l4n::CSVReader reader2("/tmp/lib4neuro_pokus/simulator_input.txt", "\t", true);  // File, separator, skip 1st line
            reader2.read();  // Read from the file
    
            /* TO BE CHANGED BY USER: Create data set for both the training and testing of the neural network */
            std::vector<unsigned int> inputs2 = { 0 };  // Multiple input columns are possible, e.g. {0,3}; column indices start from 0
            std::vector<unsigned int> outputs2 = { 1 };  // Multiple output columns are possible, e.g. {1,2}
    
            l4n::DataSet ds2 = reader2.get_data_set(&inputs2, &outputs2);  // Creation of data-set for NN
            ds2.normalize();  // Normalization of data to prevent numerical problems
    
            /* TO BE CHANGED BY USER: Example of evaluation of a single input, normalized input, de-normalized output */
    //        std::vector<double> input_norm(ds2.get_input_dim()),
    //                            input(ds2.get_input_dim()),
    //                            output_norm(ds2.get_output_dim()),
    //                            expected_output_norm(ds2.get_output_dim()),
    //                            output(ds2.get_output_dim()),
    //                            expected_output(ds2.get_output_dim());
    //
    //        size_t data_idx = 0;
    //        ds2.get_input(input_norm, data_idx);
    //        ds2.get_output(expected_output_norm, data_idx);
    
    //        for(size_t i = 0; i < ds2.get_n_elements(); i++) {
    //            data_idx = i;
    //            ds2.get_input(input_norm, data_idx);
    //            ds2.get_output(expected_output_norm, data_idx);
    //            nn_loaded.eval_single(input_norm, output_norm);
    //            ds2.de_normalize_single(output_norm, output);
    //            ds2.de_normalize_single(input_norm, input);
    //            ds2.de_normalize_single(expected_output_norm, expected_output);
    //        }
    
            output_file << std::endl << "Evaluating network on the dataset: " << std::endl;
            ds2.store_data_text(&output_file);
    
            output_file << "Output and the error:" << std::endl;
    //     Warning: the evaluation below returns normalized data
            mse.eval_on_data_set(&ds2, &output_file);
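
            // A hedged sketch (not in the original example) of writing one de-normalized
            // prediction for the first data point, using only calls that appear in the
            // commented block above; kept commented out so the written output stays unchanged:
            //
            //     std::vector<double> in_n(ds2.get_input_dim()), out_n(ds2.get_output_dim()), out_dn(ds2.get_output_dim());
            //     ds2.get_input(in_n, 0);
            //     nn_loaded.eval_single(in_n, out_n);
            //     ds2.de_normalize_single(out_n, out_dn);
            //     output_file << "De-normalized prediction for the first sample: " << out_dn[0] << std::endl;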
    
    
            /* Close the output file for writing */
            output_file.close();
    
        } catch(const std::exception& e) {