//
// Created by martin on 25.11.18.
//
#include <iostream>
#include <cstdio>
#include <fstream>
#include <vector>
#include <utility>
#include <algorithm>
#include <assert.h>
#include <cstdlib>
#include <stdexcept>
#include "4neuro.h"
int main() {
    bool normalize_data = true;
    double prec = 1e-9;
    double prec_lm = 1e-15;
    int restart_interval = 500;
    int max_n_iters_gradient = 10000;
    int max_n_iters_gradient_lm = 10000;
    int max_n_iters_swarm = 20;
    int n_particles_swarm = 200;
    int batch_size = 0; // assumed value - batch size passed to the gradient-descent methods below (declaration missing from this listing)
    int max_number_of_cycles = 5;

    try {
        /* PHASE 1 - TRAINING DATA LOADING, NETWORK ASSEMBLY AND PARTICLE SWARM OPTIMIZATION */
        l4n::CSVReader reader1("/home/fluffymoo/Dropbox/data_BACK_RH_1.csv", ";", true); // File, separator, skip 1st line
        reader1.read(); // Read from the file

        /* PHASE 1 - NEURAL NETWORK SPECIFICATION */
        /* Create a data set for the first training of the neural network */
        /* Specify which columns are inputs and which are outputs */
        std::vector<unsigned int> inputs = { 0 };  // Multiple inputs are possible, e.g. {0, 3}; column indices start from 0
        std::vector<unsigned int> outputs = { 2 }; // Multiple outputs are possible, e.g. {1, 2}
        l4n::DataSet ds1 = reader1.get_data_set(&inputs, &outputs); // Creation of the data set for the NN
        if(normalize_data){
            ds1.normalize(); // Normalization of data to prevent numerical problems
        }

        /* Numbers of neurons in layers (including input and output layers) */
        std::vector<unsigned int> neuron_numbers_in_layers = { 1, 6, 6, 1 };

        /* Fully connected feed-forward network with linear activation functions for the input and output */
        /* layers and the specified activation functions for the hidden ones (each entry = one layer) */
        std::vector<l4n::NEURON_TYPE> hidden_type_v = { l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC,
                                                        l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC }; // e.g. hidden_type_v = { l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LINEAR }
        l4n::FullyConnectedFFN nn1(&neuron_numbers_in_layers, &hidden_type_v);

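        /* Error function to be minimized during training */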
        l4n::MSE mse1(&nn1, &ds1); // First parameter - neural network, second parameter - data set

        /* Particle Swarm method domain */
        std::vector<double> domain_bounds(2 * (nn1.get_n_weights() + nn1.get_n_biases()));
        for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
            domain_bounds[2 * i] = -0.1;
            domain_bounds[2 * i + 1] = 0.1;
        }

        // 1) domain_bounds - bounds for every optimized parameter (p1_lower, p1_upper, p2_lower, p2_upper, ...)
        // 2) c1 - cognitive parameter
        // 3) c2 - social parameter
        // 4) w - inertia weight
        // 5) gamma - threshold value for particle velocity - all particles must possess the same or slower velocity for the algorithm to end
        // 6) epsilon - radius of the cluster area (Euclidean distance)
        // 7) delta - fraction of particles which has to be inside the cluster for the algorithm to stop (0-1)
        // 8) n_particles - number of particles in the swarm
        // 9) iter_max - maximal number of iterations - optimization will stop after that, even if not converged
        l4n::ParticleSwarm ps(&domain_bounds,
                              1.711897,
                              1.711897,
                              0.711897,
                              0.5,
                              0.3,
                              0.7,
                              n_particles_swarm,
                              max_n_iters_swarm);

        // 1) Threshold for the successful ending of the optimization - deviation from the minimum
        // 2) Number of iterations to reset the step size to tolerance/10.0
        // 3) Maximal number of iterations - optimization will stop after that, even if not converged
        l4n::GradientDescent gs_(prec, restart_interval, max_n_iters_gradient, batch_size);
        l4n::GradientDescentBB gs(prec, restart_interval, max_n_iters_gradient, batch_size);
        l4n::GradientDescentSingleItem gs_si(prec, 0, 5000); // TODO: needs improvement
        l4n::LevenbergMarquardt leven(max_n_iters_gradient_lm, prec_lm);

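        /* The learning sequence applies the added optimizers one after another, for at most
         * max_number_of_cycles cycles (the first constructor argument is assumed to be an
         * error tolerance for stopping early). */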
        l4n::LearningSequence learning_sequence(1e-6, max_number_of_cycles);

        learning_sequence.add_learning_method(&ps);
        // learning_sequence.add_learning_method(&gs);
        learning_sequence.add_learning_method(&leven);
        // learning_sequence.add_learning_method(&gs_);
        // learning_sequence.add_learning_method(&gs_si);
        // learning_sequence.add_learning_method(&gs);

        /* Weight and bias randomization in the network according to the uniform distribution */
        nn1.randomize_parameters();

        /* Complex optimization */
        learning_sequence.optimize(mse1); // Network training

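        /* Save the trained network into a text file */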
        nn1.save_text("test_net_Gradient_Descent.4n");

        std::string filename = "simulator_output.txt";
        std::ofstream output_file(filename);
        if (!output_file.is_open()) {
            throw std::runtime_error("File '" + filename + "' can't be opened!");
        }

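        /* Load the saved network back from the text file */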
        l4n::NeuralNetwork nn3("test_net_Gradient_Descent.4n");

        /* Check of the saved network - write to the file */
        output_file << std::endl << "The loaded network info:" << std::endl;
        nn3.write_stats(&output_file);
        nn3.write_weights(&output_file);
        nn3.write_biases(&output_file);

        /* Evaluate the network on an arbitrary data set and save the results into the file */
        l4n::CSVReader reader3("/home/fluffymoo/Dropbox/data_BACK_RH_1.csv", ";", true); // File, separator, skip 1st line

        /* Create a data set for testing the neural network */
        /* Specify which columns are inputs or outputs */
        l4n::DataSet ds3 = reader3.get_data_set(&inputs, &outputs); // Creation of the data set for the NN
        if(normalize_data){
            ds3.normalize(); // Normalization of data to prevent numerical problems
        }

        // output_file << std::endl << "Evaluating network on the dataset: " << std::endl;
        // ds3.store_data_text(&output_file);

output_file << "Output and the error:" << std::endl;
l4n::MSE mse3(&nn3, &ds3); // First parameter - neural network, second parameter - data-set

        mse3.eval_on_data_set(&ds3, &output_file, nullptr, normalize_data, true);

        /* Close the output file for writing */
        output_file.close();

    } catch (const std::exception& e) {
        std::cerr << e.what() << std::endl;
        exit(EXIT_FAILURE);
    }

    return 0;
}