/**
 * Example testing the correctness of the back-propagation implementation
 * */
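
/*
 * Overview: this test builds a small 1-N-1 fully connected network with
 * logistic hidden neurons, computes the gradient of its output with respect
 * to all weights and biases twice -- once via back-propagation
 * (add_to_gradient_single) and once via the closed-form expressions in
 * calculate_gradient_analytical -- and counts how often the two agree.
 */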
#include <iostream>
#include <cstdio>
#include <fstream>
#include <utility>
#include <algorithm>
#include <cassert>
#include <cmath>      /* std::exp, std::abs */
#include <cstdlib>    /* std::exit, EXIT_FAILURE */
#include <ctime>      /* std::time, used to seed the RNG */
#include <vector>
#include <functional> /* std::function, used by the finite-difference sketch below */

#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_int_distribution.hpp>
#include <boost/random/uniform_real_distribution.hpp>

/* Header assumed to provide l4n::FullyConnectedFFN and l4n::NEURON_TYPE;
 * the original include was not preserved in this excerpt. */
#include "4neuro.h"
/* Returns the total elementwise discrepancy between two gradient vectors.
 * Only the debug print survived from the original body; the L1 metric below
 * is an assumption. */
double get_difference(std::vector<double> &a, std::vector<double> &b) {
    double out = 0.0;
    for (size_t i = 0; i < a.size(); ++i) {
        // std::cout << a[i] << " - " << b[i] << std::endl;
        out += std::abs(a[i] - b[i]);
    }
    return out;
}
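
/* A minimal central-difference sketch for sanity-checking any gradient against
 * numerical differentiation; it is not part of the original test. The callback
 * 'f' (a scalar function of the full parameter vector) and the step 'h' are
 * assumptions introduced here for illustration. 'params' is taken by value so
 * the caller's vector is left untouched. */
std::vector<double> finite_difference_gradient(const std::function<double(const std::vector<double>&)> &f,
                                               std::vector<double> params,
                                               double h = 1e-6) {
    std::vector<double> grad(params.size(), 0.0);
    for (size_t i = 0; i < params.size(); ++i) {
        const double orig = params[i];
        params[i] = orig + h;
        const double f_plus = f(params);   /* f evaluated at params[i] + h */
        params[i] = orig - h;
        const double f_minus = f(params);  /* f evaluated at params[i] - h */
        params[i] = orig;                  /* restore the parameter */
        grad[i] = (f_plus - f_minus) / (2.0 * h);
    }
    return grad;
}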
void calculate_gradient_analytical(std::vector<double> &input,
                                   std::vector<double> &parameter_biases,
                                   std::vector<double> &parameter_weights,
                                   size_t n_hidden_neurons,
                                   std::vector<double> &gradient_analytical) {
    double a, b, y, x = input[0];
    for (size_t i = 0; i < n_hidden_neurons; ++i) {
        a = parameter_weights[i];
        b = parameter_biases[i];
        y = parameter_weights[n_hidden_neurons + i];
        gradient_analytical[i] += y * x * std::exp(b - a * x) / ((1 + std::exp(b - a * x)) * (1 + std::exp(b - a * x)));
        gradient_analytical[n_hidden_neurons + i] += 1.0 / (1 + std::exp(b - a * x));
        gradient_analytical[2 * n_hidden_neurons + i] -= y * std::exp(b - a * x) / ((1 + std::exp(b - a * x)) * (1 + std::exp(b - a * x)));
    }
}
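
/* The partials accumulated above correspond to the model assumed by this test,
 *     f(x) = sum_i y_i * sigma(a_i * x - b_i),   sigma(t) = 1 / (1 + exp(-t)),
 * where a_i are the input->hidden weights, b_i the hidden biases, and y_i the
 * hidden->output weights:
 *     df/da_i =  y_i * x * exp(b_i - a_i x) / (1 + exp(b_i - a_i x))^2
 *     df/dy_i =  1 / (1 + exp(b_i - a_i x))
 *     df/db_i = -y_i * exp(b_i - a_i x) / (1 + exp(b_i - a_i x))^2
 */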

int main() {
    /* The declarations below reconstruct a preamble that was not preserved in
     * this excerpt; the concrete values are assumptions. */
    size_t n_hidden_neurons = 5; /* size of the single hidden layer */
    size_t n_tests = 10000;      /* number of random trials */
    size_t n_good = 0, n_bad = 0;

    try {
        /* Numbers of neurons in layers (including the input and output layers) */
        std::vector<unsigned int> neuron_numbers_in_layers(3);
        neuron_numbers_in_layers[0] = neuron_numbers_in_layers[2] = 1;
        neuron_numbers_in_layers[1] = n_hidden_neurons;

        /* Fully connected feed-forward network with linear activation functions for the input and output
         * layers and the specified activation functions for the hidden ones (each entry = one layer) */
        std::vector<l4n::NEURON_TYPE> hidden_type_v = { l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LOGISTIC }; // hidden_type_v = {l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LINEAR}
        l4n::FullyConnectedFFN nn1(&neuron_numbers_in_layers, &hidden_type_v);
        nn1.randomize_parameters();
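
        /* Note: the analytical check below assumes the parameter layout implied
         * by calculate_gradient_analytical: weights[0..n-1] are the input->hidden
         * weights a_i, weights[n..2n-1] the hidden->output weights y_i, and
         * biases[0..n-1] the hidden biases b_i. */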

        boost::random::mt19937 gen(std::time(0));
        boost::random::uniform_real_distribution<> dist(-1, 1);

        size_t n_parameters = nn1.get_n_weights() + nn1.get_n_biases();
        std::vector<double> gradient_backpropagation(n_parameters);
        std::vector<double> gradient_analytical(n_parameters);
        std::vector<double> *parameter_biases = nn1.get_parameter_ptr_biases();
        std::vector<double> *parameter_weights = nn1.get_parameter_ptr_weights();
        std::vector<double> error_derivative = {1};
        std::vector<double> input(1);
        std::vector<double> output(1);
        for (size_t test_idx = 0; test_idx < n_tests; ++test_idx) {
            /* A fresh random input for every trial; the loop header and the
             * input sampling are reconstructed, as the original lines were not
             * preserved in this excerpt. */
            input[0] = dist(gen);

            std::fill(gradient_backpropagation.begin(), gradient_backpropagation.end(), 0.0);
            std::fill(gradient_analytical.begin(), gradient_analytical.end(), 0.0);

            calculate_gradient_analytical(input, *parameter_biases, *parameter_weights, n_hidden_neurons, gradient_analytical);
            /* With error_derivative = {1} and scaling factor 1, back-propagation is
             * assumed to accumulate the gradient of the network output itself. */
            nn1.add_to_gradient_single(input, error_derivative, 1, gradient_backpropagation);

            double diff = get_difference(gradient_backpropagation, gradient_analytical);
            if (diff < 1e-6) { /* agreement tolerance; the threshold is an assumption */
                n_good++;
            }
            else {
                n_bad++;
            }
        }
std::cout << "Good gradients: " << n_good << ", Bad gradients: " << n_bad << std::endl;

    }
    catch (const std::exception& e) {
        std::cerr << e.what() << std::endl;
        std::exit(EXIT_FAILURE);

    }

    return 0;
}