/**
 * Example of saving a neural network to a file and loading it back.
 * Network creation and training are copied from net_test_1.
 *
 * @author Martin Beseda
 * @date 9.8.18
 */

#include <iostream>
#include <memory>
#include <utility>
#include <vector>
#include <cstdio>

#include <4neuro.h>

int main() {
    std::cout << "Running lib4neuro Serialization example 1" << std::endl;
    std::cout << "********************************************************************************************************************************************" << std::endl;
    std::cout << "First, it finds an approximate solution to the system of equations below:" << std::endl;
    std::cout << "0 * w1 + 1 * w2 = 0.50 + b" << std::endl;
    std::cout << "1 * w1 + 0.5 * w2 = 0.75 + b" << std::endl;
    std::cout << "********************************************************************************************************************************************" << std::endl;
    std::cout << "Then it stores the network with its weights into a file via serialization" << std::endl;
    std::cout << "Then it loads the network from a file via serialization" << std::endl;
    std::cout << "Finally it tests the loaded network parameters by evaluating the error function" << std::endl;
    std::cout << "********************************************************************************************************************************************" << std::endl;

    /* TRAIN DATA DEFINITION */
    std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
    std::vector<double> inp, out;

    inp = {0, 1};
    out = {0.5};
    data_vec.emplace_back(std::make_pair(inp, out));

    inp = {1, 0.5};
    out = {0.75};
    data_vec.emplace_back(std::make_pair(inp, out));

    l4n::DataSet ds(&data_vec);

    /* NETWORK DEFINITION */
    l4n::NeuralNetwork net;

    /* Input neurons */
    std::shared_ptr<l4n::NeuronLinear> i1 = std::make_shared<l4n::NeuronLinear>();
    std::shared_ptr<l4n::NeuronLinear> i2 = std::make_shared<l4n::NeuronLinear>();

    /* Output neuron */
    std::shared_ptr<l4n::NeuronLinear> o1 = std::make_shared<l4n::NeuronLinear>();

    /* Adding neurons to the net */
    size_t idx1 = net.add_neuron(i1, l4n::BIAS_TYPE::NO_BIAS);
    size_t idx2 = net.add_neuron(i2, l4n::BIAS_TYPE::NO_BIAS);
    size_t idx3 = net.add_neuron(o1, l4n::BIAS_TYPE::NEXT_BIAS);

    /* Initialize the single bias (belonging to the output neuron) to 1.0 */
    std::vector<double>* bv = net.get_parameter_ptr_biases();
    for (size_t i = 0; i < 1; ++i) {
        bv->at(i) = 1.0;
    }

    /* Adding connections */
    net.add_connection_simple(idx1, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
    net.add_connection_simple(idx2, idx3, l4n::SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);

    //net.randomize_weights();

    /* Specification of the input/output neurons */
    std::vector<size_t> net_input_neurons_indices(2);
    std::vector<size_t> net_output_neurons_indices(1);
    net_input_neurons_indices[0] = idx1;
    net_input_neurons_indices[1] = idx2;
    net_output_neurons_indices[0] = idx3;

    net.specify_input_neurons(net_input_neurons_indices);
    net.specify_output_neurons(net_output_neurons_indices);

    /* ERROR FUNCTION SPECIFICATION */
    l4n::MSE mse(&net, &ds);

    /* TRAINING METHOD SETUP */
    std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));

    for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
        domain_bounds[2 * i]     = -10;
        domain_bounds[2 * i + 1] =  10;
    }

    double c1 = 1.7;
    double c2 = 1.7;
    double w  = 0.7;
    size_t n_particles = 5;
    size_t iter_max    = 10;

    /* If the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity,
     * then the first terminating criterion is met. */
    double gamma = 0.5;

    /* If 'delta' times 'n' particles are in the centroid neighborhood given by the radius 'epsilon',
     * then the second terminating criterion is met ('n' is the total number of particles). */
    double epsilon = 0.02;
    double delta   = 0.7;

    l4n::ParticleSwarm swarm_01(
        &domain_bounds,
        c1,
        c2,
        w,
        gamma,
        epsilon,
        delta,
        n_particles,
        iter_max
    );
    swarm_01.optimize(mse);

    /* Copy the optimized parameters (w1, w2, b) into the network */
    std::vector<double>* parameters = swarm_01.get_parameters();
    net.copy_parameter_space(swarm_01.get_parameters());

    printf("w1 = %10.7f\n", parameters->at(0));
    printf("w2 = %10.7f\n", parameters->at(1));
    printf(" b = %10.7f\n", parameters->at(2));

    /* SAVE NETWORK TO THE FILE */
    std::cout << "********************************************************************************************************************************************" << std::endl;
    std::cout << "Network generated by the example" << std::endl;
    net.write_stats();
    net.save_text("saved_network.4nt");

    std::cout << "--------------------------------------------------------------------------------------------------------------------------------------------" << std::endl;
    double error = 0.0;
    inp = {0, 1};
    net.eval_single(inp, out);
    error += (0.5 - out[0]) * (0.5 - out[0]);
    std::cout << "x = (0, 1), expected output: 0.50, real output: " << out[0] << std::endl;

    inp = {1, 0.5};
    net.eval_single(inp, out);
    error += (0.75 - out[0]) * (0.75 - out[0]);
    std::cout << "x = (1, 0.5), expected output: 0.75, real output: " << out[0] << std::endl;
    std::cout << "Error of the network: " << 0.5 * error << std::endl;

    /* LOAD NETWORK FROM THE FILE */
    std::cout << "********************************************************************************************************************************************" << std::endl;
    std::cout << "Network loaded from a file" << std::endl;
    l4n::NeuralNetwork net2("saved_network.4nt");
    net2.write_stats();

    std::cout << "--------------------------------------------------------------------------------------------------------------------------------------------" << std::endl;
    error = 0.0;
    inp = {0, 1};
    net2.eval_single(inp, out);
    error += (0.5 - out[0]) * (0.5 - out[0]);
    std::cout << "x = (0, 1), expected output: 0.50, real output: " << out[0] << std::endl;

    inp = {1, 0.5};
    net2.eval_single(inp, out);
    error += (0.75 - out[0]) * (0.75 - out[0]);
    std::cout << "x = (1, 0.5), expected output: 0.75, real output: " << out[0] << std::endl;
    std::cout << "Error of the network: " << 0.5 * error << std::endl;
    std::cout << "********************************************************************************************************************************************" << std::endl;

    return 0;
}