Commit 39503418 authored by Michal Kravcenko's avatar Michal Kravcenko

-corrected test2

parent f31cb7a2
......@@ -25,26 +25,35 @@ double MSE::eval(double *weights) {
std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = this->ds->get_data();
this->net->copy_weights(weights);
// //TODO instead use something smarter
// this->net->copy_weights(weights);
std::vector<double> output( dim_out );
for(unsigned int i = 0; i < n_elements; ++i){ // Iterate through every element in the test set
this->net->eval_single(std::get<0>(data->at(i)), output); // Compute the net output and store it into 'output' variable
this->net->eval_single(std::get<0>(data->at(i)), output, weights); // Compute the net output and store it into 'output' variable
// printf("errors: ");
for(unsigned int j = 0; j < dim_out; ++j) { // Compute difference for every element of the output vector
val = output[j] - std::get<1>(data->at(i))[j];
error += val * val;
// printf("%f, ", val * val);
}
// printf("\n");
}
// printf("n_elements: %d\n", n_elements);
return error/n_elements;
}
/**
 * Constructs an empty sum of error functions: no summands are registered yet
 * and the reported dimension is zero until add_error_function() is called.
 */
MSE_SUM::MSE_SUM() : summand(nullptr), dimension(0) {
}
MSE_SUM::~MSE_SUM(){
......@@ -68,18 +77,22 @@ void MSE_SUM::add_error_function(ErrorFunction *F) {
this->summand = new std::vector<ErrorFunction*>(0);
}
this->summand->push_back(F);
if(F->get_dimension() > this->dimension){
this->dimension = F->get_dimension();
}
}
/**
 * Returns the dimension of this composite error function.
 *
 * @return the largest dimension among the registered summands; kept up to
 *         date incrementally by add_error_function(), and additionally
 *         recomputed lazily here if it is still zero.
 *
 * NOTE(review): the commented-out block below duplicates the live lazy
 * recomputation above it — presumably left over from the change that moved
 * the maximum tracking into add_error_function(); confirm which variant is
 * intended before deleting either.
 */
size_t MSE_SUM::get_dimension() {
// Lazy fallback: derive the dimension from the summands on first request.
if(!this->dimension) {
size_t max = 0;
for(auto e : *this->summand) {
if(e->get_dimension() > max) {
max = e->get_dimension();
}
};
this->dimension = max;
}
// if(!this->dimension) {
// size_t max = 0;
// for(auto e : *this->summand) {
// if(e->get_dimension() > max) {
// max = e->get_dimension();
// }
// };
//
// this->dimension = max;
// }
return this->dimension;
}
\ No newline at end of file
......@@ -47,7 +47,7 @@ public:
* @param weights
* @return
*/
virtual double eval(double* weights);
virtual double eval(double* weights = nullptr);
private:
......
......@@ -45,6 +45,7 @@ Neuron* Connection::get_neuron_out() {
//}
/**
 * Propagates the signal across this connection: the input neuron's current
 * state, scaled by this connection's weight function, is added to the output
 * neuron's potential.
 */
void Connection::pass_signal() {
    const double weighted_signal = this->neuron_in->get_state() * this->con->eval();
    this->neuron_out->adjust_potential(weighted_signal);
}
......
......@@ -16,7 +16,7 @@ protected:
/**
*
*/
std::vector<double>* weight_array = nullptr;
std::vector<double> * weight_array = nullptr;
/**
*
......
......@@ -366,7 +366,7 @@ void NeuralNetwork::set_weight_array(std::vector<double> *weight_ptr) {
this->delete_weights = false;
}
void NeuralNetwork::eval_single(std::vector<double> &input, std::vector<double> &output) {
void NeuralNetwork::eval_single(std::vector<double> &input, std::vector<double> &output, double * custom_weights) {
if(!this->in_out_determined && this->n_inputs * this->n_outputs <= 0){
// this->determine_inputs_outputs();
std::cerr << "Input and output neurons have not been specified\n" << std::endl;
......@@ -384,6 +384,18 @@ void NeuralNetwork::eval_single(std::vector<double> &input, std::vector<double>
exit(-1);
}
// std::vector<double> *weight_ptr = this->connection_weights;
// std::vector<double> *custom_weight_ptr = nullptr;
//
//
if(custom_weights != nullptr){
// custom_weight_ptr = new std::vector<double>(custom_weights, custom_weights + this->n_weights);
// this->connection_weights = custom_weight_ptr;
this->connection_weights->assign(custom_weights, custom_weights + this->n_weights);
}
std::fill(output.begin(), output.end(), 0.0);
//reset of the potentials
......@@ -416,7 +428,7 @@ void NeuralNetwork::eval_single(std::vector<double> &input, std::vector<double>
neuron->set_potential(input[i]);
//printf("INPUT NEURON %2d, POTENTIAL: %f\n", i, input[i]);
// printf("INPUT NEURON %d, POTENTIAL: %f\n", neuron, input[i]);
++i;
}
......@@ -429,6 +441,7 @@ void NeuralNetwork::eval_single(std::vector<double> &input, std::vector<double>
for(i = 0; i < active_set_size[idx1]; ++i){
active_neuron = this->active_eval_set->at(i + n * idx1);
active_neuron->activate();
// printf(" active neuron %d, state: %f\n", active_neuron, active_neuron->get_state());
for(Connection* connection: *(active_neuron->get_connections_out())){
connection->pass_signal();
......@@ -454,12 +467,12 @@ void NeuralNetwork::eval_single(std::vector<double> &input, std::vector<double>
output[i] = neuron->get_state();
//printf("OUTPUT NEURON %2d, VALUE: %f\n", i, output[i]);
// printf("OUTPUT NEURON %d, VALUE: %f\n", neuron, output[i]);
++i;
}
// this->connection_weights = weight_ptr;
}
void NeuralNetwork::copy_weights(double *weights) {
......
......@@ -113,7 +113,7 @@ public:
* @param[in] input
* @param[in,out] output
*/
void eval_single(std::vector<double> &input, std::vector<double> &output);
void eval_single(std::vector<double> &input, std::vector<double> &output, double *custom_weights = nullptr);
......
/**
* Example of a set neural networks dependent on each other
* Example of a neural network with reused edge weights
* The system of equations associated with the net in this example is not regular
* minimizes the function: ((2y+0.5)^2 + (2x+1)^2 + (2x + y + 0.25)^2 + (2x+1)^2 + 1 + (4.5x + 0.37)^2 ) /3
* minimum [0.705493164] at (x, y) = (-1133/6290, -11193/62900) = (-0.180127186, -0.177949126)
*/
//
// Created by martin on 7/16/18.
// Created by Michal on 7/17/18.
//
#include <vector>
......@@ -14,24 +17,21 @@
int main() {
/* TRAIN DATA DEFINITION */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_01, data_vec_02;
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
std::vector<double> inp, out;
inp = {0, 1};
out = {0.5};
data_vec_01.emplace_back(std::make_pair(inp, out));
inp = {0, 1, 0};
out = {0.5, 0};
data_vec.emplace_back(std::make_pair(inp, out));
inp = {1, 0.5};
out = {0.75};
data_vec_01.emplace_back(std::make_pair(inp, out));
inp = {1, 0.5, 0};
out = {0.75, 0};
data_vec.emplace_back(std::make_pair(inp, out));
DataSet ds_01(&data_vec_01);
inp = {1.25};
out = {0.63};
data_vec_02.emplace_back(std::make_pair(inp, out));
DataSet ds_02(&data_vec_02);
inp = {0, 0, 1.25};
out = {0, 0.63};
data_vec.emplace_back(std::make_pair(inp, out));
DataSet ds(&data_vec);
/* NETWORK DEFINITION */
NeuralNetwork net;
......@@ -78,26 +78,15 @@ int main() {
net.specify_output_neurons(net_output_neurons_indices);
/* CONSTRUCTION OF SUBNETWORKS */
std::vector<size_t> subnet_01_input_neurons, subnet_01_output_neurons;
std::vector<size_t> subnet_02_input_neurons, subnet_02_output_neurons;
subnet_01_input_neurons.push_back(idx1);
subnet_01_input_neurons.push_back(idx2);
subnet_01_output_neurons.push_back(idx3);
NeuralNetwork *subnet_01 = net.get_subnet(subnet_01_input_neurons, subnet_01_output_neurons);
subnet_02_input_neurons.push_back(idx4);
subnet_02_output_neurons.push_back(idx5);
NeuralNetwork *subnet_02 = net.get_subnet(subnet_02_input_neurons, subnet_02_output_neurons);
/* COMPLEX ERROR FUNCTION SPECIFICATION */
MSE mse_01(subnet_01, &ds_01);
MSE mse_02(subnet_02, &ds_02);
MSE mse(&net, &ds);
// double weights[2] = {-0.18012411, -0.17793740};
// double weights[2] = {1, 1};
MSE_SUM mse_sum;
mse_sum.add_error_function( &mse_01 );
mse_sum.add_error_function( &mse_02 );
// printf("evaluation of error at point (%f, %f) => %f\n", weights[0], weights[1], mse.eval(weights));
/* TRAINING METHOD SETUP */
unsigned int n_edges = 2;
......@@ -109,14 +98,13 @@ int main() {
double c1 = 0.5, c2 = 1.5, w = 0.8;
unsigned int n_particles = 10000;
unsigned int n_particles = 100;
ParticleSwarm swarm_01(&mse_sum, domain_bounds, c1, c2, w, n_particles, max_iters);
ParticleSwarm swarm_01(&mse, domain_bounds, c1, c2, w, n_particles, max_iters);
swarm_01.optimize(0.5, 0.02);
swarm_01.optimize(0.5, 0.02, 0.9);
printf("evaluation of error: %f\n", mse.eval());
delete subnet_02;
delete subnet_01;
return 0;
}
\ No newline at end of file
/**
* Example of a set of neural networks sharing some edge weights
*/
//
// Created by martin on 7/16/18.
//
#include <vector>
#include <utility>
#include "../include/4neuro.h"
int main() {
/* TRAIN DATA DEFINITION */
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec_01, data_vec_02;
std::vector<double> inp, out;
inp = {0, 1, 0};
out = {0.5, 0};
data_vec_01.emplace_back(std::make_pair(inp, out));
inp = {1, 0.5, 0};
out = {0.75, 0};
data_vec_01.emplace_back(std::make_pair(inp, out));
// DataSet ds_01(&data_vec_01);
inp = {1.25};
out = {0.63};
data_vec_02.emplace_back(std::make_pair(inp, out));
DataSet ds_02(&data_vec_02);
/* NETWORK DEFINITION */
NeuralNetwork net;
/* Input neurons */
NeuronLinear *i1 = new NeuronLinear(0.0, 1.0); //f(x) = x
NeuronLinear *i2 = new NeuronLinear(0.0, 1.0); //f(x) = x
NeuronLinear *i3 = new NeuronLinear(1, 1); //f(x) = x + 1
/* Output neurons */
NeuronLinear *o1 = new NeuronLinear(1.0, 2.0); //f(x) = 2x + 1
NeuronLinear *o2 = new NeuronLinear(1, 2); //f(x) = 2x + 1
/* Adding neurons to the nets */
int idx1 = net.add_neuron(i1);
int idx2 = net.add_neuron(i2);
int idx3 = net.add_neuron(o1);
int idx4 = net.add_neuron(i3);
int idx5 = net.add_neuron(o2);
/* Adding connections */
//net.add_connection_simple(idx1, idx3, -1, 1.0);
//net.add_connection_simple(idx2, idx3, -1, 1.0);
net.add_connection_simple(idx1, idx3); // weight index 0
net.add_connection_simple(idx2, idx3); // weight index 1
net.add_connection_simple(idx4, idx5, 0); // AGAIN weight index 0 - same weight!
net.randomize_weights();
/* specification of the input/output neurons */
std::vector<size_t> net_input_neurons_indices(3);
std::vector<size_t> net_output_neurons_indices(2);
net_input_neurons_indices[0] = idx1;
net_input_neurons_indices[1] = idx2;
net_input_neurons_indices[2] = idx4;
net_output_neurons_indices[0] = idx3;
net_output_neurons_indices[1] = idx5;
net.specify_input_neurons(net_input_neurons_indices);
net.specify_output_neurons(net_output_neurons_indices);
/* CONSTRUCTION OF SUBNETWORKS */
std::vector<size_t> subnet_01_input_neurons, subnet_01_output_neurons;
std::vector<size_t> subnet_02_input_neurons, subnet_02_output_neurons;
subnet_01_input_neurons.push_back(idx1);
subnet_01_input_neurons.push_back(idx2);
subnet_01_output_neurons.push_back(idx3);
NeuralNetwork *subnet_01 = net.get_subnet(subnet_01_input_neurons, subnet_01_output_neurons);
subnet_02_input_neurons.push_back(idx4);
subnet_02_output_neurons.push_back(idx5);
NeuralNetwork *subnet_02 = net.get_subnet(subnet_02_input_neurons, subnet_02_output_neurons);
/* COMPLEX ERROR FUNCTION SPECIFICATION */
MSE mse_01(subnet_01, &ds_01);
MSE mse_02(subnet_02, &ds_02);
MSE_SUM mse_sum;
mse_sum.add_error_function( &mse_01 );
mse_sum.add_error_function( &mse_02 );
/* TRAINING METHOD SETUP */
unsigned int n_edges = 2;
unsigned int dim = n_edges, max_iters = 5000;
//must encapsulate each of the partial error functions
double domain_bounds[4] = {-800.0, 800.0, -800.0, 800.0};
double c1 = 0.5, c2 = 1.5, w = 0.8;
unsigned int n_particles = 10;
ParticleSwarm swarm_01(&mse_sum, domain_bounds, c1, c2, w, n_particles, max_iters);
swarm_01.optimize(0.5, 0.02);
delete subnet_02;
delete subnet_01;
return 0;
}
\ No newline at end of file
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment