Commit df2f6a73 authored by Michal Kravcenko's avatar Michal Kravcenko

Commit after merge

parent 00085cad
......@@ -66,6 +66,7 @@ set(CMAKE_LIBRARY_OUTPUT_DIRECTORY lib)
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY lib)
include_directories(include ${SRC_DIR})
add_subdirectory(${SRC_DIR} ${PROJECT_BINARY_DIR})
message ("Current directory:" ${CMAKE_CURRENT_SOURCE_DIR})
......
#!/bin/bash
#!/bin/sh
#------------#------------------------------------------------------------
# Parameters #
......@@ -9,11 +9,11 @@
# FORTRAN_COMPILER=gfortran
# Build type (Release/Debug)
BUILD_TYPE=Debug
CXX_COMPILER="g++-8"
CXX_COMPILER="g++"
rm -rf build CMakeCache.txt cmake_install.cmake CMakeFiles;
#cmake -G "${MAKEFILE_TYPE}" -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_CXX_COMPILER=g++-8 .
cmake -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_CXX_COMPILER=${CXX_COMPILER} .
cmake -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_CXX_COMPILER=${CXX_COMPILER} .
#make VERBOSE=1 -j ${N_CORES} && echo "Build complete." || exit -1;
cmake --build . --config ${BUILD_TYPE} -- -j${N_CORES}
cmake --build . --config ${BUILD_TYPE} -- -j${N_CORES} && (tput setaf 2; echo "Build complete."; tput sgr 0; ) || (tput setaf 1; echo "Build finished with errors!"; tput sgr 0; exit 1;)
#make install;
#TODO make atomic libraries AND also one large library containing all others + one large header
#add_subdirectory(tests bin/unit-tests)
add_subdirectory(examples bin/examples)
add_library(4neuro SHARED
Neuron/Neuron.cpp
Neuron/NeuronBinary.cpp
Neuron/NeuronConstant.cpp
Neuron/NeuronLinear.cpp
Neuron/NeuronLogistic.cpp
Neuron/NeuronTanh.cpp
NetConnection/Connection.cpp
Network/NeuralNetwork.cpp
Neuron/NeuronNeuralNet.cpp
NetConnection/ConnectionWeight.cpp
NetConnection/ConnectionWeightIdentity.cpp
Network/NeuralNetworkSum.cpp
NetConnection/ConnectionFunctionGeneral.cpp
NetConnection/ConnectionFunctionIdentity.cpp
LearningMethods/ParticleSwarm.cpp
DataSet/DataSet.cpp
ErrorFunction/ErrorFunctions.cpp Network/NeuralNetworkSum.cpp Network/NeuralNetworkSum.h Solvers/DESolver.cpp Solvers/DESolver.h)
ErrorFunction/ErrorFunctions.cpp
Solvers/DESolver.cpp
)
target_link_libraries(4neuro boost_serialization)
......
......@@ -368,14 +368,38 @@ NeuralNetwork* NeuralNetwork::get_subnet(std::vector<size_t> &input_neuron_indic
size_t NeuralNetwork::add_neuron(Neuron *n, int bias_idx) {
size_t local_b_idx = 0;
if(bias_idx < 0){
local_b_idx = this->last_used_bias_idx;
size_t local_b_idx = (size_t)bias_idx;
if(this->neuron_biases->size() <= local_b_idx){
std::cerr << "Additional neuron cannot be added! The bias index is too large\n" << std::endl;
exit(-1);
}
else{
local_b_idx = (size_t)bias_idx;
this->outward_adjacency->push_back(new std::vector<std::pair<size_t, size_t>>(0));
this->inward_adjacency->push_back(new std::vector<std::pair<size_t, size_t>>(0));
this->neurons->push_back(n);
n->set_bias( &(this->neuron_biases->at( local_b_idx )) );
this->in_out_determined = false;
this->layers_analyzed = false;
this->n_neurons++;
this->neuron_potentials->resize(this->n_neurons);
return this->n_neurons - 1;
}
size_t NeuralNetwork::add_neuron(Neuron *n, BIAS_TYPE bt) {
size_t local_b_idx = 0;
if(bt == NOCHANGE){
return this->add_neuron_no_bias( n );
}
local_b_idx = this->last_used_bias_idx;
if(this->neuron_biases->size() <= local_b_idx){
std::cerr << "Additional neuron cannot be added! The bias index is too large\n" << std::endl;
exit(-1);
......
......@@ -21,6 +21,8 @@
enum NET_TYPE{GENERAL};
enum BIAS_TYPE{NEXT, NOCHANGE};
/**
*
......@@ -218,13 +220,21 @@ public:
* @param[in] n
* @return
*/
size_t add_neuron(Neuron* n, int bias_idx = -1 );
size_t add_neuron(Neuron* n, int bias_idx );
/**
* Adds a new neuron to the list of neurons. Also assigns a valid bias value to its activation function
* @param[in] n
* @return
*/
size_t add_neuron(Neuron* n, BIAS_TYPE bt = NOCHANGE );
/**
* Adds a new neuron to this network, does not touch its bias.
* @param n
* @return
*/
//TODO reformulate to use add_neuron(, BIAS_TYPE)
size_t add_neuron_no_bias(Neuron *n);
/**
......
......@@ -20,3 +20,6 @@ target_link_libraries(net_test_3 4neuro)
# ODE example target, built from net_test_ode_1.cpp and linked against the 4neuro library.
add_executable(net_test_ode_1 net_test_ode_1.cpp)
target_link_libraries(net_test_ode_1 4neuro)
# PDE example target, built from net_test_pde_1.cpp and linked against the 4neuro library.
add_executable(net_test_pde_1 net_test_pde_1.cpp)
target_link_libraries(net_test_pde_1 4neuro)
/**
* DESCRIPTION OF THE FILE
* This file serves for testing various examples — have fun!
*
* @author Michal Kravčenko
* @date 14.6.18 -
......@@ -16,256 +16,16 @@
#include "Network/NeuralNetwork.h"
#include "Neuron/NeuronLinear.h"
#include "Neuron/NeuronLogistic.h"
#include "NetConnection/Connection.h"
#include "NetConnection/ConnectionWeightIdentity.h"
#include "NetConnection/ConnectionFunctionIdentity.h"
#include "LearningMethods/ParticleSwarm.h"
#include "Neuron/NeuronBinary.h"
#include "Neuron/NeuronTanh.h"
#include "DataSet/DataSet.h"
//TODO rewrite "tests" to separate examples
//TODO rewrite so that the neural network manages the destructors of all required objects (because of serialization)
/**
* Test of simple neural network
* Network should evaluate the function f(x) = x + 1
*/
void test1( ){
std::vector<double> in(1);
std::vector<double> out(1);
NeuralNetwork net;
NeuronLinear* u1 = new NeuronLinear(1.0, 1.0); //f(x) = x + 1.0
NeuronLinear* u2 = new NeuronLinear(0.0, 1.0); //f(x) = x
int idx1 = net.add_neuron(u1);
int idx2 = net.add_neuron(u2);
////////////////////// SIMPLE EDGE WEIGHT ////////////////////////////////////////
// net.add_connection_simple(idx1, idx2, -1, 1.0);
////////////////////// END SIMPLE EDGE WEIGHT ////////////////////////////////////////
/////////////////////////BEGIN OF COMPLEX EDGE WEIGHT//////////////////////////////
std::function<double(double *, size_t*, size_t)> weight_function = [](double * weight_array, size_t * index_array, size_t n_params){
//w(x, y) = x + y
double a = weight_array[index_array[0]];
double b = weight_array[index_array[1]];
// printf("eval: %f, %f\n", a, b);
return (a + 0.0 * b);
};
size_t weight_indices [2] = {0, 1};
double weight_values [2] = {1.0, 5.0};
net.add_connection_general(idx1, idx2, &weight_function, weight_indices, weight_values, 2);
/////////////////////////END OF COMPLEX EDGE WEIGHT//////////////////////////////
for(int i = 0; i < 20; ++i){
in[0] = 0.05 * i;
net.eval_single(in, out);
printf("x = %3.2f, f(x) = %3.2f, expected output = %3.2f\n", in[0], out[0], in[0] + 1.0);
}
//clean-up phase
delete u1;
delete u2;
}
/**
* Test of DataSet serialization
*/
/**
 * Maps an input vector to {2s, 3s, 4s}, where s is the sum of its elements.
 * Used as the generator function when building an isotropic DataSet.
 * @param[in] v input vector (taken by value to match the expected callback type)
 * @return three-element vector of the scaled sums
 */
std::vector<double> out_f(std::vector<double> v) {
    double total = 0.0;
    for (std::size_t i = 0; i < v.size(); ++i) {
        total += v[i];
    }
    return std::vector<double>{total * 2, total * 3, total * 4};
}
void test3() {
/* Manually created data set */
std::cout << "Manually created data set" << std::endl;
std::vector<std::pair<std::vector<double>, std::vector<double>>> data_vec;
std::vector<double> inp, out;
for(int i = 0; i < 3; i++) {
inp.push_back(i);
out.push_back(i+4);
}
data_vec.emplace_back(std::make_pair(inp, out));
DataSet ds(&data_vec);
ds.print_data();
ds.store_text("stored_data.4ds");
DataSet ds2("stored_data.4ds");
ds2.print_data();
/* Isotropic data set */
std::cout << "Isotropic data set:" << std::endl;
std::vector<double> bounds{0,3,0,2};
std::cout << "original one:" << std::endl;
DataSet ds3(bounds, 5, out_f, 3);
ds3.print_data();
ds3.store_text("stored_data2.4ds");
std::cout << "loaded one:" << std::endl;
DataSet ds4("stored_data2.4ds");
ds4.print_data();
}
/**
 * Benchmark objective for the particle swarm optimizer: f(x) = x[0]^2,
 * minimized at x[0] = 0. Only the first coordinate is read.
 * @param[in] x pointer to the coordinate array
 * @return squared first coordinate
 */
double particle_swarm_test_function(double *x){
    const double first = x[0];
    return first * first;
}
/* Globals shared by test_particle_swarm_neural_net_error_function and the
 * (currently disabled) particle-swarm training example below. */
NeuralNetwork net;
std::vector<std::vector<double>*> *train_data_input;  // training inputs, one vector per sample
std::vector<std::vector<double>*> *train_data_output; // expected outputs, index-aligned with the inputs
/**
 * Swarm objective: loads the candidate weights into the global network and
 * returns half the summed squared error over the global training data.
 * @param[in] weights candidate weight vector, laid out as net.copy_weights expects
 * @return 0.5 * sum of squared per-component output errors
 */
double test_particle_swarm_neural_net_error_function(double *weights){
    net.copy_weights(weights);

    unsigned int dim_out = train_data_output->at(0)->size();
    std::vector<double> output( dim_out );

    double accumulated = 0.0;
    for(unsigned int sample = 0; sample < train_data_input->size(); ++sample){
        net.eval_single(*train_data_input->at(sample), output);
        for(unsigned int comp = 0; comp < dim_out; ++comp){
            double diff = output[comp] - train_data_output->at(sample)->at(comp);
            accumulated += diff * diff;
        }
    }

    return 0.5 * accumulated;
}
//TODO proper memory management
/**
 * Particle-swarm training example over a tiny 2-input/1-output linear
 * network. The entire body is currently commented out (kept as reference),
 * so calling this function is a no-op until it is re-enabled.
 */
void test_particle_swarm_neural_net(){
// unsigned int dim_in = 2;
// unsigned int dim_out = 1;
//
// /* TRAIN DATA DEFINITION */
// train_data_input = new std::vector<std::vector<double>*>();
// train_data_output = new std::vector<std::vector<double>*>();
//
// std::vector<double> *input_01 = new std::vector<double>(dim_in);
// std::vector<double> *input_02 = new std::vector<double>(dim_in);
//
// std::vector<double> *output_01 = new std::vector<double>(dim_out);
// std::vector<double> *output_02 = new std::vector<double>(dim_out);
//
// (*input_01)[0] = 0.0;
// (*input_01)[1] = 1.0;
// (*output_01)[0] = 0.5;
//
// (*input_02)[0] = 1.0;
// (*input_02)[1] = 0.5;
// (*output_02)[0] = 0.75;
//
// train_data_input->push_back(input_01);
// train_data_output->push_back(output_01);
//
// train_data_input->push_back(input_02);
// train_data_output->push_back(output_02);
// /* NETWORK DEFINITION */
//
//
// NeuronLinear* i1 = new NeuronLinear(0.0, 1.0); //f(x) = x
// NeuronLinear* i2 = new NeuronLinear(0.0, 1.0); //f(x) = x
//
//// NeuronLogistic* o1 = new NeuronLogistic(1.0, 0.0); //f(x) = (1 + e^(-x + 0.0))^(1.0)
// NeuronLinear* o1 = new NeuronLinear(1.0, 2.0); //f(x) = 2x + 1
//
// int idx1 = net.add_neuron(i1);
// int idx2 = net.add_neuron(i2);
// int idx3 = net.add_neuron(o1);
//
// net.add_connection_simple(idx1, idx3, -1, 1.0);
// net.add_connection_simple(idx2, idx3, -1, 1.0);
//
// /* PARTICLE SETUP */
// double (*F)(double*) = &test_particle_swarm_neural_net_error_function;
//
// unsigned int n_edges = 2;
// unsigned int dim = n_edges, max_iters = 2000;
//
//
// double domain_bounds [4] = {-800.0, 800.0, -800.0, 800.0};
//
// double c1 = 0.5, c2 = 1.5, w = 0.8;
//
// unsigned int n_particles = 10;
//
// ParticleSwarm swarm_01(F, dim, domain_bounds, c1, c2, w, n_particles, max_iters);
//
// swarm_01.optimize(0.5, 0.02);
//
// /* CLEANUP PHASE */
// for( std::vector<double> *input: *train_data_input){
// delete input;
// }
// for( std::vector<double> *output: *train_data_output){
// delete output;
// }
//
// delete train_data_output;
// delete train_data_input;
}
//
//void test_particle_swarm(){
// double (*F)(double*) = &particle_swarm_test_function;
//
// unsigned int dim = 3, max_iters = 100;
//
//// double domain_bounds [2] = {2.0, 3.0};
// double domain_bounds [6] = {-3.0, 3.0, 2.0, 5.0, 1.0, 15.0};
//
// double c1 = 0.5, c2 = 1.5, w = 1.0;
//
// unsigned int n_particles = 1000;
//
// double accuracy = 1e-6;
//
// ParticleSwarm swarm_01(F, dim, domain_bounds, c1, c2, w, n_particles, max_iters);
//
// swarm_01.optimize(0.5, accuracy);
//}
/**
 * Entry point: runs the particle-swarm neural-network example. The remaining
 * examples are kept around, disabled, for manual experimentation.
 */
int main(int argc, char** argv){
    /* other experiments, enable as needed:
     * test1(); test_particle_swarm(); test2(); test3(); */
    test_particle_swarm_neural_net();
    return 0;
}
......@@ -28,14 +28,15 @@ int main() {
DataSet ds(&data_vec);
/* NETWORK DEFINITION */
NeuralNetwork net;
NeuralNetwork net(4, 0);
/* Input neurons */
NeuronLinear *i1 = new NeuronLinear(0.0, 1.0); //f(x) = x
NeuronLinear *i2 = new NeuronLinear(0.0, 1.0); //f(x) = x
NeuronLinear *i1 = new NeuronLinear( ); //f(x) = x
NeuronLinear *i2 = new NeuronLinear( ); //f(x) = x
/* Output neuron */
NeuronLinear *o1 = new NeuronLinear(1.0, 2.0); //f(x) = 2x + 1
double b = 1.0;//bias
NeuronLinear *o1 = new NeuronLinear( &b ); //f(x) = x + 1
......@@ -66,8 +67,7 @@ int main() {
MSE mse(&net, &ds);
/* TRAINING METHOD SETUP */
unsigned int n_edges = 2;
unsigned int dim = n_edges, max_iters = 2000;
unsigned int max_iters = 2000;
double domain_bounds[4] = {-800.0, 800.0, -800.0, 800.0};
......
......@@ -36,14 +36,15 @@ int main() {
NeuralNetwork net;
/* Input neurons */
NeuronLinear *i1 = new NeuronLinear(0.0, 1.0); //f(x) = x
NeuronLinear *i2 = new NeuronLinear(0.0, 1.0); //f(x) = x
NeuronLinear *i1 = new NeuronLinear( ); //f(x) = x
NeuronLinear *i2 = new NeuronLinear( ); //f(x) = x
NeuronLinear *i3 = new NeuronLinear(1, 1); //f(x) = x + 1
double b = 1;//bias
NeuronLinear *i3 = new NeuronLinear(&b); //f(x) = x + 1
/* Output neurons */
NeuronLinear *o1 = new NeuronLinear(1.0, 2.0); //f(x) = 2x + 1
NeuronLinear *o2 = new NeuronLinear(1, 2); //f(x) = 2x + 1
NeuronLinear *o1 = new NeuronLinear(&b); //f(x) = x + 1
NeuronLinear *o2 = new NeuronLinear(&b); //f(x) = x + 1
......
......@@ -39,14 +39,15 @@ int main() {
NeuralNetwork net;
/* Input neurons */
NeuronLinear *i1 = new NeuronLinear(0.0, 1.0); //f(x) = x
NeuronLinear *i2 = new NeuronLinear(0.0, 1.0); //f(x) = x
NeuronLinear *i1 = new NeuronLinear(); //f(x) = x
NeuronLinear *i2 = new NeuronLinear(); //f(x) = x
NeuronLinear *i3 = new NeuronLinear(1, 1); //f(x) = x + 1
double b = 1;//bias
NeuronLinear *i3 = new NeuronLinear( &b ); //f(x) = x + 1
/* Output neurons */
NeuronLinear *o1 = new NeuronLinear(1.0, 2.0); //f(x) = 2x + 1
NeuronLinear *o2 = new NeuronLinear(1, 2); //f(x) = 2x + 1
NeuronLinear *o1 = new NeuronLinear(&b); //f(x) = x + 1
NeuronLinear *o2 = new NeuronLinear(&b); //f(x) = x + 1
......
......@@ -18,8 +18,8 @@
#include <random>
#include <iostream>
#include "../include/4neuro.h"
#include "Solvers/DESolver.h"
#include "../../include/4neuro.h"
#include "../Solvers/DESolver.h"
void test_odr(size_t n_inner_neurons){
......@@ -28,6 +28,7 @@ void test_odr(size_t n_inner_neurons){
size_t train_size = 10;
double d1_s = 0.0, d1_e = 1.0;
/* swarm optimizer properties */
unsigned int max_iters = 100;
unsigned int n_particles = 10;
......
......@@ -15,109 +15,110 @@
#include "4neuro.h"
/**
 * Serialization smoke test: for each neuron type (linear, logistic, tanh,
 * binary) it prints the neuron's state, stores it into a Boost.Serialization
 * text archive, loads it back into a fresh instance and prints again so the
 * values can be compared by eye. The large commented block near the end
 * mirrors this code using the newer bias-pointer Neuron constructors —
 * NOTE(review): confirm which constructor API is current before re-enabling.
 */
int main() {
// Linear neuron built from two activation parameters; print its raw state.
NeuronLinear n(2, 3);
std::cout << n.get_potential() << " "
<< n.get_state() << " "
<< n.activation_function_get_parameter(0) << " "
<< n.activation_function_get_parameter(1) << std::endl;
// Store the linear neuron into a text archive on disk.
std::ofstream ofs("stored_neuron.4n");
{
boost::archive::text_oarchive oa(ofs);
oa << n;
ofs.close();
}
// Restore it into a fresh instance and print the restored parameters.
NeuronLinear n2;
{
std::ifstream ifs("stored_neuron.4n");
boost::archive::text_iarchive ia(ifs);
ia >> n2;
ifs.close();
}
std::cout << n2.get_potential() << " "
<< n2.get_state() << " "
<< n2.activation_function_get_parameter(0) << " "
<< n2.activation_function_get_parameter(1) << std::endl;
// Second linear neuron with non-default parameters, stored to a second archive.
NeuronLinear n3(0.62, 0.4);
std::cout << n3.get_potential() << " "
<< n3.get_state() << " "
<< n3.activation_function_get_parameter(0) << " "
<< n3.activation_function_get_parameter(1) << std::endl;
std::ofstream ofs2("stored_neuron2.4n");
{
boost::archive::text_oarchive oa(ofs2);
oa << n3;
ofs2.close();
}
// NOTE(review): the archive written from a NeuronLinear (n3) is read back
// into a NeuronLogistic here — verify this cross-type round-trip is intended.
NeuronLogistic n4;
{
std::ifstream ifs("stored_neuron2.4n");
boost::archive::text_iarchive ia(ifs);
ia >> n4;
ifs.close();
}
std::cout << n4.get_potential() << " "
<< n4.get_state() << " "
<< n4.activation_function_get_parameter(0) << " "
<< n4.activation_function_get_parameter(1) << std::endl;
// Tanh neuron: single activation parameter; same store/load/print round-trip.
NeuronTanh n5(0.5);
std::cout << n5.get_potential() << " "
<< n5.get_state() << " "
<< n5.activation_function_get_parameter(0) << std::endl;
std::ofstream ofs3("stored_neuron3.4n");
{
boost::archive::text_oarchive oa(ofs3);
oa << n5;
ofs3.close();
}
NeuronTanh n6;
{
std::ifstream ifs("stored_neuron3.4n");
boost::archive::text_iarchive ia(ifs);
ia >> n6;
ifs.close();
}
std::cout << n6.get_potential() << " "
<< n6.get_state() << " "
<< n6.activation_function_get_parameter(0) << std::endl;
// Binary (threshold) neuron: same store/load/print round-trip.
NeuronBinary n7(0.71);
std::cout << n7.get_potential() << " "
<< n7.get_state() << " "
<< n7.activation_function_get_parameter(0) << std::endl;
std::ofstream ofs4("stored_neuron4.4n");
{
boost::archive::text_oarchive oa(ofs4);
oa << n7;
ofs4.close();
}
NeuronBinary n8;
{
std::ifstream ifs("stored_neuron4.4n");
boost::archive::text_iarchive ia(ifs);
ia >> n8;
ifs.close();
}
std::cout << n8.get_potential() << " "
<< n8.get_state() << " "
<< n8.activation_function_get_parameter(0) << std::endl;
// Migrated version of the same test using the bias-pointer constructors,
// kept for reference while both APIs exist:
// double b = 3;//bias
// NeuronLinear n(&b);//x + 3
//
// std::cout << n.get_potential() << " "
// << n.get_state() << " "
// << n.activation_function_get_parameter(0) << " "
// << n.activation_function_get_parameter(1) << std::endl;
//
// std::ofstream ofs("stored_neuron.4n");
// {
// boost::archive::text_oarchive oa(ofs);
// oa << n;
// ofs.close();
// }
//
// NeuronLinear n2;
// {
// std::ifstream ifs("stored_neuron.4n");
// boost::archive::text_iarchive ia(ifs);
// ia >> n2;
// ifs.close();
// }
//
// std::cout << n2.get_potential() << " "
// << n2.get_state() << " "
// << n2.activation_function_get_parameter(0) << " "
// << n2.activation_function_get_parameter(1) << std::endl;
//
// NeuronLinear n3(0.62, 0.4);
//
// std::cout << n3.get_potential() << " "
// << n3.get_state() << " "
// << n3.activation_function_get_parameter(0) << " "
// << n3.activation_function_get_parameter(1) << std::endl;
//
// std::ofstream ofs2("stored_neuron2.4n");
// {
// boost::archive::text_oarchive oa(ofs2);
// oa << n3;
// ofs2.close();
// }
//
// NeuronLogistic n4;
// {
// std::ifstream ifs("stored_neuron2.4n");
// boost::archive::text_iarchive ia(ifs);
// ia >> n4;
// ifs.close();
// }
//
// std::cout << n4.get_potential() << " "
// << n4.get_state() << " "
// << n4.activation_function_get_parameter(0) << " "
// << n4.activation_function_get_parameter(1) << std::endl;
//
// NeuronTanh n5(0.5);
//
// std::cout << n5.get_potential() << " "
// << n5.get_state() << " "
// << n5.activation_function_get_parameter(0) << std::endl;
//
// std::ofstream ofs3("stored_neuron3.4n");
// {
// boost::archive::text_oarchive oa(ofs3);
// oa << n5;
// ofs3.close();
// }
//
// NeuronTanh n6;
// {
// std::ifstream ifs("stored_neuron3.4n");
// boost::archive::text_iarchive ia(ifs);
// ia >> n6;
// ifs.close();
// }
//
// std::cout << n6.get_potential() << " "
// << n6.get_state() << " "
// << n6.activation_function_get_parameter(0) << std::endl;
//
// NeuronBinary n7(0.71);
//
// std::cout << n7.get_potential() << " "
// << n7.get_state() << " "
// << n7.activation_function_get_parameter(0) << std::endl;
//
// std::ofstream ofs4("stored_neuron4.4n");
// {
// boost::archive::text_oarchive oa(ofs4);
// oa << n7;
// ofs4.close();
// }
//
// NeuronBinary n8;
// {
// std::ifstream ifs("stored_neuron4.4n");
// boost::archive::text_iarchive ia(ifs);
// ia >> n8;
// ifs.close();
// }
//
// std::cout << n8.get_potential() << " "
// << n8.get_state() << " "
// << n8.activation_function_get_parameter(0) << std::endl;
return 0;
}