//
// Created by martin on 19.08.19.
//
#include "../settings.h"
#include "ACSFNeuralNetwork.h"
lib4neuro::ACSFNeuralNetwork::ACSFNeuralNetwork(std::unordered_map<ELEMENT_SYMBOL, Element*>& elements,
                                                std::vector<ELEMENT_SYMBOL>& elements_list,
                                                bool with_charge,
                                                std::unordered_map<ELEMENT_SYMBOL, std::vector<unsigned int>> n_hidden_neurons,
                                                std::unordered_map<ELEMENT_SYMBOL, std::vector<NEURON_TYPE>> type_hidden_neurons) {
    /* Construct the neural network */
    std::vector<size_t> inputs;
    std::vector<size_t> subnet_outputs;
    size_t neuron_idx;

    for(size_t i = 0; i < elements_list.size(); i++) {
        std::vector<size_t> previous_layer;
        std::vector<size_t> new_layer;

        /* Create input neurons for sub-net */
        std::shared_ptr<NeuronLinear> inp_n;
        for(size_t j = 0; j < elements[elements_list.at(i)]->getSymmetryFunctions().size(); j++) {
            inp_n = std::make_shared<NeuronLinear>();
            neuron_idx = this->add_neuron(inp_n, BIAS_TYPE::NO_BIAS);
            previous_layer.emplace_back(neuron_idx);
            inputs.emplace_back(neuron_idx);
        }

        /* Add an additional input neuron for charge, if provided */
        if(with_charge) {
            inp_n = std::make_shared<NeuronLinear>();
            neuron_idx = this->add_neuron(inp_n, BIAS_TYPE::NO_BIAS);
            previous_layer.emplace_back(neuron_idx);
            inputs.emplace_back(neuron_idx);
        }

        /* Create hidden layers in sub-net */
        std::vector<unsigned int> n_neurons = n_hidden_neurons[elements_list.at(i)];
        std::vector<NEURON_TYPE> types = type_hidden_neurons[elements_list.at(i)];
        for(size_t j = 0; j < n_neurons.size(); j++) { /* Iterate over hidden layers */
            /* Create hidden neurons */
            for(size_t k = 0; k < n_neurons.at(j); k++) {
                std::shared_ptr<Neuron> hid_n;
                switch(types.at(j)) {
                    case NEURON_TYPE::LOGISTIC: {
                        hid_n = std::make_shared<NeuronLogistic>();
                        break;
                    }
                    case NEURON_TYPE::BINARY: {
                        hid_n = std::make_shared<NeuronBinary>();
                        break;
                    }
                    case NEURON_TYPE::CONSTANT: {
                        hid_n = std::make_shared<NeuronConstant>();
                        break;
                    }
                    case NEURON_TYPE::LINEAR: {
                        hid_n = std::make_shared<NeuronLinear>();
                        break;
                    }
                }

                neuron_idx = this->add_neuron(hid_n, BIAS_TYPE::NEXT_BIAS);
                new_layer.emplace_back(neuron_idx);

                /* Connect hidden neuron to the previous layer */
                for(auto prev_n : previous_layer) {
                    this->add_connection_simple(prev_n, neuron_idx, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
                }
            }

            /* Only after the whole layer is built does it become the previous
             * layer for the next one; swapping inside the neuron loop would
             * chain neurons instead of connecting full layers. */
            previous_layer = new_layer;
            new_layer.clear();
        }
        /* Create output neurons for sub-net */
        std::shared_ptr<NeuronLinear> sub_out_n = std::make_shared<NeuronLinear>();
        neuron_idx = this->add_neuron(sub_out_n, BIAS_TYPE::NO_BIAS);
        subnet_outputs.emplace_back(neuron_idx);

        for(auto prev_n : previous_layer) {
            this->add_connection_simple(prev_n, neuron_idx);
        }
    }

    /* Specify network inputs */
    this->specify_input_neurons(inputs);

    /* Create final output layer */
    std::shared_ptr<NeuronLinear> final_out_n = std::make_shared<NeuronLinear>();
    neuron_idx = this->add_neuron(final_out_n, BIAS_TYPE::NO_BIAS);
    for(auto subnet_output : subnet_outputs) {
        this->add_connection_constant(subnet_output, neuron_idx, 1);
    }

    std::vector<size_t> outputs = {neuron_idx};
    this->specify_output_neurons(outputs);
}
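
/*
 * Minimal usage sketch (illustrative only): constructs a network for a system
 * of two hydrogen atoms. It assumes ELEMENT_SYMBOL::H is a valid enumerator
 * and that 'hydrogen' points to an already prepared Element with its symmetry
 * functions defined; the hidden-layer sizes and types are arbitrary choices.
 *
 *   std::unordered_map<ELEMENT_SYMBOL, Element*> elements = {{ELEMENT_SYMBOL::H, hydrogen}};
 *   std::vector<ELEMENT_SYMBOL> atoms = {ELEMENT_SYMBOL::H, ELEMENT_SYMBOL::H};
 *   std::unordered_map<ELEMENT_SYMBOL, std::vector<unsigned int>> n_hidden =
 *       {{ELEMENT_SYMBOL::H, {10, 10}}};
 *   std::unordered_map<ELEMENT_SYMBOL, std::vector<NEURON_TYPE>> hidden_types =
 *       {{ELEMENT_SYMBOL::H, {NEURON_TYPE::LOGISTIC, NEURON_TYPE::LOGISTIC}}};
 *   lib4neuro::ACSFNeuralNetwork net(elements, atoms, false, n_hidden, hidden_types);
 */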