Commit 569d5c90 authored by Martin Beseda

[WIP] Fixing ACSF NN constructor.

parent 57df9b10
@@ -170,6 +170,7 @@ namespace lib4neuro {
        gradient_norm = 0;

        // TODO parallelize with OpenMP?
        for (size_t ci = 0; ci < n_parameters; ++ci) {
            mem_double = rhs[ci];
            mem_double *= mem_double;
......
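The hunk above is truncated mid-loop. A minimal, self-contained sketch of the pattern it appears to begin — squaring each component of rhs, summing the squares, and taking the square root to get the Euclidean gradient norm — is shown below as an assumption, not the repository's code; every name outside the hunk (the helper function in particular) is hypothetical.

#include <cmath>
#include <cstddef>
#include <vector>

/* Hypothetical helper illustrating the accumulation the hunk begins:
 * square every gradient component, sum the squares, take the square root. */
double euclidean_gradient_norm(const std::vector<double>& rhs) {
    double gradient_norm = 0;
    double mem_double;
    for (std::size_t ci = 0; ci < rhs.size(); ++ci) {
        mem_double = rhs[ci];
        mem_double *= mem_double;     /* squared component */
        gradient_norm += mem_double;  /* running sum of squares */
    }
    return std::sqrt(gradient_norm);  /* Euclidean norm */
}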
@@ -14,6 +14,11 @@ lib4neuro::ACSFNeuralNetwork::ACSFNeuralNetwork(std::unordered_map<ELEMENT_SYMBO
    std::vector<size_t> inputs;
    std::vector<size_t> subnet_outputs;
    size_t neuron_idx;
    std::unordered_map<ELEMENT_SYMBOL, std::vector<size_t>> subnet_neuron_idxs;
    std::unordered_map<ELEMENT_SYMBOL, std::vector<size_t>> subnet_connection_idxs;

    for(size_t i = 0; i < elements_list.size(); i++) {
        std::vector<size_t> previous_layer;
        std::vector<size_t> new_layer;
@@ -35,9 +40,20 @@ lib4neuro::ACSFNeuralNetwork::ACSFNeuralNetwork(std::unordered_map<ELEMENT_SYMBO
            inputs.emplace_back(neuron_idx);
        }

        /* Create subnet for the current element */
        bool new_subnet = false;
        if(subnet_neuron_idxs.find(elements_list.at(i)) == subnet_neuron_idxs.end()) {
            new_subnet = true;
            subnet_neuron_idxs[elements_list.at(i)] = std::vector<size_t>();
            subnet_connection_idxs[elements_list.at(i)] = std::vector<size_t>();
        }

        /* Create hidden layers in sub-net */
        std::vector<unsigned int> n_neurons = n_hidden_neurons[elements_list.at(i)];
        std::vector<NEURON_TYPE> types = type_hidden_neurons[elements_list.at(i)];

        size_t local_neuron_idx = 0;
        size_t local_connection_idx = 0;

        for(size_t j = 0; j < n_neurons.size(); j++) { /* Iterate over hidden layers */
            /* Create hidden neurons */
            for(size_t k = 0; k < n_neurons.at(j); k++) {
@@ -61,18 +77,74 @@ lib4neuro::ACSFNeuralNetwork::ACSFNeuralNetwork(std::unordered_map<ELEMENT_SYMBO
                    }
                }

                neuron_idx = this->add_neuron(hid_n, BIAS_TYPE::NEXT_BIAS);
                if(new_subnet) {
                    neuron_idx = this->add_neuron(hid_n,
                                                  BIAS_TYPE::NEXT_BIAS);
                    subnet_neuron_idxs[elements_list.at(i)].emplace_back(neuron_idx);
                } else {
                    neuron_idx = this->add_neuron(hid_n,
                                                  BIAS_TYPE::EXISTING_BIAS,
                                                  subnet_neuron_idxs[elements_list.at(i)].at(0) + local_neuron_idx);
                }
                local_neuron_idx++;

                new_layer.emplace_back(neuron_idx);

                /* Connect hidden neuron to the previous layer */
                for(auto prev_n : previous_layer) {
                    this->add_connection_simple(prev_n, neuron_idx, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
                    if(new_subnet) {
                        subnet_connection_idxs[elements_list.at(i)].emplace_back(this->add_connection_simple(prev_n,
                                                                                                             neuron_idx,
                                                                                                             SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT));
                    } else {
                        this->add_connection_simple(prev_n,
                                                    neuron_idx,
                                                    SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT,
                                                    subnet_connection_idxs[elements_list.at(i)].at(0) + local_connection_idx);
                    }
                    local_connection_idx++;
                }
                previous_layer = new_layer;
                new_layer.clear();
            }
            previous_layer = new_layer;
            new_layer.clear();
        }
/* Create hidden layers in sub-net */
// std::vector<unsigned int> n_neurons = n_hidden_neurons[elements_list.at(i)];
// std::vector<NEURON_TYPE> types = type_hidden_neurons[elements_list.at(i)];
// for(size_t j = 0; j < n_neurons.size(); j++) { /* Iterate over hidden layers */
// /* Create hidden neurons */
// for(size_t k = 0; k < n_neurons.at(j); k++) {
// std::shared_ptr<Neuron> hid_n;
// switch(types.at(j)) {
// case NEURON_TYPE::LOGISTIC: {
// hid_n = std::make_shared<NeuronLogistic>();
// break;
// }
// case NEURON_TYPE::BINARY: {
// hid_n = std::make_shared<NeuronBinary>();
// break;
// }
// case NEURON_TYPE::CONSTANT: {
// hid_n = std::make_shared<NeuronConstant>();
// break;
// }
// case NEURON_TYPE::LINEAR: {
// hid_n = std::make_shared<NeuronLinear>();
// break;
// }
// }
//
// neuron_idx = this->add_neuron(hid_n, BIAS_TYPE::NEXT_BIAS);
// new_layer.emplace_back(neuron_idx);
//
// /* Connect hidden neuron to the previous layer */
// for(auto prev_n : previous_layer) {
// this->add_connection_simple(prev_n, neuron_idx, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
// }
// previous_layer = new_layer;
// new_layer.clear();
// }
// }
        /* Create output neurons for sub-net */
        std::shared_ptr<NeuronLinear> sub_out_n = std::make_shared<NeuronLinear>();
        neuron_idx = this->add_neuron(sub_out_n, BIAS_TYPE::NO_BIAS);
......
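The main change in this constructor is parameter sharing between sub-nets of the same element: for the first atom of an element, hidden neurons and connections are created with fresh parameters (BIAS_TYPE::NEXT_BIAS, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT) and their indices are recorded in subnet_neuron_idxs / subnet_connection_idxs; every later atom of that element reuses the recorded indices through BIAS_TYPE::EXISTING_BIAS and SIMPLE_CONNECTION_TYPE::EXISTING_WEIGHT. A minimal, self-contained sketch of that share-or-create bookkeeping follows; lib4neuro's add_neuron/add_connection_simple calls are replaced by a plain counter and all names in the sketch are illustrative only.

#include <cstddef>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

int main() {
    /* Three atoms of the same element should end up sharing one set of parameters. */
    std::vector<std::string> elements_list = {"He", "He", "He"};
    std::unordered_map<std::string, std::vector<std::size_t>> subnet_param_idxs;
    std::size_t next_param_idx = 0;           /* stands in for NEXT_BIAS / NEXT_WEIGHT */
    const std::size_t params_per_subnet = 4;  /* e.g. a tiny hidden layer */

    for (const auto& el : elements_list) {
        bool new_subnet = subnet_param_idxs.find(el) == subnet_param_idxs.end();
        for (std::size_t local_idx = 0; local_idx < params_per_subnet; ++local_idx) {
            std::size_t param_idx;
            if (new_subnet) {
                param_idx = next_param_idx++;               /* allocate a fresh parameter */
                subnet_param_idxs[el].push_back(param_idx); /* remember it for reuse */
            } else {
                param_idx = subnet_param_idxs[el].at(0) + local_idx; /* reuse the recorded one */
            }
            std::cout << el << " -> parameter " << param_idx << '\n';
        }
    }
    return 0;
}

Each He atom prints parameters 0 through 3, mirroring how every helium sub-net in the diff reuses the biases and weights of the first one.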
@@ -260,7 +260,8 @@ namespace lib4neuro {
        }

        if (this->input_neuron_indices.size() != input.size()) {
            THROW_INVALID_ARGUMENT_ERROR("Data input size != Network input size");
            THROW_INVALID_ARGUMENT_ERROR("Network input size(" + std::to_string(this->input_neuron_indices.size())
                                         + ") != Data input size(" + std::to_string(input.size()) + ")");
        }

        if (this->output_neuron_indices.size() != output.size()) {
@@ -1051,127 +1052,5 @@ namespace lib4neuro {
                error_partial[i] = 0;
            }
        }
// NeuralNetwork::NeuralNetwork(std::unordered_map<ELEMENT_SYMBOL, Element*, ELEMENT_SYMBOL_HASH>* elements,
// std::vector<std::pair<ELEMENT_SYMBOL, std::vector<double>>>* particles) {
//
// }
    NeuralNetwork::NeuralNetwork(std::unordered_map<ELEMENT_SYMBOL, Element*>& elements,
                                 std::vector<std::pair<ELEMENT_SYMBOL,
                                                       std::vector<double>>>& particles) {
        /* Create a new set of Atomic-Centered Symmetry Functions serving as coordinates */
// std::vector<std::vector<double>> acsf_coords(particles.size());
// std::vector<double> acsf_coords_single;
// for(size_t i = 0; i < particles.size(); i++) { /* Iterate over all the particles */
// for (auto sym_func : elements[particles.at(i).first]->getSymmetryFunctions()) {
// acsf_coords_single.emplace_back(sym_func->eval(i, particles));
// }
//
// acsf_coords.at(i) = acsf_coords_single;
// acsf_coords_single.clear();
// }
//
// /* Check for duplicates in new coordinates */
// std::vector<std::vector<double>> unique_coords = acsf_coords;
// std::sort(unique_coords.begin(), unique_coords.end());
// unique_coords.erase(std::unique(unique_coords.begin(), unique_coords.end()), unique_coords.end());
//
// if(unique_coords.size() != acsf_coords.size()) {
// THROW_RUNTIME_ERROR("Not all descriptors are unique with currently specified symmetry functions!");
// }
//
// for(auto e : acsf_coords) {
// for(auto ee : e) {
// std::cout << ee << " ";
// }
// std::cout << std::endl;
// }
        /* Construct the neural network */
// std::vector<size_t> inputs;
// std::vector<size_t> subnet_outputs;
// size_t neuron_idx;
// for(size_t i = 0; i < particles.size(); i++) {
// std::vector<size_t> previous_layer;
// std::vector<size_t> new_layer;
//
// /* Create input neurons for sub-net */
// std::shared_ptr<NeuronLinear> inp_n;
// for(size_t j = 0; j < acsf_coords.at(i).size(); j++) {
// inp_n = std::make_shared<NeuronLinear>();
// neuron_idx = this->add_neuron(inp_n, BIAS_TYPE::NO_BIAS);
// previous_layer.emplace_back(neuron_idx);
// inputs.emplace_back(neuron_idx);
// }
//
// /* Add an additional input neuron for charge, if provided */
// if(elements[particles.at(i).first]->isCharge()) {
// inp_n = std::make_shared<NeuronLinear>();
// neuron_idx = this->add_neuron(inp_n, BIAS_TYPE::NO_BIAS);
// previous_layer.emplace_back(neuron_idx);
// inputs.emplace_back(neuron_idx);
// }
//
// /* Create hidden layers in sub-net */
// std::vector<unsigned int> n_neurons = elements[particles.at(i).first]->getNHiddenNeurons();
// std::vector<NEURON_TYPE> types = elements[particles.at(i).first]->getHiddenNeuronTypes();
// for(size_t j = 0; j < n_neurons.size(); j++) { /* Iterate over hidden layers */
// /* Create hidden neurons */
// for(size_t k = 0; k < n_neurons.at(j); k++) {
// std::shared_ptr<Neuron> hid_n;
// switch(types.at(j)) {
// case NEURON_TYPE::LOGISTIC: {
// hid_n = std::make_shared<NeuronLogistic>();
// break;
// }
// case NEURON_TYPE::BINARY: {
// hid_n = std::make_shared<NeuronBinary>();
// break;
// }
// case NEURON_TYPE::CONSTANT: {
// hid_n = std::make_shared<NeuronConstant>();
// break;
// }
// case NEURON_TYPE::LINEAR: {
// hid_n = std::make_shared<NeuronLinear>();
// break;
// }
// }
//
// neuron_idx = this->add_neuron(hid_n, BIAS_TYPE::NEXT_BIAS);
// new_layer.emplace_back(neuron_idx);
//
// /* Connect hidden neuron to the previous layer */
// for(auto prev_n : previous_layer) {
// this->add_connection_simple(prev_n, neuron_idx, SIMPLE_CONNECTION_TYPE::NEXT_WEIGHT);
// }
// previous_layer = new_layer;
// new_layer.clear();
// }
// }
//
// /* Create output neurons for sub-net */
// std::shared_ptr<NeuronLinear> sub_out_n = std::make_shared<NeuronLinear>();
// neuron_idx = this->add_neuron(sub_out_n, BIAS_TYPE::NO_BIAS);
// subnet_outputs.emplace_back(neuron_idx);
// for(auto prev_n : previous_layer) {
// this->add_connection_simple(prev_n, neuron_idx);
// }
// }
//
// /* Specify network inputs */
// this->specify_input_neurons(inputs);
//
// /* Create final output layer */
// std::shared_ptr<NeuronLinear> final_out_n = std::make_shared<NeuronLinear>();
// neuron_idx = this->add_neuron(final_out_n, BIAS_TYPE::NO_BIAS);
// for(auto subnet_output : subnet_outputs) {
// this->add_connection_constant(subnet_output, neuron_idx, 1);
// }
// std::vector<size_t> outputs = {neuron_idx};
// this->specify_output_neurons(outputs);
    }
}
@@ -235,12 +235,6 @@ namespace lib4neuro {
         */
        LIB4NEURO_API explicit NeuralNetwork(std::string filepath);

        // TODO remove method
        LIB4NEURO_API explicit NeuralNetwork(std::unordered_map<ELEMENT_SYMBOL,
                                                                Element*>& elements,
                                             std::vector<std::pair<ELEMENT_SYMBOL,
                                                                   std::vector<double>>>& particles);

        /**
         *
         */
......
@@ -133,7 +133,7 @@ int main() {
        elements[l4n::ELEMENT_SYMBOL::He] = &helium;

        /* Read data */
        l4n::XYZReader reader("../../data/HE21+T4.xyz");
        l4n::XYZReader reader("/home/martin/lib4neuro/data/HE21+T1.xyz");
        reader.read();

        std::cout << "Finished reading data" << std::endl;
@@ -142,18 +142,52 @@ int main() {
        /* Create a neural network */
        std::unordered_map<l4n::ELEMENT_SYMBOL, std::vector<unsigned int>> n_hidden_neurons;
        n_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {10};
        n_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {2};

        std::unordered_map<l4n::ELEMENT_SYMBOL, std::vector<l4n::NEURON_TYPE>> type_hidden_neurons;
        type_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {l4n::NEURON_TYPE::LOGISTIC};

        l4n::ACSFNeuralNetwork net(elements, *reader.get_element_list(), reader.contains_charge(), n_hidden_neurons, type_hidden_neurons);
// l4n::NeuralNetwork net;
// std::vector<std::shared_ptr<l4n::NeuronLinear>> inps;
// std::vector<size_t> inps_inds;
// for(unsigned int i = 0; i < 126; i++) {
// std::shared_ptr<l4n::NeuronLinear> inp = std::make_shared<l4n::NeuronLinear>();
// inps.emplace_back(inp);
// inps_inds.emplace_back(net.add_neuron(inp, l4n::BIAS_TYPE::NO_BIAS));
// }
//
// net.specify_input_neurons(inps_inds);
//
// std::vector<std::shared_ptr<l4n::NeuronLogistic>> hids;
//
// std::vector<unsigned int> hids_idxs;
// size_t idx;
// unsigned int n_hidden = 5;
// for(unsigned int i = 0; i < n_hidden; i++) {
// std::shared_ptr<l4n::NeuronLogistic> hid = std::make_shared<l4n::NeuronLogistic>();
// hids.emplace_back(hid);
// idx = net.add_neuron(hid, l4n::BIAS_TYPE::NEXT_BIAS);
// hids_idxs.emplace_back(idx);
//
// for(unsigned int j = 0; j < 126; j++) {
// net.add_connection_simple(j, idx);
// }
// }
//
// std::shared_ptr<l4n::NeuronLinear> out = std::make_shared<l4n::NeuronLinear>();
// idx = net.add_neuron(out, l4n::BIAS_TYPE::NO_BIAS);
// std::vector<size_t> out_inds = {idx};
// for(unsigned int i = 0; i < n_hidden; i++) {
// net.add_connection_simple(hids_idxs.at(i), idx);
// }
// net.specify_output_neurons(out_inds);
        l4n::MSE mse(&net, ds.get());

        net.randomize_parameters();

        // optimize_via_particle_swarm(net, mse);
        // double err1 = optimize_via_LBMQ(net, mse);
        double err1 = optimize_via_LBMQ(net, mse);
        double err2 = optimize_via_gradient_descent(net, mse);

        if(err2 > 0.00001) {
@@ -161,17 +195,17 @@
        }

        /* Print fit comparison with real data */
// std::vector<double> output;
// output.resize(1);
//
// for(auto e : *ds->get_data()) {
// for(auto inp_e : e.first) {
// std::cout << inp_e << " ";
// }
// std::cout << e.second.at(0) << " ";
// net.eval_single(e.first, output);
// std::cout << output.at(0) << std::endl;
// }
        std::vector<double> output;
        output.resize(1);

        for(auto e : *ds->get_data()) {
            for(auto inp_e : e.first) {
                std::cout << inp_e << " ";
            }
            std::cout << e.second.at(0) << " ";

            net.eval_single(e.first, output);
            std::cout << output.at(0) << std::endl;
        }
    } catch (const std::exception& e) {
        std::cerr << e.what() << std::endl;
......