Commit 45bb931c authored by Martin Beseda

[WIP] Looking for an optimal set of symmetry functions.

parent ec1b5267
@@ -20,7 +20,6 @@ lib4neuro::ACSFNeuralNetwork::ACSFNeuralNetwork(std::unordered_map<ELEMENT_SYMBO
size_t last_neuron_bias_idx = 0;
size_t last_connection_weight_idx = 0;
std::shared_ptr<Neuron> output_neuron = std::make_shared<NeuronLinear>();
size_t last_neuron_idx = this->add_neuron(output_neuron, BIAS_TYPE::NO_BIAS);
std::vector<size_t> outputs = {last_neuron_idx};
//
// Created by martin on 20.08.19.
//
#include <exception>
#include <4neuro.h>
@@ -103,45 +106,29 @@ double optimize_via_LBMQ(l4n::NeuralNetwork& net,
return err;
}
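/* Writes every data-set sample to the file 'fn': the input vector, the expected output and the network's prediction, one line per sample. */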
void print_into_file(const char * fn, std::shared_ptr<l4n::DataSet> &ds, l4n::NeuralNetwork &net){
std::ofstream outfile;
outfile.open(fn, std::ios::out );
std::vector<double> output;
output.resize(1);
for(auto e : *ds->get_data()) {
for(auto inp_e : e.first) {
outfile << inp_e << " ";
}
outfile << e.second.at(0) << " ";
net.eval_single(e.first, output);
outfile << output.at(0) << std::endl;
}
outfile.close();
}
int main() {
try{
/* Specify cutoff functions */
l4n::CutoffFunction1 cutoff1(10.1);
l4n::CutoffFunction2 cutoff2(12.5);
l4n::CutoffFunction2 cutoff3(15.2);
l4n::CutoffFunction2 cutoff4(10.3);
l4n::CutoffFunction2 cutoff5(12.9);
// l4n::CutoffFunction1 cutoff1(10.1);
l4n::CutoffFunction2 cutoff1(8);
// l4n::CutoffFunction2 cutoff2(15.2);
// l4n::CutoffFunction2 cutoff4(10.3);
// l4n::CutoffFunction2 cutoff5(12.9);
// l4n::CutoffFunction2 cutoff6(11);
/* Specify symmetry functions */
l4n::G1 sym_f1(&cutoff1);
l4n::G2 sym_f2(&cutoff2, 0.15, 0.75);
l4n::G2 sym_f3(&cutoff3, 0.1, 0.2);
l4n::G3 sym_f4(&cutoff4, 0.3);
l4n::G4 sym_f5(&cutoff5, 0.05, true, 0.05);
l4n::G4 sym_f6(&cutoff5, 0.05, false, 0.05);
l4n::G2 sym_f2(&cutoff1, 15, 8);
l4n::G2 sym_f3(&cutoff1, 10, 4);
// l4n::G3 sym_f4(&cutoff4, 0.3);
// l4n::G4 sym_f5(&cutoff5, 0.05, true, 0.05);
// l4n::G4 sym_f6(&cutoff5, 0.05, false, 0.05);
// l4n::G4 sym_f7(&cutoff6, 0.5, true, 0.05);
// l4n::G4 sym_f8(&cutoff6, 0.5, false, 0.05);
std::vector<l4n::SymmetryFunction*> helium_sym_funcs = {&sym_f1, &sym_f2, &sym_f3, &sym_f4, &sym_f5, &sym_f6};
std::vector<l4n::SymmetryFunction*> helium_sym_funcs = {&sym_f1, &sym_f2, &sym_f3}; //, &sym_f4, &sym_f5, &sym_f6, &sym_f7, &sym_f8};
l4n::Element helium = l4n::Element("He",
helium_sym_funcs);
@@ -149,7 +136,7 @@ int main() {
elements[l4n::ELEMENT_SYMBOL::He] = &helium;
/* Read data */
l4n::XYZReader reader("../../data/HE21+T4.xyz");
l4n::XYZReader reader("/home/martin/Desktop/HE21+T2.xyz");
reader.read();
std::cout << "Finished reading data" << std::endl;
@@ -158,39 +145,73 @@ int main() {
/* Create a neural network */
std::unordered_map<l4n::ELEMENT_SYMBOL, std::vector<unsigned int>> n_hidden_neurons;
n_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {10};
n_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {2, 1};
std::unordered_map<l4n::ELEMENT_SYMBOL, std::vector<l4n::NEURON_TYPE>> type_hidden_neurons;
type_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {l4n::NEURON_TYPE::LOGISTIC};
type_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LINEAR};
l4n::ACSFNeuralNetwork net(elements, *reader.get_element_list(), reader.contains_charge(), n_hidden_neurons, type_hidden_neurons);
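// Alternative: the block below builds a plain fully-connected network by hand (126 linear inputs, 5 logistic hidden neurons with biases, one linear output); it is left commented out.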
// l4n::NeuralNetwork net;
// std::vector<std::shared_ptr<l4n::NeuronLinear>> inps;
// std::vector<size_t> inps_inds;
// for(unsigned int i = 0; i < 126; i++) {
// std::shared_ptr<l4n::NeuronLinear> inp = std::make_shared<l4n::NeuronLinear>();
// inps.emplace_back(inp);
// inps_inds.emplace_back(net.add_neuron(inp, l4n::BIAS_TYPE::NO_BIAS));
// }
//
// net.specify_input_neurons(inps_inds);
//
// std::vector<std::shared_ptr<l4n::NeuronLogistic>> hids;
//
// std::vector<unsigned int> hids_idxs;
// size_t idx;
// unsigned int n_hidden = 5;
// for(unsigned int i = 0; i < n_hidden; i++) {
// std::shared_ptr<l4n::NeuronLogistic> hid = std::make_shared<l4n::NeuronLogistic>();
// hids.emplace_back(hid);
// idx = net.add_neuron(hid, l4n::BIAS_TYPE::NEXT_BIAS);
// hids_idxs.emplace_back(idx);
//
// for(unsigned int j = 0; j < 126; j++) {
// net.add_connection_simple(j, idx);
// }
// }
//
// std::shared_ptr<l4n::NeuronLinear> out = std::make_shared<l4n::NeuronLinear>();
// idx = net.add_neuron(out, l4n::BIAS_TYPE::NO_BIAS);
// std::vector<size_t> out_inds = {idx};
// for(unsigned int i = 0; i < n_hidden; i++) {
// net.add_connection_simple(hids_idxs.at(i), idx);
// }
// net.specify_output_neurons(out_inds);
l4n::MSE mse(&net, ds.get());
net.randomize_parameters();
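/* Train in two stages: optimize_via_LBMQ first, then refine with gradient descent */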
// optimize_via_particle_swarm(net, mse);
// double err1 = optimize_via_LBMQ(net, mse);
double err1 = optimize_via_LBMQ(net, mse);
double err2 = optimize_via_gradient_descent(net, mse);
print_into_file("test_results_2k_BB.txt", ds, net);
/* Print fit comparison with real data */
// std::vector<double> output;
// output.resize(1);
// for(auto e : *ds->get_data()) {
// for(auto inp_e : e.first) {
// std::cout << inp_e << " ";
// }
// std::cout << e.second.at(0) << " ";
// net.eval_single(e.first, output);
// std::cout << output.at(0) << std::endl;
// }
if(err2 > 0.00001) {
throw std::runtime_error("Training was incorrect!");
}
/* Print fit comparison with real data */
std::vector<double> output;
output.resize(1);
for(auto e : *ds->get_data()) {
for(unsigned int i = 0; i < e.first.size(); i++) {
std::cout << e.first.at(i) << " ";
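// break the line after every third input value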
if(i % 3 == 2) {
std::cout << std::endl;
}
}
std::cout << e.second.at(0) << " ";
net.eval_single(e.first, output);
std::cout << output.at(0) << std::endl;
}
} catch (const std::exception& e) {
std::cerr << e.what() << std::endl;
@@ -198,4 +219,4 @@ int main() {
}
return 0;
}