lib4neuro (MolDyn), commit a47613d0
Authored 5 years ago by bes0030
Parent: 4561bfc3

[WIP] trying to find an optimal configuration of network to fit He4+ cluster
Showing 1 changed file with 18 additions and 20 deletions.

--- a/src/examples/dev_sandbox.cpp
+++ b/src/examples/dev_sandbox.cpp
@@ -12,15 +12,15 @@ void optimize_via_particle_swarm(l4n::NeuralNetwork& net,
     std::vector<double> domain_bounds(2 * (net.get_n_weights() + net.get_n_biases()));

     for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
-        domain_bounds[2 * i] = -10;
-        domain_bounds[2 * i + 1] = 10;
+        domain_bounds[2 * i] = -150;
+        domain_bounds[2 * i + 1] = 150;
     }

     double c1 = 1.7;
     double c2 = 1.7;
     double w = 0.7;
-    size_t n_particles = 100;
-    size_t iter_max = 30;
+    size_t n_particles = 300;
+    size_t iter_max = 500;

     /* if the maximal velocity from the previous step is less than 'gamma' times the current maximal velocity, then one
      * terminating criterion is met */
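This hunk widens the particle swarm's search box for every weight and bias from [-10, 10] to [-150, 150] and enlarges the search itself (100 to 300 particles, 30 to 500 iterations). A minimal standalone sketch of the bounds layout used above; the helper name is hypothetical and not part of lib4neuro:

    #include <cstddef>
    #include <vector>

    // Slot 2*i holds the lower bound and slot 2*i + 1 the upper bound for
    // the i-th trainable parameter, matching the loop in the hunk above.
    std::vector<double> make_domain_bounds(std::size_t n_params, double limit) {
        std::vector<double> bounds(2 * n_params);
        for (std::size_t i = 0; i < n_params; ++i) {
            bounds[2 * i] = -limit;      // e.g. -150 after this commit
            bounds[2 * i + 1] = limit;   // e.g. +150 after this commit
        }
        return bounds;
    }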
@@ -60,7 +60,8 @@ double optimize_via_gradient_descent(l4n::NeuralNetwork& net,
               << "***********************************************************************************************************************"
               << std::endl;

     l4n::GradientDescentBB gd(1e-6,
-                              1000);
+                              1000,
+                              60000);

     gd.optimize(ef);
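The GradientDescentBB constructor gains a third argument. Reading the values alone (the parameter names are not visible in this diff), 1e-6 looks like a convergence tolerance, 1000 is the pre-existing iteration limit, and 60000 is a much larger new cap, plausibly on error-function evaluations; treat those labels as assumptions. Only the call pattern itself is confirmed by the hunk:

    // As used above; the comments are interpretation, not lib4neuro documentation.
    l4n::GradientDescentBB gd(1e-6,    // assumed: convergence tolerance
                              1000,    // assumed: iteration limit
                              60000);  // assumed: evaluation/step cap added here
    gd.optimize(ef);                   // minimizes the error functional 'ef'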
@@ -113,18 +114,18 @@ int main() {
     /* Specify cutoff functions */
 //    l4n::CutoffFunction1 cutoff1(10.1);
     l4n::CutoffFunction2 cutoff1(8);
-    l4n::CutoffFunction2 cutoff2(25);
+//    l4n::CutoffFunction2 cutoff2(25);
 //    l4n::CutoffFunction2 cutoff2(15.2);
 //    l4n::CutoffFunction2 cutoff4(10.3);
 //    l4n::CutoffFunction2 cutoff5(12.9);
 //    l4n::CutoffFunction2 cutoff6(11);

     /* Specify symmetry functions */
-    l4n::G1 sym_f1(&cutoff1);
+//    l4n::G1 sym_f1(&cutoff1);
     l4n::G2 sym_f2(&cutoff1, 2.09, 0.8);
     l4n::G2 sym_f3(&cutoff1, 0.01, 0.04);
-    l4n::G2 sym_f4(&cutoff2, 0.02, 0.04);
-    l4n::G2 sym_f5(&cutoff2, 2.09, 0.04);
+//    l4n::G2 sym_f4(&cutoff2, 0.02, 0.04);
+//    l4n::G2 sym_f5(&cutoff2, 2.09, 0.04);
 //    l4n::G3 sym_f4(&cutoff4, 0.3);
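After this hunk only the two radial G2 functions tied to cutoff1 remain active. For orientation, a sketch of the Behler-Parrinello radial G2 summand such objects typically implement; mapping the two numeric constructor arguments to eta and r_s is an assumption, not taken from the lib4neuro sources:

    #include <cmath>

    // One G2 contribution from a neighbour at distance r_ij, under the assumed
    // parameterization G2(eta, r_s) with a cosine cutoff of radius r_cut.
    double g2_term(double r_ij, double eta, double r_s, double r_cut) {
        if (r_ij >= r_cut) {
            return 0.0;  // neighbours outside the cutoff sphere contribute nothing
        }
        const double pi = 3.14159265358979323846;
        double fc = 0.5 * (std::cos(pi * r_ij / r_cut) + 1.0);  // smooth cutoff
        return std::exp(-eta * (r_ij - r_s) * (r_ij - r_s)) * fc;
    }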
@@ -133,7 +134,7 @@ int main() {
 //    l4n::G4 sym_f7(&cutoff6, 0.5, true, 0.05);
 //    l4n::G4 sym_f8(&cutoff6, 0.5, false, 0.05);
-    std::vector<l4n::SymmetryFunction*> helium_sym_funcs = {&sym_f1, &sym_f2, &sym_f3, &sym_f4, &sym_f5}; //, &sym_f6, &sym_f7, &sym_f8};
+    std::vector<l4n::SymmetryFunction*> helium_sym_funcs = {&sym_f2, &sym_f3}; //, &sym_f4, &sym_f5}; //, &sym_f6, &sym_f7, &sym_f8};

     l4n::Element helium = l4n::Element("He", helium_sym_funcs);
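Each He atom is now described by two input values (one from sym_f2, one from sym_f3) instead of the previous five, which appears to line up with the print-formatting change in the last hunk: a line break after every second input value.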
@@ -141,7 +142,7 @@ int main() {
     elements[l4n::ELEMENT_SYMBOL::He] = &helium;

     /* Read data */
-    l4n::XYZReader reader("/home/martin/Desktop/HE4+T0.xyz", true);
+    l4n::XYZReader reader("/home/bes0030/HE4+T0.xyz", true);
     reader.read();

     std::cout << "Finished reading data" << std::endl;
@@ -150,7 +151,7 @@ int main() {
     /* Create a neural network */
     std::unordered_map<l4n::ELEMENT_SYMBOL, std::vector<unsigned int>> n_hidden_neurons;
-    n_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {10, 1};
+    n_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {20, 1};

     std::unordered_map<l4n::ELEMENT_SYMBOL, std::vector<l4n::NEURON_TYPE>> type_hidden_neurons;
     type_hidden_neurons[l4n::ELEMENT_SYMBOL::He] = {l4n::NEURON_TYPE::LOGISTIC, l4n::NEURON_TYPE::LINEAR};
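The He subnetwork is widened from {10, 1} to {20, 1} hidden neurons; the activations stay a LOGISTIC hidden layer feeding a LINEAR output stage. Assuming a plain fully connected layout over the two descriptor inputs (an assumption; the network construction is outside this hunk), that gives 2*20 + 20*1 = 60 weights and 20 + 1 = 21 biases, i.e. 81 trainable parameters for the He subnetwork.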
@@ -159,16 +160,13 @@ int main() {
     l4n::MSE mse(&net, ds.get());

     std::cout << net.get_min_max_weight().first << " " << net.get_min_max_weight().second << std::endl;
     net.randomize_parameters();
-//    optimize_via_particle_swarm(net, mse);
-    double err1 = optimize_via_LBMQ(net, mse);
+    optimize_via_particle_swarm(net, mse);
+//    double err1 = optimize_via_LBMQ(net, mse);
     double err2 = optimize_via_gradient_descent(net, mse);

     if (err2 > 0.00001) {
         throw std::runtime_error("Training was incorrect!");
     }

-    std::cout << "Weights: " << net.get_min_max_weight().first << " " << net.get_min_max_weight().second << std::endl;

     /* Print fit comparison with real data */
     std::vector<double> output;
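Training now starts with the particle swarm pass (previously commented out) while the optimize_via_LBMQ pass is disabled; gradient descent then refines the swarm's result, and the run throws if the final error err2 exceeds 1e-5. The pre-training printout of the weight range is kept, but the post-training "Weights:" printout is dropped.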
@@ -177,11 +175,11 @@ int main() {
     for (auto e : *ds->get_data()) {
         for (unsigned int i = 0; i < e.first.size(); i++) {
             std::cout << e.first.at(i) << " ";
-            if (i % 5 == 4) {
+            if (i % 2 == 1) {
                 std::cout << std::endl;
             }
         }
-        std::cout << e.second.at(0) << " ";
+        std::cout << "OUTS (DS, predict): " << e.second.at(0) << " ";
         net.eval_single(e.first, output);
         std::cout << output.at(0) << std::endl;
     }
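With only two symmetry-function inputs per He atom, the i % 2 == 1 test breaks the input listing after each atom's pair of descriptor values, and the target/prediction pair is now labelled. Schematically, each sample prints as (placeholder tokens, not real values):

    <in_1> <in_2>
    <in_3> <in_4>
    ...
    OUTS (DS, predict): <dataset energy> <network output>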