Commit b601c67b (MolDyn / lib4neuro)
Authored 6 years ago by Martin Beseda

ENH: added comments to simulator

Parent: 42355836
1 changed file: src/examples/simulator.cpp (+65, −46)
@@ -20,71 +20,87 @@
 #include "4neuro.h"
 #include "../CrossValidator/CrossValidator.h"

 double get_rel_error(std::vector<double>& d1, std::vector<double>& d2) {
     double out = 0, m, n = 0;

     assert(d1.size() == d2.size());

     for(size_t i = 0; i < d1.size(); ++i) {
         m = d1[i] - d2[i];
         n += d1[i] * d1[i];
         out += m * m;
     }

     return m / n;
 }

 int main(int argc, char** argv) {
     try {
         /* Read data from the file */
         // l4n::CSVReader reader("/home/martin/Desktop/ANN_DATA_1_SET.txt", "\t", true);
-        l4n::CSVReader reader("/tmp/data_Heaviside.txt", "\t", false);
-        reader.read();
+        l4n::CSVReader reader("/home/martin/Desktop/data_Heaviside.txt", "\t", true);  // File, separator, skip 1st line
+        reader.read();  // Read from the file

         /* Create data set for both the training and testing of the neural network */
-        std::vector<unsigned int> inputs = {0};
-        std::vector<unsigned int> outputs = {1};
+        std::vector<unsigned int> inputs = {3};   // Possible multiple inputs, e.g. {0,3}
+        std::vector<unsigned int> outputs = {1};  // Possible multiple outputs, e.g. {1,2}

-        l4n::DataSet ds = reader.get_data_set(&inputs, &outputs);
-        ds.normalize();
-        // ds.print_data();
+        l4n::DataSet ds = reader.get_data_set(&inputs, &outputs);  // Creation of data-set for NN
+        ds.normalize();  // Normalization of data to prevent numerical problems
+        // ds.print_data();  // Printing of data-set to check it

         /* Neural network construction */
+        // Numbers of neurons in layers (including input and output layers)
         std::vector<unsigned int> neuron_numbers_in_layers = {1, 10, 10, 1};
+        // Creation of fully connected feed-forward network with linear activation functions for input and output
+        // layers and the specified a.f. for the hidden ones
         l4n::FullyConnectedFFN nn(&neuron_numbers_in_layers, l4n::NEURON_TYPE::LOGISTIC);

         /* Error function */
-        l4n::MSE mse(&nn, &ds);
+        l4n::MSE mse(&nn, &ds);  // First parameter - neural network, second parameter - data-set

-        /* Domain */
+        /* Domain - important for Particle Swarm method */
         std::vector<double> domain_bounds(2 * (nn.get_n_weights() + nn.get_n_biases()));
-        for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
-            domain_bounds[2 * i] = -10;
-            domain_bounds[2 * i + 1] = 10;
-        }

         /* Training method */
+        // for(size_t i = 0; i < domain_bounds.size() / 2; ++i){
+        //     domain_bounds[2 * i] = -10;
+        //     domain_bounds[2 * i + 1] = 10;
+        // }
-        // l4n::ParticleSwarm ps(&domain_bounds,
-        //                       1.711897,
-        //                       1.711897,
-        //                       0.711897,
-        //                       0.5,
-        //                       20,
-        //                       0.7,
-        //                       600,
-        //                       1000);
-        l4n::GradientDescent gs(1e-3, 100, 100000);
-        nn.randomize_weights();
+        // Parameters
+        // 1) domain_bounds  Bounds for every optimized parameter (p1_lower, p1_upper, p2_lower, p2_upper...)
+        // 2) c1             Cognitive parameter
+        // 3) c2             Social parameter
+        // 4) w              Inertia weight
+        // 5) gamma          Threshold value for particle velocity - all particles must possess the same or slower velocity for the algorithm to end
+        // 6) epsilon        Radius of the cluster area (Euclidean distance)
+        // 7) delta          Amount of particles, which has to be in the cluster for the algorithm to stop (0-1)
+        // 8) n_particles    Number of particles in the swarm
+        // 9) iter_max       Maximal number of iterations - optimization will stop after that, even if not converged
+        // l4n::ParticleSwarm ps(&domain_bounds,
+        //                       1.711897,
+        //                       1.711897,
+        //                       0.711897,
+        //                       0.5,
+        //                       20,
+        //                       0.7,
+        //                       600,
+        //                       1000);

+        // Parameters
+        // 1) Threshold for the successful ending of the optimization - deviation from minima
+        // 2) Number of iterations to reset step size to tolerance/10.0
+        // 3) Maximal number of iterations - optimization will stop after that, even if not converged
+        l4n::GradientDescent gs(1e-3, 100, 10);

+        // Weight and bias randomization in the network according to the uniform distribution
+        // Calling methods nn.randomize_weights() and nn.randomize_biases()
+        nn.randomize_parameters();

         // gs.optimize(mse); // Network training
         // std::vector<double> i(ds.get_input_dim());
         // std::vector<double> o(ds.get_output_dim());
         // nn.eval_single(i, o); // Evaluate network for one input and save the result into the output vector

         /* Cross - validation */
         l4n::CrossValidator cv(&gs, &mse);
+        // Parameters: 1) Number of data-set parts used for CV, 2) Number of tests performed
         cv.run_k_fold_test(10, 1);

-        /* Save network to the file */
+        /* Save network to the text file */
         nn.save_text("test_net.4n");

         /* Check of the saved network */
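A note on the domain_bounds vector prepared in the hunk above: it stores one lower and one upper bound per trainable parameter, interleaved as (p1_lower, p1_upper, p2_lower, p2_upper, ...), hence the size 2 * (nn.get_n_weights() + nn.get_n_biases()). For the {1, 10, 10, 1} layer sizes used here, a fully connected feed-forward network has 1*10 + 10*10 + 10*1 = 120 weights and, assuming one bias per non-input neuron, 10 + 10 + 1 = 21 biases, so 141 parameters and a 282-element bounds vector; the authoritative counts come from nn.get_n_weights() and nn.get_n_biases(). The sketch below is a standalone illustration of that layout; count_parameters is a hypothetical helper, not part of the lib4neuro API, and the bound-filling loop mirrors the one this commit leaves commented out.

#include <cstddef>
#include <iostream>
#include <vector>

// Illustrative helper (not lib4neuro API): parameter count of a fully connected
// feed-forward network, assuming one bias per non-input neuron.
static size_t count_parameters(const std::vector<unsigned int>& layers) {
    size_t n_weights = 0, n_biases = 0;
    for (size_t l = 1; l < layers.size(); ++l) {
        n_weights += static_cast<size_t>(layers[l - 1]) * layers[l];
        n_biases  += layers[l];
    }
    return n_weights + n_biases;
}

int main() {
    std::vector<unsigned int> layers = {1, 10, 10, 1};
    size_t n_params = count_parameters(layers);        // 120 weights + 21 biases = 141

    // Interleaved bounds (p1_lower, p1_upper, p2_lower, p2_upper, ...),
    // filled the same way as the loop commented out in the example.
    std::vector<double> domain_bounds(2 * n_params);
    for (size_t i = 0; i < domain_bounds.size() / 2; ++i) {
        domain_bounds[2 * i]     = -10;
        domain_bounds[2 * i + 1] =  10;
    }

    std::cout << "parameters: " << n_params
              << ", bounds entries: " << domain_bounds.size() << std::endl;
    return 0;
}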
@@ -99,10 +115,11 @@ int main(int argc, char** argv){
         /* Example of evaluation of a single input, normalized input, de-normalized output */
         std::vector<double> input_norm(ds.get_input_dim()),
-                            input(ds.get_input_dim()),
-                            output_norm(ds.get_output_dim()),
-                            expected_output_norm(ds.get_output_dim()),
-                            output(ds.get_output_dim());
+                            input(ds.get_input_dim()),
+                            output_norm(ds.get_output_dim()),
+                            expected_output_norm(ds.get_output_dim()),
+                            output(ds.get_output_dim()),
+                            expected_output(ds.get_output_dim());

         size_t data_idx = 0;
         ds.get_input(input_norm, data_idx);
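The example relies on ds.normalize() before training and on ds.de_normalize_single() to map normalized inputs and outputs back to the original scale, as the hunks around here show. The diff does not reveal which transformation lib4neuro applies internally, so the sketch below only illustrates the general idea with a min-max scaling of a single data column; the choice of min-max scaling is an assumption made purely for illustration.

#include <algorithm>
#include <iostream>
#include <vector>

int main() {
    // One data column, e.g. a single network input or output.
    std::vector<double> col = {0.0, 2.0, 5.0, 10.0};

    // Min-max normalization to [0, 1]: x_norm = (x - min) / (max - min).
    // A real implementation would guard against max == min.
    double lo = *std::min_element(col.begin(), col.end());
    double hi = *std::max_element(col.begin(), col.end());

    std::vector<double> col_norm(col.size());
    for (size_t i = 0; i < col.size(); ++i) {
        col_norm[i] = (col[i] - lo) / (hi - lo);
    }

    // De-normalization inverts the mapping: x = x_norm * (max - min) + min.
    for (size_t i = 0; i < col_norm.size(); ++i) {
        double back = col_norm[i] * (hi - lo) + lo;
        std::cout << col[i] << " -> " << col_norm[i] << " -> " << back << std::endl;
    }
    return 0;
}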
@@ -112,6 +129,7 @@ int main(int argc, char** argv){
         ds.de_normalize_single(output_norm, output);
         ds.de_normalize_single(input_norm, input);
+        ds.de_normalize_single(expected_output_norm, expected_output);

         std::cout << std::endl << "input: ";
         for(auto el: input_norm) { std::cout << el << ", "; }
@@ -119,8 +137,9 @@ int main(int argc, char** argv){
         std::cout << "output: ";
         for(auto el: output) { std::cout << el << ", "; }
         std::cout << std::endl;
+        std::cout << "error of the " << data_idx << "-th element: " << get_rel_error(output_norm, expected_output_norm) << std::endl;
         std::cout << "expected output: ";
         for(auto el: expected_output) { std::cout << el << ", "; }
         std::cout << std::endl;

         return 0;
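The printout added above reports get_rel_error(output_norm, expected_output_norm), i.e. the error of the data_idx-th sample evaluated on the normalized vectors. As reconstructed in the first hunk, the helper accumulates out (the sum of squared differences) and n (the squared norm of d1) but returns m / n, where m holds only the last difference; the conventional relative squared error would be

E_{rel} = \frac{\sum_i (d_{1,i} - d_{2,i})^2}{\sum_i d_{1,i}^2}

which corresponds to out / n in the helper's variables.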
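Finally, the first hunk passes the optimizer and the error function to l4n::CrossValidator and calls cv.run_k_fold_test(10, 1), i.e. 10 data-set parts and a single test run. The diff does not show how lib4neuro performs the split, so the sketch below only illustrates the generic k-fold idea (partition the sample indices into k folds, train on k-1 of them, validate on the held-out fold); it is a standalone example and does not use the lib4neuro API.

#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
    const size_t n_samples = 23;   // illustrative data-set size
    const size_t k = 10;           // number of folds, as in run_k_fold_test(10, 1)

    // Indices 0..n_samples-1; a real implementation would typically shuffle them first.
    std::vector<size_t> idx(n_samples);
    std::iota(idx.begin(), idx.end(), 0);

    for (size_t fold = 0; fold < k; ++fold) {
        std::vector<size_t> train, validation;
        for (size_t i = 0; i < n_samples; ++i) {
            // Every k-th index (offset by the fold number) is held out for validation.
            (i % k == fold ? validation : train).push_back(idx[i]);
        }
        std::cout << "fold " << fold << ": " << train.size() << " training / "
                  << validation.size() << " validation samples" << std::endl;
        // Here one would train on `train` and evaluate the error on `validation`.
    }
    return 0;
}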