Commit 3b275aae authored by Martin Beseda

[CODE] Reformatted source code.

parent 5fb7eafe
@@ -91,7 +91,7 @@ namespace lib4neuro {
         for (auto line : this->data) {
             //TODO check empty values in data
             std::vector<double> input;
-            for (auto ind : *input_col_indices) {
+            for (auto ind : *input_col_indices) {
                 std::string s;
                 try {
@@ -113,17 +113,19 @@ namespace lib4neuro {
                     /* Add loaded number to the vector of inputs */
                     input.push_back(tmp);
-                } catch (const std::out_of_range& e) {
+                }
+                catch (const std::out_of_range& e) {
                     THROW_OUT_OF_RANGE_ERROR("Non-existing index specified (" + std::to_string(ind) + ")!");
-                } catch (const boost::bad_lexical_cast& e) {
+                }
+                catch (const boost::bad_lexical_cast& e) {
                     THROW_RUNTIME_ERROR(
-                            "Value \"" + s + "\" is not numerical and so it cannot be used in Data Set!");
+                            "Value \"" + s + "\" is not numerical and so it cannot be used in Data Set!");
                 }
             }
             std::vector<double> output;
-            for (auto ind : *output_col_indices) {
+            for (auto ind : *output_col_indices) {
                 output.emplace_back(std::stod(line.at(ind)));
             }
......
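Context for this hunk, not part of the commit: the loop converts the selected CSV columns of each parsed row into doubles, mapping failures onto the library's THROW_* macros. A self-contained sketch of the same pattern, with made-up cell values and plain exceptions standing in for the macros:

```cpp
#include <boost/lexical_cast.hpp>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

int main() {
    std::vector<std::string> line = {"0.5", "1e-3", "oops"};  // one parsed CSV row
    std::vector<size_t> input_col_indices = {0, 1, 2};
    std::vector<double> input;
    for (auto ind : input_col_indices) {
        try {
            /* Add loaded number to the vector of inputs */
            input.push_back(boost::lexical_cast<double>(line.at(ind)));
        }
        catch (const std::out_of_range&) {
            // raised by line.at(ind) when a column index does not exist
            throw std::out_of_range("Non-existing index specified (" + std::to_string(ind) + ")!");
        }
        catch (const boost::bad_lexical_cast&) {
            // raised when the cell is not a number; here we just report it
            std::cerr << "Value \"" << line.at(ind) << "\" is not numerical" << std::endl;
        }
    }
    std::cout << input.size() << " values parsed" << std::endl;  // prints: 2 values parsed
    return 0;
}
```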
@@ -10,9 +10,9 @@ BOOST_CLASS_EXPORT_IMPLEMENT(lib4neuro::DataSet);
 namespace lib4neuro {
     DataSet::DataSet() {
-        this->n_elements = 0;
-        this->input_dim = 0;
-        this->output_dim = 0;
+        this->n_elements = 0;
+        this->input_dim = 0;
+        this->output_dim = 0;
         this->normalization_strategy = std::make_shared<DoubleUnitStrategy>(DoubleUnitStrategy());
     }
@@ -22,10 +22,11 @@ namespace lib4neuro {
         try {
             boost::archive::text_iarchive ia(ifs);
             ia >> *this;
-        } catch (boost::archive::archive_exception& e) {
+        }
+        catch (boost::archive::archive_exception& e) {
             THROW_RUNTIME_ERROR(
-                    "Serialized archive error: '" + e.what() + "'! Please, check if your file is really "
-                    "the serialized DataSet.");
+                    "Serialized archive error: '" + e.what() + "'! Please, check if your file is really "
+                    "the serialized DataSet.");
         }
         ifs.close();
     } else {
@@ -40,8 +41,8 @@ namespace lib4neuro {
                      NormalizationStrategy* ns) {
         this->data.clear();
         this->n_elements = data_ptr->size();
-        this->data = *data_ptr;
-        this->input_dim = this->data[0].first.size();
+        this->data = *data_ptr;
+        this->input_dim = this->data[0].first.size();
         this->output_dim = this->data[0].second.size();
         if (ns) {
@@ -59,9 +60,9 @@ namespace lib4neuro {
                      double output,
                      NormalizationStrategy* ns) {
         std::vector<std::pair<std::vector<double>, std::vector<double>>> new_data_vec;
-        this->data = new_data_vec;
+        this->data = new_data_vec;
         this->n_elements = 0;
-        this->input_dim = 1;
+        this->input_dim = 1;
         this->output_dim = 1;
         if (ns) {
@@ -81,8 +82,8 @@ namespace lib4neuro {
                      unsigned int output_dim,
                      NormalizationStrategy* ns) {
         std::vector<std::pair<std::vector<double>, std::vector<double>>> new_data_vec;
-        this->data = new_data_vec;
-        this->input_dim = bounds.size() / 2;
+        this->data = new_data_vec;
+        this->input_dim = bounds.size() / 2;
         this->output_dim = output_dim;
         this->n_elements = 0;
@@ -97,14 +98,14 @@ namespace lib4neuro {
                              output_func);
     }

-    DataSet::~DataSet(){
+    DataSet::~DataSet() {
     }

     void DataSet::add_data_pair(std::vector<double>& inputs,
                                 std::vector<double>& outputs) {
         if (this->n_elements == 0 && this->input_dim == 0 && this->output_dim == 0) {
-            this->input_dim = inputs.size();
+            this->input_dim = inputs.size();
             this->output_dim = outputs.size();
         }
@@ -157,8 +158,8 @@ namespace lib4neuro {
         // TODO add check of dataset dimensions
         std::vector<std::vector<double>> grid;
-        std::vector<double> tmp;
-        double frac;
+        std::vector<double> tmp;
+        double frac;
         if (no_elems_in_one_dim < 1) {
             THROW_INVALID_ARGUMENT_ERROR("Number of elements in one dimension has to be >=1 !");
         }
@@ -257,7 +258,7 @@ namespace lib4neuro {
         if (!ofs.is_open()) {
             THROW_RUNTIME_ERROR("File " + file_path + " couldn't be open!");
         } else {
-            this->store_data_text( &ofs );
+            this->store_data_text(&ofs);
             ofs.close();
         }
     }
@@ -265,7 +266,7 @@ namespace lib4neuro {
     template<class T>
     std::vector<std::vector<T>> DataSet::cartesian_product(const std::vector<std::vector<T>>* v) {
         std::vector<std::vector<double>> v_combined_old, v_combined, v_tmp;
-        std::vector<double> tmp;
+        std::vector<double> tmp;
         for (const auto& e : v->at(0)) {
             tmp = {e};
@@ -307,12 +308,12 @@ namespace lib4neuro {
             this->max_min_inp_val.emplace_back(this->data.at(0).first.at(0));
         }

-        double tmp, tmp2;
+        double tmp, tmp2;
         for (auto pair : this->data) {
             /* Finding maximum */
             //TODO make more efficiently
-            tmp = *std::max_element(pair.first.begin(),
-                                    pair.first.end());
+            tmp = *std::max_element(pair.first.begin(),
+                                    pair.first.end());
             tmp2 = *std::max_element(pair.second.begin(),
                                      pair.second.end());
@@ -325,8 +326,8 @@ namespace lib4neuro {
             }

             /* Finding minimum */
-            tmp = *std::min_element(pair.first.begin(),
-                                    pair.first.end());
+            tmp = *std::min_element(pair.first.begin(),
+                                    pair.first.end());
             tmp2 = *std::min_element(pair.second.begin(),
                                      pair.second.end());
@@ -371,7 +372,7 @@ namespace lib4neuro {
         if (!this->normalized || !this->normalization_strategy) {
             return val;
         }
-        return this->normalization_strategy->de_normalize( val );
+        return this->normalization_strategy->de_normalize(val);
     }

     void DataSet::get_input(std::vector<double>& d,
@@ -456,7 +457,7 @@ namespace lib4neuro {
                                          this->data.size()) + 1;
         n_chosen = max;
         std::vector<size_t> chosens;
-        size_t chosen;
+        size_t chosen;
         for (size_t i = 0; i < n_chosen; i++) {
             chosen = rand() % this->data.size();
......
......
@@ -36,8 +36,8 @@ namespace lib4neuro {
         * @return
         */
        virtual double eval_on_single_input(std::vector<double>* input,
-                                           std::vector<double>* output,
-                                           std::vector<double>* weights = nullptr) = 0;
+                                           std::vector<double>* output,
+                                           std::vector<double>* weights = nullptr) = 0;

        /**
         *
@@ -99,7 +99,7 @@ namespace lib4neuro {
         *
         * @param params
         */
-        virtual void set_parameters(std::vector<double> &params) = 0;
+        virtual void set_parameters(std::vector<double>& params) = 0;

        /**
         *
@@ -118,7 +118,8 @@ namespace lib4neuro {
         * @param jacobian
         * @param rhs
         */
-        virtual void get_jacobian_and_rhs(std::vector<std::vector<double>> &jacobian, std::vector<double> &rhs) = 0;
+        virtual void get_jacobian_and_rhs(std::vector<std::vector<double>>& jacobian,
+                                          std::vector<double>& rhs) = 0;

        /**
         *
@@ -288,7 +289,8 @@ namespace lib4neuro {
         * @param jacobian
         * @param rhs
         */
-        LIB4NEURO_API virtual void get_jacobian_and_rhs(std::vector<std::vector<double>> &jacobian, std::vector<double> &rhs) override ;
+        LIB4NEURO_API virtual void get_jacobian_and_rhs(std::vector<std::vector<double>>& jacobian,
+                                                        std::vector<double>& rhs) override;

        /**
         *
         * @param weights
@@ -450,19 +452,19 @@ namespace lib4neuro {
         *
         * @param params
         */
-        LIB4NEURO_API virtual void set_parameters(std::vector<double> &params) override;
+        LIB4NEURO_API virtual void set_parameters(std::vector<double>& params) override;

        /**
         *
         * @return
         */
-        LIB4NEURO_API virtual size_t get_n_data_set() override ;
+        LIB4NEURO_API virtual size_t get_n_data_set() override;

        /**
         *
         * @return
         */
-        LIB4NEURO_API virtual size_t get_n_test_data_set() override ;
+        LIB4NEURO_API virtual size_t get_n_test_data_set() override;

        /**
         *
@@ -643,7 +645,7 @@ namespace lib4neuro {
         *
         * @param params
         */
-        LIB4NEURO_API virtual void set_parameters(std::vector<double> &params) override;
+        LIB4NEURO_API virtual void set_parameters(std::vector<double>& params) override;

        /**
         *
@@ -655,7 +657,7 @@ namespace lib4neuro {
         *
         * @return
         */
-        LIB4NEURO_API virtual size_t get_n_test_data_set() override ;
+        LIB4NEURO_API virtual size_t get_n_test_data_set() override;

        /**
         *
@@ -680,8 +682,8 @@ namespace lib4neuro {
         * @param rhs
         */
        LIB4NEURO_API virtual void get_jacobian_and_rhs(
-                std::vector<std::vector<double>> &jacobian,
-                std::vector<double> &rhs) override;
+                std::vector<std::vector<double>>& jacobian,
+                std::vector<double>& rhs) override;

        /**
         *
@@ -691,7 +693,7 @@ namespace lib4neuro {
    protected:
        std::vector<ErrorFunction*>* summand;
-        std::vector<double> summand_coefficient;
+        std::vector<double> summand_coefficient;
    };
}
......
@@ -13,8 +13,8 @@
 BOOST_CLASS_EXPORT_KEY(ExprtkWrapper);

 typedef exprtk::symbol_table<double> symbol_table_t;
-typedef exprtk::expression<double> expression_t;
-typedef exprtk::parser<double> parser_t;
+typedef exprtk::expression<double> expression_t;
+typedef exprtk::parser<double> parser_t;

 /**
  * Class implementing the private properties
......
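These three typedefs mirror the canonical exprtk workflow: bind C++ variables in a symbol_table, attach it to an expression, and compile a formula string with a parser. A minimal sketch, assuming only that exprtk.hpp is on the include path (the formula itself is made up here):

```cpp
#include "exprtk.hpp"
#include <iostream>

int main() {
    typedef exprtk::symbol_table<double> symbol_table_t;
    typedef exprtk::expression<double> expression_t;
    typedef exprtk::parser<double> parser_t;

    double x = 0.0;
    symbol_table_t symbol_table;
    symbol_table.add_variable("x", x);   // bind "x" in formulas to the C++ variable

    expression_t expression;
    expression.register_symbol_table(symbol_table);

    parser_t parser;
    if (!parser.compile("sin(x) + x^2", expression)) {
        std::cerr << parser.error() << std::endl;   // report a malformed formula
        return 1;
    }

    x = 1.5;
    std::cout << expression.value() << std::endl;   // evaluates sin(1.5) + 1.5^2
    return 0;
}
```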
@@ -14,10 +14,10 @@ namespace lib4neuro {
                                      size_t n_to_restart,
                                      int max_iters,
                                      size_t batch) {
-        this->tolerance = epsilon;
+        this->tolerance = epsilon;
         this->restart_frequency = n_to_restart;
-        this->maximum_niters = max_iters;
-        this->batch = batch;
+        this->maximum_niters = max_iters;
+        this->batch = batch;
     }

     GradientDescent::~GradientDescent() {
@@ -44,25 +44,25 @@ namespace lib4neuro {
     }

     bool GradientDescent::perform_feasible_1D_step(
-            lib4neuro::ErrorFunction& ef,
-            double error_previous,
-            double step_coefficient,
-            std::shared_ptr<std::vector<double>> direction,
-            std::shared_ptr<std::vector<double>> parameters_before,
-            std::shared_ptr<std::vector<double>> parameters_after
+            lib4neuro::ErrorFunction& ef,
+            double error_previous,
+            double step_coefficient,
+            std::shared_ptr<std::vector<double>> direction,
+            std::shared_ptr<std::vector<double>> parameters_before,
+            std::shared_ptr<std::vector<double>> parameters_after
     ) {
         size_t i;

-        boost::random::mt19937 gen(std::time(0));
+        boost::random::mt19937 gen(std::time(0));
         boost::random::uniform_int_distribution<> dis(0,
                                                       direction->size());
-        size_t max_dir_idx = dis(gen);
+        size_t max_dir_idx = dis(gen);

         double error_current = error_previous + 1.0;
         while (error_current >= error_previous) {
             (*parameters_after)[max_dir_idx] =
-                    (*parameters_before)[max_dir_idx] - step_coefficient * (*direction)[max_dir_idx];
+                    (*parameters_before)[max_dir_idx] - step_coefficient * (*direction)[max_dir_idx];
             error_current = ef.eval(parameters_after.get());

             if (step_coefficient < 1e-32) {
@@ -92,16 +92,16 @@ namespace lib4neuro {
             *ofs << "Initial error: " << ef.eval() << std::endl;
         }

-        double grad_norm = this->tolerance * 10.0, gamma, sx, beta;
-        double grad_norm_prev;
-        size_t i;
-        long long int iter_idx = this->maximum_niters;
-        size_t iter_counter = 0;
+        double grad_norm = this->tolerance * 10.0, gamma, sx, beta;
+        double grad_norm_prev;
+        size_t i;
+        long long int iter_idx = this->maximum_niters;
+        size_t iter_counter = 0;

-        gamma = 1.0;
+        gamma = 1.0;
         double prev_val, val = 0.0, c = 1.25;

-        size_t n_parameters = ef.get_dimension();
+        size_t n_parameters = ef.get_dimension();

         std::vector<double>* gradient_current(new std::vector<double>(n_parameters));
@@ -120,11 +120,11 @@ namespace lib4neuro {
         val = ef.eval(params_current);

         size_t counter_good_guesses = 0, counter_bad_guesses = 0, counter_simplified_direction_good = 0, counter_simplified_direction_bad = 0;
-        double cooling = 1.0;
+        double cooling = 1.0;
         while (grad_norm > this->tolerance && (iter_idx != 0)) {
             iter_idx--;
             iter_counter++;
-            prev_val = val;
+            prev_val = val;
             grad_norm_prev = grad_norm;

             /* reset of the current gradient */
@@ -147,11 +147,11 @@ namespace lib4neuro {
             /* step length calculation */
             if (iter_counter < 10 || iter_counter % this->restart_frequency == 0) {
                 /* fixed step length */
-                gamma = 0.1 * this->tolerance;
+                gamma = 0.1 * this->tolerance;
                 cooling = 1.0;
             } else {
                 /* angle between two consecutive gradients */
-                sx = 0.0;
+                sx = 0.0;
                 for (i = 0; i < gradient_current->size(); ++i) {
                     sx += (gradient_current->at(i) * gradient_prev->at(i));
                 }
@@ -161,7 +161,7 @@ namespace lib4neuro {
                 } else if (sx > 1.0 - 5e-12) {
                     sx = 1 - 5e-12;
                 }
-                beta = std::sqrt(std::acos(sx) / lib4neuro::PI);
+                beta = std::sqrt(std::acos(sx) / lib4neuro::PI);
                 eval_step_size_mk(gamma,
                                   beta,
@@ -181,12 +181,12 @@ namespace lib4neuro {
             /* switcheroo */
-            ptr_mem = gradient_prev;
-            gradient_prev = gradient_current;
+            ptr_mem = gradient_prev;
+            gradient_prev = gradient_current;
             gradient_current = ptr_mem;

-            ptr_mem = params_prev;
-            params_prev = params_current;
+            ptr_mem = params_prev;
+            params_prev = params_current;
             params_current = ptr_mem;
......
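A note for orientation, not part of the commit itself: the step-length logic in the hunks above derives a contraction factor from the angle between two consecutive gradients. Assuming sx holds the normalized dot product of the current and previous gradients (only the accumulation loop is visible here; any normalization is outside these hunks), clamped away from plus and minus one by 5e-12, the code computes

```latex
s_x = \frac{\nabla E_k \cdot \nabla E_{k-1}}{\lVert \nabla E_k \rVert \, \lVert \nabla E_{k-1} \rVert},
\qquad
\beta = \sqrt{\frac{\arccos(s_x)}{\pi}}
```

so beta runs from 0 (consecutive gradients aligned) toward 1 (gradients opposed) before being passed to eval_step_size_mk.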
@@ -78,12 +78,12 @@ namespace lib4neuro {
         * @param parameters_after[out] suggested state of the parameters after the analysis completes
         */
        virtual bool perform_feasible_1D_step(
-                lib4neuro::ErrorFunction& ef,
-                double error_previous,
-                double step_coefficient,
-                std::shared_ptr<std::vector<double>> direction,
-                std::shared_ptr<std::vector<double>> parameters_before,
-                std::shared_ptr<std::vector<double>> parameters_after
+                lib4neuro::ErrorFunction& ef,
+                double error_previous,
+                double step_coefficient,
+                std::shared_ptr<std::vector<double>> direction,
+                std::shared_ptr<std::vector<double>> parameters_before,
+                std::shared_ptr<std::vector<double>> parameters_after
        );

    public:
......
@@ -13,10 +13,10 @@ namespace lib4neuro {
                                          size_t n_to_restart,
                                          int max_iters,
                                          size_t batch) {
-        this->tolerance = epsilon;
+        this->tolerance = epsilon;
         this->restart_frequency = n_to_restart;
-        this->maximum_niters = max_iters;
-        this->batch = batch;
+        this->maximum_niters = max_iters;
+        this->batch = batch;
     }

     GradientDescentBB::~GradientDescentBB() {
@@ -35,16 +35,16 @@ namespace lib4neuro {
             *ofs << "Initial error: " << ef.eval() << std::endl;
         }

-        double grad_norm = this->tolerance * 10.0, gamma, sx, beta;
-        double grad_norm_prev;
-        size_t i;
-        long long int iter_idx = this->maximum_niters;
-        size_t iter_counter = 0;
+        double grad_norm = this->tolerance * 10.0, gamma, sx, beta;
+        double grad_norm_prev;
+        size_t i;
+        long long int iter_idx = this->maximum_niters;
+        size_t iter_counter = 0;

-        gamma = 1.0;
+        gamma = 1.0;
         double prev_val, val = 0.0, c = 1.25, val_best;

-        size_t n_parameters = ef.get_dimension();
+        size_t n_parameters = ef.get_dimension();

         std::vector<double>* gradient_current(new std::vector<double>(n_parameters));
@@ -55,9 +55,9 @@ namespace lib4neuro {
         std::vector<double>* ptr_mem;

-        double alpha = -1.0, cc, gg;
+        double alpha = -1.0, cc, gg;
         std::vector<double> dot__(3);
-        double d1 = 0.0, d2 = 0.0, d3 = 0.0;
+        double d1 = 0.0, d2 = 0.0, d3 = 0.0;

         std::fill(gradient_current->begin(),
@@ -66,14 +66,14 @@ namespace lib4neuro {
         std::fill(gradient_prev->begin(),
                   gradient_prev->end(),
                   0.0);

-        val = ef.eval(params_current);
+        val = ef.eval(params_current);
         val_best = val;

         double cooling_factor = 1.0;
         while (grad_norm > this->tolerance && (iter_idx != 0)) {
             iter_idx--;
             iter_counter++;
-            prev_val = val;
+            prev_val = val;
             grad_norm_prev = grad_norm;

             /* reset of the current gradient */
@@ -97,7 +97,7 @@ namespace lib4neuro {
             /* step length calculation */
             if (iter_counter < 10 || iter_counter % this->restart_frequency < 10) {
                 /* fixed step length */
-                gamma = 0.1 * this->tolerance;
+                gamma = 0.1 * this->tolerance;
                 cooling_factor = 1.0;
             } else {
@@ -131,12 +131,12 @@ namespace lib4neuro {
             /* switcheroo */
-            ptr_mem = gradient_prev;
-            gradient_prev = gradient_current;
+            ptr_mem = gradient_prev;
+            gradient_prev = gradient_current;
             gradient_current = ptr_mem;

-            ptr_mem = params_prev;
-            params_prev = params_current;
+            ptr_mem = params_prev;
+            params_prev = params_current;
             params_current = ptr_mem;

             val = ef.eval(params_current);
@@ -206,7 +206,7 @@ namespace lib4neuro {
         delete gradient_current;
         delete gradient_prev;
-        delete params_current ;
+        delete params_current;
         delete params_prev;
         delete params_best;
......
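Again for orientation rather than commit content: the class name and the three accumulated dot products (dot__, d1, d2, d3) are consistent with Barzilai-Borwein step lengths. With s_{k-1} = x_k - x_{k-1} and y_{k-1} the corresponding gradient difference, the two textbook variants are

```latex
\gamma_k^{\mathrm{BB1}} = \frac{s_{k-1}^{\top} s_{k-1}}{s_{k-1}^{\top} y_{k-1}},
\qquad
\gamma_k^{\mathrm{BB2}} = \frac{s_{k-1}^{\top} y_{k-1}}{y_{k-1}^{\top} y_{k-1}}
```

but which variant the elided lines implement is not visible in this diff.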
@@ -15,10 +15,10 @@ namespace lib4neuro {
                                                          size_t n_to_restart,
                                                          int max_iters,
                                                          size_t batch) {
-        this->tolerance = epsilon;
+        this->tolerance = epsilon;
         this->restart_frequency = n_to_restart;
-        this->maximum_niters = max_iters;
-        this->batch = batch;
+        this->maximum_niters = max_iters;
+        this->batch = batch;
     }

     GradientDescentSingleItem::~GradientDescentSingleItem() {
@@ -36,7 +36,7 @@ namespace lib4neuro {
         double alpha = 10.0 / n_elems;
         alpha = 1.0;

-        double value = f.eval();
+        double value = f.eval();
         double value_shifted = value + 1.0;
@@ -58,13 +58,13 @@ namespace lib4neuro {
                                             std::ofstream* ofs) {
         COUT_INFO("Finding a solution via a Gradient Descent [Single Item] method with adaptive step-length..."
-                  << std::endl);
+                  << std::endl);
         COUT_INFO("Initial error: " << ef.eval() << std::endl);

         size_t total_elements = ef.get_n_data_set(), updated_elements = 0, iter = 0;
-        double max_error = 1.0, error, gamma;
-        size_t iter_idx = this->maximum_niters;
-        size_t dim = ef.get_dimension();
+        double max_error = 1.0, error, gamma;
+        size_t iter_idx = this->maximum_niters;
+        size_t dim = ef.get_dimension();

         std::vector<double> parameter_vector = ef.get_parameters();
         std::vector<double> gradient_vector(dim);
@@ -74,7 +74,7 @@ namespace lib4neuro {
             iter_idx--;
             iter++;

-            max_error = 0.0;
+            max_error = 0.0;
             updated_elements = 0;
             std::fill(search_direction.begin(),
                       search_direction.end(),
......
@@ -12,7 +12,7 @@ namespace lib4neuro {
     LearningSequence::LearningSequence(double tolerance,
                                        int max_n_cycles) {
-        this->tol = tolerance;
+        this->tol = tolerance;
         this->max_number_of_cycles = max_n_cycles;
     }
@@ -27,7 +27,7 @@ namespace lib4neuro {
         double error = ef.eval();
         this->optimal_parameters = ef.get_parameters();
         double the_best_error = error;
-        int mcycles = this->max_number_of_cycles, cycle_idx = 0;
+        int mcycles = this->max_number_of_cycles, cycle_idx = 0;

         std::vector<double> params;
         while (error > this->tol && mcycles != 0) {
@@ -40,7 +40,7 @@ namespace lib4neuro {
                 //TODO do NOT copy vectors if not needed
                 params = *m->get_parameters();
-                error = ef.eval(&params);
+                error = ef.eval(&params);
                 ef.set_parameters(params);
......
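For orientation (the full method body is elided above): LearningSequence appears to cycle through a list of optimizers until the error drops below tol or the cycle budget max_number_of_cycles is exhausted. A self-contained mock of that loop; the Optimizer and ErrorFunction types below are stand-ins, not lib4neuro's real classes:

```cpp
#include <iostream>
#include <vector>

// Toy error function with one parameter and its minimum at x == 0.
struct ErrorFunction {
    double x = 10.0;
    double eval() const { return x * x; }
};

// Crude "optimizer" that merely halves the parameter on each call.
struct Optimizer {
    void optimize(ErrorFunction& ef) { ef.x *= 0.5; }
};

int main() {
    ErrorFunction ef;
    std::vector<Optimizer> learners(2);   // the sequence of methods to cycle through

    double tol = 1e-6;                    // this->tol in the hunk above
    int mcycles = 100;                    // this->max_number_of_cycles
    double error = ef.eval();
    while (error > tol && mcycles != 0) {
        mcycles--;
        for (auto& m : learners) {
            m.optimize(ef);               // run one optimizer in the sequence
            error = ef.eval();            // re-evaluate the error after each one
        }
    }
    std::cout << "final error: " << error << std::endl;
    return 0;
}
```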
@@ -43,39 +43,42 @@ struct lib4neuro::LevenbergMarquardt::LevenbergMarquardtImpl {
 };

 void lib4neuro::LevenbergMarquardt::LevenbergMarquardtImpl::get_jacobian_and_rhs(
-        lib4neuro::ErrorFunction& ef,
-        arma::Mat<double>& J,
-        arma::Col<double>& rhs,
-        size_t data_subset_size) {
+        lib4neuro::ErrorFunction& ef,
+        arma::Mat<double>& J,
+        arma::Col<double>& rhs,
+        size_t data_subset_size) {

     std::vector<std::vector<double>> jacobian;
-    std::vector<double> rhs_vec;
+    std::vector<double> rhs_vec;

-    if(data_subset_size <= 0){
-        data_subset_size = ef.get_n_data_set();
+    if (data_subset_size <= 0) {
+        data_subset_size = ef.get_n_data_set();
     }

-    if(data_subset_size < ef.get_n_data_set()){
-        ef.divide_data_train_test((double)data_subset_size / (double)ef.get_n_data_set());
+    if (data_subset_size < ef.get_n_data_set()) {
+        ef.divide_data_train_test((double) data_subset_size / (double) ef.get_n_data_set());
     }

-    ef.get_jacobian_and_rhs(jacobian, rhs_vec);
+    ef.get_jacobian_and_rhs(jacobian,
+                            rhs_vec);

-    if(data_subset_size < ef.get_n_data_set()){
+    if (data_subset_size < ef.get_n_data_set()) {
         ef.return_full_data_set_for_training();
     }

-    size_t dim_out = jacobian.size();
+    size_t dim_out = jacobian.size();
     size_t n_parameters = rhs_vec.size();

-    J.reshape(dim_out, n_parameters);
+    J.reshape(dim_out,
+              n_parameters);
     rhs.resize(n_parameters);

     J.fill(0.0);
     rhs.fill(0.0);

     for (size_t ri = 0; ri < jacobian.size(); ++ri) {
         for (size_t ci = 0; ci < n_parameters; ++ci) {
-            J.at(ri, ci) = jacobian[ri][ci];
+            J.at(ri,
+                 ci) = jacobian[ri][ci];
         }
     }

     for (size_t ci = 0; ci < n_parameters; ++ci) {
@@ -94,15 +97,15 @@ namespace lib4neuro {
                                            double lambda_increase,
                                            double lambda_decrease) : p_impl(new LevenbergMarquardtImpl()) {
-        this->p_impl->batch_size = bs;
-        this->p_impl->tolerance = tolerance;
-        this->p_impl->tolerance_gradient = tolerance_gradient;
-        this->p_impl->tolerance_parameters = tolerance_parameters;
+        this->p_impl->batch_size = bs;
+        this->p_impl->tolerance = tolerance;
+        this->p_impl->tolerance_gradient = tolerance_gradient;
+        this->p_impl->tolerance_parameters = tolerance_parameters;
         this->p_impl->LM_step_acceptance_threshold = LM_step_acceptance_threshold;
-        this->p_impl->lambda_initial = lambda_initial;
-        this->p_impl->lambda_increase = lambda_increase;
-        this->p_impl->lambda_decrease = lambda_decrease;
-        this->p_impl->maximum_niters = max_iters;
+        this->p_impl->lambda_initial = lambda_initial;
+        this->p_impl->lambda_increase = lambda_increase;
+        this->p_impl->lambda_decrease = lambda_decrease;
+        this->p_impl->maximum_niters = max_iters;
     }

     void LevenbergMarquardt::optimize(lib4neuro::ErrorFunction& ef,
@@ -118,13 +121,13 @@ namespace lib4neuro {
         double current_err = ef.eval();

         COUT_INFO(
-                "Finding a solution via a Levenberg-Marquardt method... Starting error: " << current_err << std::endl);
+                "Finding a solution via a Levenberg-Marquardt method... Starting error: " << current_err << std::endl);
         if (ofs && ofs->is_open()) {
             *ofs << "Finding a solution via a Levenberg-Marquardt method... Starting error: " << current_err
                  << std::endl;
         }

-        size_t n_parameters = ef.get_dimension();
+        size_t n_parameters = ef.get_dimension();
         size_t n_data_points = ef.get_n_data_set();
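For orientation, since the optimizer body is truncated here: once J and rhs are assembled by get_jacobian_and_rhs above, a standard Levenberg-Marquardt iteration solves the damped normal equations

```latex
\left( J^{\top} J + \lambda \, \operatorname{diag}(J^{\top} J) \right) \delta = J^{\top} r
```

and then scales lambda by lambda_increase after a rejected step and by lambda_decrease after an accepted one, starting from lambda_initial, with acceptance governed by LM_step_acceptance_threshold. This is the textbook scheme suggested by the constructor parameters; whether the damping term uses diag(J^T J) or the identity is not visible in this diff.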