#include <sstream>
#include <fstream>
#include <cmath>
#include <cassert>
#include <algorithm>

#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_int_distribution.hpp>
#include "exceptions.h"
#include "message.h"
namespace lib4neuro {

size_t ErrorFunction::get_dimension() {
    return this->dimension;
}
std::vector<NeuralNetwork*>& ErrorFunction::get_nets() {
    return nets;
}

DataSet* ErrorFunction::get_dataset() const {
    return ds;
}

void ErrorFunction::set_dataset(DataSet* ds) {
    this->ds = ds;
}
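
/* Splits the current data set into a training part (kept in this->ds) and a
 * test part (this->ds_test), keeping the untouched original in this->ds_full.
 * NOTE: the test indices are sampled WITH replacement, so the same entry can
 * be drawn more than once and the effective test-set size may differ from
 * ceil(ds_size * percent_test). */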
void ErrorFunction::divide_data_train_test(double percent_test) {
    size_t ds_size = this->ds->get_n_elements();

    /* Store the full data set */
    this->ds_full = this->ds;

    /* Choose a random subset of the DataSet for training and the remaining part for validation */
    boost::random::uniform_int_distribution<> dist(0, ds_size - 1);

    size_t test_set_size = ceil(ds_size * percent_test);

    std::vector<unsigned int> test_indices;
    test_indices.reserve(test_set_size);
    for (size_t i = 0; i < test_set_size; i++) {
        test_indices.emplace_back(dist(gen));
    }
    std::sort(test_indices.begin(), test_indices.end(), std::greater<unsigned int>());

    std::vector<std::pair<std::vector<double>, std::vector<double>>> test_data, train_data;

    /* Copy all the data to train_data */
    for (auto e : *this->ds_full->get_data()) {
        train_data.emplace_back(e);
    }

    /* Move the testing data from train_data to test_data (indices are sorted
     * in descending order, so the erase positions stay valid) */
    for (auto ind : test_indices) {
        test_data.emplace_back(train_data.at(ind));
    }
    for (auto ind : test_indices) {
        train_data.erase(train_data.begin() + ind);
    }

    /* Re-initialize the data sets for training and testing */
    this->ds = new DataSet(&train_data,
                           this->ds_full->get_normalization_strategy());
    this->ds_test = new DataSet(&test_data,
                                this->ds_full->get_normalization_strategy());
}
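/* Greedy training-subset growth: evaluates the per-entry error over the full
 * data set and activates the single worst not-yet-active entry, then rebuilds
 * this->ds from all active entries. Returns the new training-subset size. */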
size_t ErrorFunction::divide_data_worst_subset(std::vector<size_t>& subset_indices,
                                               std::vector<bool>& active_subset,
                                               std::vector<float>& entry_errors) {
    if (this->ds_full == nullptr) {
        this->ds_full = this->ds;
    }

    size_t ds_size = this->ds_full->get_n_elements();
    if (entry_errors.size() != ds_size) {
        entry_errors.resize(ds_size);
    }
    if (active_subset.size() != ds_size) {
        active_subset.resize(ds_size);
        std::fill(active_subset.begin(), active_subset.end(), false);
    }

    /* Per-entry error with the networks' current parameters (nullptr) */
    std::vector<double> error_vector(this->get_n_outputs());
    for (size_t i = 0; i < ds_size; ++i) {
        entry_errors[i] = this->eval_single_item_by_idx(i, nullptr, error_vector);
    }

    std::vector<std::pair<std::vector<double>, std::vector<double>>> train_set;

    /* Find the not-yet-active entry with the largest error */
    double max_error = -1.0;
    size_t max_error_entry_idx = 0;
    for (size_t i = 0; i < ds_size; ++i) {
        if (active_subset[i]) {
            continue;
        }
        if (entry_errors[i] > max_error) {
            max_error = entry_errors[i];
            max_error_entry_idx = i;
        }
    }
    if (max_error >= 0.0) {
        subset_indices.push_back(max_error_entry_idx);
        active_subset[max_error_entry_idx] = true;
    }

    for (auto el : subset_indices) {
        train_set.emplace_back(this->ds_full->get_data()->at(el));
    }

    if (this->ds != this->ds_full) {
        delete this->ds;
    }
    this->ds = new DataSet(&train_set,
                           this->ds_full->get_normalization_strategy());

    return train_set.size();
}
void ErrorFunction::return_full_data_set_for_training() {
    if (this->ds_test || this->ds != this->ds_full) {
//        delete this->ds;
        this->ds = this->ds_full; // restore the pointer to the full data set
    }
}

void MSE::get_jacobian_and_rhs(std::vector<std::vector<double>>& jacobian,
                               std::vector<double>& rhs) {
    this->get_jacobian_and_rhs(jacobian, rhs, *this->ds->get_data());
}
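
/* The overload below assembles the Gauss-Newton right-hand side rhs = J^T e,
 * where row r of the local Jacobian holds the derivatives of output r w.r.t.
 * all trainable parameters and e is the per-output error of the current item. */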
void MSE::get_jacobian_and_rhs(std::vector<std::vector<double>>& jacobian,
                               std::vector<double>& rhs,
                               std::vector<std::pair<std::vector<double>, std::vector<double>>>& data) {
//    size_t row_idx = 0;
    std::vector<double> partial_error(this->get_n_outputs());
    rhs.resize(this->get_dimension());
    std::fill(rhs.begin(), rhs.end(), 0.0);

    std::vector<std::vector<double>> jac_loc;
    for (auto item : data) {
        /* NOTE: the remaining arguments of get_jacobian were elided in the
         * source; assumed to be the current data item and the error buffer */
        this->nets[0]->get_jacobian(jac_loc, item, partial_error);

        for (size_t ri = 0; ri < jac_loc.size(); ++ri) {
            for (size_t ci = 0; ci < this->get_dimension(); ++ci) {
//                J.at(row_idx, ci) = jacobian[ri][ci];
                rhs.at(ci) += partial_error[ri] * jac_loc[ri][ci];
            }
//            row_idx++;
        }
    }
}
/* NOTE: the constructor signature was elided in the source; reconstructed
 * from the surviving body and from how nets[0] and this->ds are used below */
MSE::MSE(NeuralNetwork* net,
         DataSet* ds) {
    this->nets.push_back(net);
    this->ds = ds;
    this->dimension = net->get_n_weights() + net->get_n_biases();
}
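
/* Error of nets[0] on a single input/output pair. */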
double MSE::eval_on_single_input(std::vector<double>* input,
                                 std::vector<double>* output,
                                 std::vector<double>* weights) {
    std::vector<double> predicted_output(this->nets[0]->get_n_outputs());
    this->nets[0]->eval_single(*input, predicted_output, weights);

    double result = 0;
    double val;
    for (size_t i = 0; i < output->size(); i++) {
        val = output->at(i) - predicted_output.at(i);
        result += val * val;
    }

    /* NOTE: the return expression was elided in the source; reported here as
     * the Euclidean norm of the output difference */
    return std::sqrt(result);
}
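
/* Evaluates the MSE of nets[0] over 'data_set'; when 'verbose' is set or
 * 'results_file_path' is given, a per-entry table (index, input, real output,
 * predicted output, absolute and relative errors) is emitted as well. */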
double MSE::eval_on_data_set(lib4neuro::DataSet* data_set,
                             std::ofstream* results_file_path,
                             std::vector<double>* weights,
                             bool verbose) {
    size_t dim_in = data_set->get_input_dim();
    size_t dim_out = data_set->get_output_dim();
    double error = 0.0, val, output_norm = 0;

    std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = data_set->get_data();
    size_t n_elements = data->size();

    //TODO instead use something smarter
    std::vector<std::vector<double>> outputs(data->size());

    if (verbose) {
        COUT_DEBUG("Evaluation of the error function MSE on the given data-set" << std::endl);
        COUT_DEBUG(R_ALIGN << "[Element index]" << " "
                           << R_ALIGN << "[Input]" << " "
                           << R_ALIGN << "[Real output]" << " "
                           << R_ALIGN << "[Predicted output]" << " "
                           << R_ALIGN << "[Absolute error]" << " "
                           << R_ALIGN << "[Relative error %]"
                           << std::endl);
    }

    if (results_file_path) {
        *results_file_path << R_ALIGN << "[Element index]" << " "
                           << R_ALIGN << "[Input]" << " "
                           << R_ALIGN << "[Real output]" << " "
                           << R_ALIGN << "[Predicted output]" << " "
                           << R_ALIGN << "[Abs. error]" << " "
                           << R_ALIGN << "[Rel. error %]"
                           << std::endl;
    }

    /* Compute the net output for every element and store it in 'outputs' */
    for (size_t i = 0; i < data->size(); i++) {
        std::vector<double> output(dim_out);
        this->nets[0]->eval_single(data->at(i).first, output, weights);
        outputs.at(i) = output;
    }
    double denormalized_output;
    double denormalized_real_input;
    double denormalized_real_output;

    std::string separator = "";
    for (size_t i = 0; i < data->size(); i++) {
        /* Format the denormalized input vector */
        std::stringstream ss_input;
        separator = "";
        for (size_t j = 0; j < dim_in; ++j) {
            denormalized_real_input = data_set->get_denormalized_value(data->at(i).first.at(j));
            ss_input << separator << denormalized_real_input;
            separator = ",";
        }

        std::stringstream ss_real_output;
        std::stringstream ss_predicted_output;
        separator = "";

        /* Compute the difference for every element of the output vector */
        double loc_error = 0;
        output_norm = 0;
        for (size_t j = 0; j < dim_out; ++j) {
            denormalized_real_output = data_set->get_denormalized_value(data->at(i).second.at(j));
            denormalized_output = data_set->get_denormalized_value(outputs.at(i).at(j));

            ss_real_output << separator << denormalized_real_output;
            ss_predicted_output << separator << denormalized_output;
            separator = ",";

            val = denormalized_output - denormalized_real_output;
            loc_error += val * val;
            output_norm += denormalized_output * denormalized_output;
        }
        error += loc_error;
//        std::cout << " entry #" << i+1 << ", error: " << loc_error << std::endl;

        std::stringstream ss_ind;
        ss_ind << "[" << i << "]";

        /* NOTE: the absolute/relative error expressions were elided in the
         * source; reconstructed as the error norm and a symmetric % error */
#ifdef L4N_DEBUG
        if (verbose) {
            COUT_DEBUG(R_ALIGN << ss_ind.str() << " "
                               << R_ALIGN << ss_input.str() << " "
                               << R_ALIGN << ss_real_output.str() << " "
                               << R_ALIGN << ss_predicted_output.str() << " "
                               << R_ALIGN << std::sqrt(loc_error) << " "
                               << R_ALIGN << 200.0 * std::sqrt(loc_error) / (std::sqrt(loc_error) + std::sqrt(output_norm))
                               << std::endl);
        }
#endif

        if (results_file_path) {
            *results_file_path << R_ALIGN << ss_ind.str() << " "
                               << R_ALIGN << ss_input.str() << " "
                               << R_ALIGN << ss_real_output.str() << " "
                               << R_ALIGN << ss_predicted_output.str() << " "
                               << R_ALIGN << std::sqrt(loc_error) << " "
                               << R_ALIGN << 200.0 * std::sqrt(loc_error) / (std::sqrt(loc_error) + std::sqrt(output_norm))
                               << std::endl;
        }
    }
    double result = error / (this->rescale_error ? n_elements : 1.0);

    if (verbose) {
        COUT_DEBUG("MSE = " << result << std::endl);
    }
    if (results_file_path) {
        *results_file_path << "MSE = " << result << std::endl;
    }

    return result;
}
double MSE::eval_on_data_set(DataSet* data_set,
                             std::string results_file_path,
                             std::vector<double>* weights,
                             bool verbose) {
    std::ofstream ofs(results_file_path);
    if (ofs.is_open()) {
        double error = this->eval_on_data_set(data_set, &ofs, weights, verbose);
        ofs.close();
        return error;
    } else {
        THROW_RUNTIME_ERROR("File " + results_file_path + " couldn't be open!");
    }
    return -1.0;
}
double MSE::eval_on_data_set(DataSet* data_set,
                             std::vector<double>* weights,
                             bool verbose) {
    return this->eval_on_data_set(data_set, nullptr, weights, verbose);
}
double MSE::eval(std::vector<double>* weights,
                 bool denormalize_data,
                 bool verbose) {
    double out = this->eval_on_data_set(this->ds, nullptr, weights, verbose);

    /* Sum the partial errors over all MPI ranks */
    MPI_Allreduce(MPI_IN_PLACE, &out, 1, MPI_DOUBLE, MPI_SUM, lib4neuro::mpi_active_comm);
    return out;
}
double MSE::eval_on_test_data(std::vector<double>* weights,
                              bool verbose) {
    return this->eval_on_data_set(this->ds_test, weights, verbose);
}

double MSE::eval_on_test_data(std::string results_file_path,
                              std::vector<double>* weights,
                              bool verbose) {
    return this->eval_on_data_set(this->ds_test, results_file_path, weights, verbose);
}

double MSE::eval_on_test_data(std::ofstream* results_file_path,
                              std::vector<double>* weights,
                              bool verbose) {
    return this->eval_on_data_set(this->ds_test, results_file_path, weights, verbose);
}
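/* Accumulates the alpha-scaled MSE gradient w.r.t. all weights and biases of
 * nets[0] into 'grad' over one mini-batch; the per-rank partial gradients are
 * combined with MPI_Allreduce at the end. */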
void MSE::calculate_error_gradient(std::vector<double>& params,
                                   std::vector<double>& grad,
                                   double alpha,
                                   size_t batch) {
    size_t dim_out = this->ds->get_output_dim();
    size_t n_elements = this->ds->get_n_elements();

    std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = this->ds->get_data();
    std::fill(grad.begin(), grad.end(), 0.0);

    /* Every MPI rank processes its own share of the batch */
    batch = (unsigned int) (batch / lib4neuro::mpi_nranks);
//    if (lib4neuro::mpi_rank == 0) {
//        *data = this->ds->get_random_data_batch(batch);
//        n_elements = data->size();
//    }
    this->ds->activate_next_data_batch(batch);
//    std::cout << lib4neuro::mpi_rank << " " << this->ds->get_permutation_data_indices()->size() << std::endl;
//    MPI_Barrier(lib4neuro::mpi_active_comm);
//    MPI_Bcast(this->ds->get_permutation_data_indices_content(),
//              this->ds->get_permutation_data_indices()->size(),
//              MPI_UNSIGNED,
//              0,
//              lib4neuro::mpi_active_comm);
//    MPI_Bcast(this->ds->get_active_elements_content(),
//              this->ds->get_active_elements_indices()->size(),
//              MPI_UNSIGNED,
//              0,
//              lib4neuro::mpi_active_comm);
//    unsigned int batch_ind;
//    if(lib4neuro::mpi_rank == 0) {
//        batch_ind = this->ds->get_current_batch_vector_index();
//    }
//    MPI_Bcast(this->ds->get_current_batch_vector_index_ptr(),
//              1,
//              MPI_UNSIGNED,
//              0,
//              lib4neuro::mpi_active_comm);

    std::vector<double> error_derivative(dim_out);

//    for (auto el: *data) { // Iterate through every element in the test set
    for (auto ind : *this->ds->get_active_elements_indices()) {
        auto el = data->at(ind);

        /* Compute the net output and store it into 'error_derivative' */
        this->nets[0]->eval_single(el.first, error_derivative, &params);

        for (size_t j = 0; j < dim_out; ++j) {
            error_derivative.at(j) = 2.0 * (error_derivative.at(j) - el.second.at(j)); //real - expected result
        }

        /* NOTE: remaining arguments were elided in the source; assumed to
         * mirror the add_to_gradient_single usage further below */
        this->nets[0]->add_to_gradient_single(el.first, error_derivative, alpha / n_elements, grad);
    }

    /* Sum the partial gradients over all MPI ranks */
    MPI_Allreduce(MPI_IN_PLACE, &grad[0], grad.size(), MPI_DOUBLE, MPI_SUM, lib4neuro::mpi_active_comm);
}

double MSE::calculate_single_residual(std::vector<double>* input,
                                      std::vector<double>* output,
                                      std::vector<double>* parameters) {
    //TODO maybe move to the general ErrorFunction
    //TODO check input vector sizes - they HAVE TO be allocated before calling this function

    return -this->eval_on_single_input(input, output, parameters);
}
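
/* Numerical gradient of the single-item residual via central differences,
 * df/dp_i ~ (f(p_i + delta) - f(p_i - delta)) / (2 * delta), with the step
 * delta scaled to the magnitude of each parameter. */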
void MSE::calculate_residual_gradient(std::vector<double>* input,
                                      std::vector<double>* output,
                                      std::vector<double>* gradient,
                                      double h) {
    //TODO check input vector sizes - they HAVE TO be allocated before calling this function

    size_t n_parameters = this->get_dimension();
    std::vector<double> parameters = this->get_parameters();

    double delta;  // Complete step size
    double former_parameter_value;
    double f_val1; // f(x + delta)
    double f_val2; // f(x - delta)

    for (size_t i = 0; i < n_parameters; i++) {
        delta = h * (1 + std::abs(parameters.at(i)));
        former_parameter_value = parameters.at(i);

        /* Computation of f_val1 = f(x + delta) */
        parameters.at(i) = former_parameter_value + delta;
        f_val1 = this->calculate_single_residual(input, output, &parameters);

        /* Computation of f_val2 = f(x - delta) */
        parameters.at(i) = former_parameter_value - delta;
        f_val2 = this->calculate_single_residual(input, output, &parameters);

        /* Central difference */
        gradient->at(i) = (f_val1 - f_val2) / (2 * delta);

        /* Restore the parameter to its former value */
        parameters.at(i) = former_parameter_value;
    }
}
void MSE::calculate_error_gradient_single(std::vector<double>& error_vector,
                                          std::vector<double>& gradient_vector) {
    std::fill(gradient_vector.begin(), gradient_vector.end(), 0);

    std::vector<double> dummy_input;
    this->nets[0]->add_to_gradient_single(dummy_input,
                                          error_vector,
                                          1.0,
                                          gradient_vector);
}
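
/* Debugging counterpart of calculate_error_gradient: prints every element's
 * input, desired and real outputs, the error derivative, and the per-element
 * as well as the accumulated gradient. */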
void MSE::analyze_error_gradient(std::vector<double>& params,
                                 std::vector<double>& grad,
                                 double alpha,
                                 size_t batch) {
    size_t dim_out = this->ds->get_output_dim();
    size_t n_elements = this->ds->get_n_elements();

    std::vector<std::pair<std::vector<double>, std::vector<double>>>* data = this->ds->get_data();
    if (batch > 0) {
        *data = this->ds->get_random_data_batch(batch);
        n_elements = data->size();
    }
    std::vector<double> error_derivative(dim_out);

    std::vector<double> grad_sum(grad.size());
    std::fill(grad_sum.begin(), grad_sum.end(), 0.0);
    this->nets[0]->write_weights();
    this->nets[0]->write_biases();

    for (auto el : *data) { // Iterate through every element in the test set
        this->nets[0]->eval_single_debug(el.first,
                                         error_derivative,
                                         &params); // Compute the net output and store it into 'output' variable

        std::cout << "Input[";
        for (auto v : el.first) {
            std::cout << v << ", ";
        }
        std::cout << "]";

        std::cout << " Desired Output[";
        for (auto v : el.second) {
            std::cout << v << ", ";
        }
        std::cout << "]";

        std::cout << " Real Output[";
        for (auto v : error_derivative) {
            std::cout << v << ", ";
        }
        std::cout << "]";

        for (size_t j = 0; j < dim_out; ++j) {
            error_derivative.at(j) = 2.0 * (error_derivative.at(j) - el.second.at(j)); //real - expected result
        }
        std::cout << " Error derivative[";
        for (auto v : error_derivative) {
            std::cout << v << ", ";
        }
        std::cout << "]";

        std::fill(grad.begin(), grad.end(), 0.0);
        /* NOTE: remaining arguments elided in the source; assumed analogous
         * to add_to_gradient_single */
        this->nets[0]->add_to_gradient_single_debug(el.first, error_derivative, 1.0, grad);
        for (size_t i = 0; i < grad.size(); ++i) {
            grad_sum.at(i) += grad.at(i);
        }

        std::cout << " Gradient[";
        for (auto v : grad) {
            std::cout << v << ", ";
        }
        std::cout << "]";
        std::cout << std::endl;
    }
    std::cout << " Total gradient[";
    for (auto v : grad_sum) {
        std::cout << v << ", ";
    }
    std::cout << "]" << std::endl << std::endl;
}
double MSE::eval_single_item_by_idx(size_t i,
                                    std::vector<double>* parameter_vector,
                                    std::vector<double>& error_vector) {
    double output = 0, val;

    this->nets[0]->eval_single(this->ds->get_data()->at(i).first,
                               error_vector,
                               parameter_vector);

    for (size_t j = 0; j < error_vector.size(); ++j) { // Compute difference for every element of the output vector
        val = error_vector.at(j) - this->ds->get_data()->at(i).second.at(j);
        output += val * val;
    }

    for (size_t j = 0; j < error_vector.size(); ++j) {
        error_vector.at(j) =
            2.0 * (error_vector.at(j) - this->ds->get_data()->at(i).second.at(j)); //real - expected result
    }

    return output; // summed squared error of entry 'i' (return was elided in the source)
}
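
/* The parameter vector used throughout is the flat concatenation
 * [weights | biases] of nets[0], so get_dimension() == n_weights + n_biases. */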
std::vector<double> MSE::get_parameters() {
    std::vector<double> output(this->get_dimension());
    for (size_t i = 0; i < this->nets[0]->get_n_weights(); ++i) {
        output[i] = this->nets[0]->get_parameter_ptr_weights()->at(i);
    }
    for (size_t i = 0; i < this->nets[0]->get_n_biases(); ++i) {
        output[i + this->nets[0]->get_n_weights()] = this->nets[0]->get_parameter_ptr_biases()->at(i);
    }
    return output;
}

void MSE::set_parameters(std::vector<double>& params) {
    this->nets[0]->copy_parameter_space(&params);
}

size_t MSE::get_n_data_set() {
    return this->ds->get_n_elements();
}

size_t MSE::get_n_test_data_set() {
    return this->ds_test->get_n_elements();
}

size_t MSE::get_n_outputs() {
    return this->nets[0]->get_n_outputs();
}

void MSE::randomize_parameters(double scaling) {
    this->nets[0]->randomize_parameters();
    this->nets[0]->scale_parameters(scaling);
}

ErrorSum::ErrorSum() {
    this->summand = nullptr; // no summands yet; checked by the destructor
    this->dimension = 0;
}
ErrorSum::~ErrorSum() {
    if (this->summand) {
        for (auto el : *this->summand) {
            if (el) {
                delete el;
            }
        }
        delete this->summand;
    }
}
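
/* ErrorSum represents the weighted sum  E = sum_i c_i * E_i  of its summand
 * error functions; the methods below distribute each call over the summands
 * and scale the results by summand_coefficient. */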
double ErrorSum::eval_on_test_data(std::vector<double>* weights,
                                   bool verbose) {
    //TODO take care of the case when there are no test data

    double output = 0.0;
    ErrorFunction* ef = nullptr;

    for (unsigned int i = 0; i < this->summand->size(); ++i) {
        ef = this->summand->at(i);
        if (ef) {
            output += ef->eval_on_test_data(weights) * this->summand_coefficient.at(i);
        }
    }

    MPI_Allreduce(MPI_IN_PLACE, &output, 1, MPI_DOUBLE, MPI_SUM, lib4neuro::mpi_active_comm);
    return output;
}
double ErrorSum::eval_on_test_data(std::string results_file_path,
                                   bool verbose) {
    THROW_NOT_IMPLEMENTED_ERROR();
    return -1;
}

double ErrorSum::eval_on_test_data(std::ofstream* results_file_path,
                                   bool verbose) {
    THROW_NOT_IMPLEMENTED_ERROR();
    return -1;
}

double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set,
                                  bool verbose) {
    THROW_NOT_IMPLEMENTED_ERROR();
    return -1;
}

double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set,
                                  std::string results_file_path,
                                  bool verbose) {
    THROW_NOT_IMPLEMENTED_ERROR();
    return -1;
}

double ErrorSum::eval_on_data_set(lib4neuro::DataSet* data_set,
                                  std::ofstream* results_file_path,
                                  bool verbose) {
    THROW_NOT_IMPLEMENTED_ERROR();
    return -1;
}
double ErrorSum::eval(std::vector<double>* weights,
                      bool denormalize_data,
                      bool verbose) {
    double output = 0.0;
    ErrorFunction* ef = nullptr;

    for (unsigned int i = 0; i < this->summand->size(); ++i) {
        ef = this->summand->at(i);
        if (ef) {
            output += ef->eval(weights) * this->summand_coefficient.at(i);
        }
    }

    return output;
}
double ErrorSum::eval_single_item_by_idx(size_t i,
                                         std::vector<double>* parameter_vector,
                                         std::vector<double>& error_vector) {
    double output = 0.0;
    ErrorFunction* ef = nullptr;
    std::fill(error_vector.begin(), error_vector.end(), 0);

    std::vector<double> error_vector_mem(error_vector.size());
    for (size_t j = 0; j < this->summand->size(); ++j) {
        ef = this->summand->at(j); // NOTE: the source indexed by 'i' here, mixing up the item and summand indices
        if (ef) {
            output += ef->eval_single_item_by_idx(i,
                                                  parameter_vector,
                                                  error_vector_mem) * this->summand_coefficient.at(j);

            for (size_t k = 0; k < error_vector_mem.size(); ++k) {
                error_vector[k] += error_vector_mem[k] * this->summand_coefficient.at(j);
            }
        }
    }

    return output;
}
void ErrorSum::calculate_error_gradient(std::vector<double>& params,
                                        std::vector<double>& grad,
                                        double alpha,
                                        size_t batch) {
    ErrorFunction* ef = nullptr;
    for (size_t i = 0; i < this->summand->size(); ++i) {
        ef = this->summand->at(i);
        if (ef) {
            ef->calculate_error_gradient(params,
                                         grad,
                                         this->summand_coefficient.at(i) * alpha,
                                         batch);
        }
    }
}
void ErrorSum::calculate_error_gradient_single(std::vector<double>& error_vector,
                                               std::vector<double>& gradient_vector) {
    COUT_INFO("ErrorSum::calculate_error_gradient_single NOT YET IMPLEMENTED!!!");
}
void ErrorSum::analyze_error_gradient(std::vector<double>& params,
                                      std::vector<double>& grad,
                                      double alpha,
                                      size_t batch) {
    ErrorFunction* ef = nullptr;
    for (size_t i = 0; i < this->summand->size(); ++i) {
        ef = this->summand->at(i);
        if (ef) {
            ef->calculate_error_gradient(params,
                                         grad,
                                         this->summand_coefficient.at(i) * alpha,
                                         batch);
        }
    }
}
void ErrorSum::add_error_function(ErrorFunction* F,
                                  double alpha) {
    if (!this->summand) {
        this->summand = new std::vector<ErrorFunction*>(0);
    }
    this->summand->push_back(F);

    this->summand_coefficient.push_back(alpha);

    if (F) {
        if (F->get_dimension() > this->dimension) {
            this->dimension = F->get_dimension();
        }
    }
}
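
/* Usage sketch (hypothetical names; the summands are deleted by ~ErrorSum,
 * so they must be heap-allocated):
 *
 *   auto* total = new ErrorSum();
 *   total->add_error_function(new MSE(net1, ds1), 0.5);
 *   total->add_error_function(new MSE(net2, ds2), 0.5);
 *   double e = total->eval(&weights);  // 0.5 * E1 + 0.5 * E2
 */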
size_t ErrorSum::get_dimension() {
    return this->dimension;
}

std::vector<double> ErrorSum::get_parameters() {
    return this->summand->at(0)->get_parameters();
}

void ErrorSum::set_parameters(std::vector<double>& params) {
    //TODO may cause problems for general error sum...
    for (auto n : *this->summand) {
        n->set_parameters(params);
    }
}
void ErrorSum::calculate_residual_gradient(std::vector<double>* input,
                                           std::vector<double>* output,
                                           std::vector<double>* gradient,
                                           double h) {
    THROW_NOT_IMPLEMENTED_ERROR();
}

double ErrorSum::calculate_single_residual(std::vector<double>* input,
                                           std::vector<double>* output,
                                           std::vector<double>* parameters) {
    THROW_NOT_IMPLEMENTED_ERROR();
    return 0;
}
double ErrorSum::eval_on_single_input(std::vector<double>* input,
                                      std::vector<double>* output,
                                      std::vector<double>* weights) {
    double o = 0.0;
    for (size_t i = 0; i < this->summand->size(); ++i) {
        o += this->summand->at(i)->eval_on_single_input(input, output, weights) * this->summand_coefficient.at(i);
    }
    return o;
}
size_t ErrorSum::get_n_data_set() {
    size_t o = 0;
    for (size_t i = 0; i < this->summand->size(); ++i) {
        o += this->summand->at(i)->get_n_data_set();
    }
    //TODO how is this function being used? should this be across all MPI ranks?
    return o;
}
size_t ErrorSum::get_n_test_data_set() {
    size_t o = 0;
    for (size_t i = 0; i < this->summand->size(); ++i) {
        o += this->summand->at(i)->get_n_test_data_set();
    }
    return o;
}

size_t ErrorSum::get_n_outputs() {
    size_t o = 0;
    for (size_t i = 0; i < this->summand->size(); ++i) {
        o += this->summand->at(i)->get_n_outputs();
    }
    return o;
}

void ErrorSum::divide_data_train_test(double percent) {
    for (auto n : *this->summand) {
        n->divide_data_train_test(percent);
    }
}
size_t ErrorSum::divide_data_worst_subset(std::vector<size_t>& subset_indices,
                                          std::vector<bool>& active_subset,
                                          std::vector<float>& entry_errors) {
    size_t output = 0;
    assert(false);
    return output;
}
void ErrorSum::return_full_data_set_for_training() {
    for (auto n : *this->summand) {
        n->return_full_data_set_for_training();
    }
}
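
/* Collects the right-hand sides of all summands into one concatenated 'rhs'
 * vector; the shared 'jacobian' is passed through to every summand. */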
void ErrorSum::get_jacobian_and_rhs(std::vector<std::vector<double>>& jacobian,
                                    std::vector<double>& rhs) {
    for (auto n : *this->summand) {
        std::vector<double> rhs_loc;
        n->get_jacobian_and_rhs(jacobian, rhs_loc);

        size_t curr_size = rhs.size();
        rhs.resize(curr_size + rhs_loc.size());
        for (size_t i = 0; i < rhs_loc.size(); ++i) {
            rhs.at(i + curr_size) = rhs_loc.at(i);
        }
    }
}
void ErrorSum::get_jacobian_and_rhs(std::vector<std::vector<double>>& jacobian,
                                    std::vector<double>& rhs,
                                    std::vector<std::pair<std::vector<double>, std::vector<double>>>& data) {
    THROW_NOT_IMPLEMENTED_ERROR();
}

void ErrorSum::randomize_parameters(double scaling) {
    for (auto n : *this->summand) {
        n->randomize_parameters(scaling);
    }
}

} // namespace lib4neuro