From cc537040f721202ad1d54ff879536c10412db453 Mon Sep 17 00:00:00 2001
From: Martin Beseda <martin.beseda@vsb.cz>
Date: Fri, 18 Jan 2019 16:38:46 +0100
Subject: [PATCH] FIX: Added missing parameter (batch size) into Gradient
 Descent method.

---
 src/LearningMethods/GradientDescent.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/LearningMethods/GradientDescent.cpp b/src/LearningMethods/GradientDescent.cpp
index 4c788c40..5a9d62b9 100644
--- a/src/LearningMethods/GradientDescent.cpp
+++ b/src/LearningMethods/GradientDescent.cpp
@@ -89,7 +89,7 @@ namespace lib4neuro {
 
         /* reset of the current gradient */
         std::fill(gradient_current->begin(), gradient_current->end(), 0.0);
 //        std::fill(gradient_mem.begin(), gradient_mem.end(), 0.0);
-        ef.calculate_error_gradient(*params_current, *gradient_current);
+        ef.calculate_error_gradient(*params_current, *gradient_current, this->batch);
 //        double error_analytical = this->calculate_gradient( ef.get_dataset()->get_data(), (size_t)2, params_current, gradient_current );
 //        for(size_t k = 0; k < gradient_mem.size(); ++k){
@@ -163,13 +163,13 @@
         } else {
             COUT_INFO("Gradient Descent method converged after "
                       << this->maximum_niters-iter_idx
-                      << "iterations."
+                      << " iterations."
                       << std::endl);
 #ifdef L4N_DEBUG
             if(ofs && ofs->is_open()) {
                 *ofs << "Gradient Descent method converged after "
                      << this->maximum_niters-iter_idx
-                     << "iterations."
+                     << " iterations."
                      << std::endl;
             }
 #endif
-- 
GitLab