diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index a91df200719c58509e464fe416bea63845e16c2b..7e58d5fd341f8d7c911b2d0b22acdfb4a139fb23 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -57,7 +57,6 @@ if ("${BUILD_LIB}" STREQUAL "yes")
             LearningMethods/GradientDescentSingleItem.cpp
             LearningMethods/LearningSequence.cpp
             LearningMethods/RandomSolution.cpp
-            LearningMethods/KalmanFilter.cpp
     )
 
     # FileSystem C++ library - has to be linked manually in GCC-8
diff --git a/src/LearningMethods/KalmanFilter.cpp b/src/LearningMethods/KalmanFilter.cpp
deleted file mode 100644
index 4d7bd17dba3a02fcf6d96ec4874c02ba9959fb7f..0000000000000000000000000000000000000000
--- a/src/LearningMethods/KalmanFilter.cpp
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * DESCRIPTION OF THE FILE
- *
- * @author Michal Kravčenko
- * @date 25.2.19 -
- */
-
-#include <armadillo>
-#include "KalmanFilter.h"
-namespace lib4neuro {
-
-    std::vector<double> *KalmanFilter::get_parameters() {
-        return this->optimal_parameters;
-    }
-
-    void KalmanFilter::optimize(ErrorFunction &ef, std::ofstream *ofs) {
-/* Copy data set max and min values, if it's normalized */
-        if(ef.get_dataset()->is_normalized()) {
-            ef.get_network_instance()->set_normalization_strategy_instance(
-                    ef.get_dataset()->get_normalization_strategy());
-        }
-
-        double current_err = ef.eval();
-
-        COUT_INFO("Finding a solution via a KalmanFilter method... Starting error: " << current_err << std::endl);
-        if(ofs && ofs->is_open()) {
-            *ofs << "Finding a solution via a KalmanFilter method... Starting error: " << current_err << std::endl;
-        }
-
-        DataSet *data = ef.get_dataset();
-        size_t dim_train_set = data->get_n_elements();
-        if( this->batch_size > 0 ){
-            dim_train_set = this->batch_size;
-        }
-        size_t dim_out = ef.get_network_instance()->get_n_outputs() * dim_train_set;
-        size_t n_parameters = ef.get_dimension();
-
-        this->optimal_parameters->resize(n_parameters);
-
-        arma::Mat<double> H, P, K_partial_i, K, P_new, I, MEM;
-
-        //-------------------//
-        // Solver iterations //
-        //-------------------//
-        arma::Col<double> solution(*ef.get_parameters()), solution_mem(n_parameters), error(dim_out), direction;
-
-        std::random_device seeder;
-        std::mt19937 gen(seeder());
-        std::uniform_real_distribution<double> new_entry(-1, 1);
-
-        P.reshape(n_parameters, n_parameters);
-        for( size_t r = 0; r < n_parameters; ++r ){
-            for( size_t c = 0; c < n_parameters; ++c ){
-                P.at( r, c ) = new_entry(gen);
-            }
-        }
-
-//        P = arma::eye(n_parameters, n_parameters);
-        I = arma::eye(n_parameters, n_parameters);
-        H.reshape(dim_out, n_parameters);
-
-        /* Control Parameters */
-        double jacobian_norm = 0.0, error_norm = 1.0;
-        int iter_idx = this->max_iters - 1, it = 0;
-
-        std::vector<std::vector<double>> jacobian;
-        std::vector<double> partial_error;
-
-
-        std::vector<double> parameters(*ef.get_parameters());
-
-        while( error_norm > this->tolerance && iter_idx != 0 ){
-            it++;
-            iter_idx--;
-            ef.eval(&parameters);
-            /* Jacobian and error vector recalculation */
-
-            H.fill(0.0);
-            size_t row_idx = 0;
-            std::vector<std::pair<std::vector<double>, std::vector<double>>> subset = data->get_random_data_batch(this->batch_size);
-            for ( auto item: subset ){
-                ef.get_network_instance()->get_jacobian( jacobian, item, partial_error );
-
-                for(size_t ri = 0; ri < jacobian.size(); ++ri){
-                    error.at(row_idx) = partial_error[ri];
-                    for(size_t ci = 0; ci < n_parameters; ++ci){
-                        H.at(row_idx, ci) = jacobian[ri][ci];
-                    }
-                    row_idx++;
-                }
-            }
-//            error.print();
-//            H.print();
-            /* End of Jacobian and error vector recalculation */
-
-            K_partial_i = H*P*H.t();
-            K_partial_i.print();
-//            K = P * H * (K_partial_i.i());
-
-            solution_mem = arma::solve(K_partial_i, error);
-            direction = P * H.t() * solution_mem;
-            direction.print();
-
-            solution = solution + direction;
-
-            MEM = arma::solve(K_partial_i, H);
-            P_new = (I - P * H.t() * MEM) * P;
-//            std::cout << "   [X]" << std::endl;
-            P = P_new;
-
-            error_norm = 0.0;
-            for( auto e: error ){
-                error_norm += e * e;
-            }
-            error_norm = error_norm / dim_train_set;
-            std::cout.flush();
-            std::copy(parameters.begin(), parameters.end(), solution.begin());
-            COUT_DEBUG("Iteration: " << it << " Current error: " << error_norm << std::endl);
-        }
-
-        COUT_DEBUG("Iteration: " << it << " Current error: " << ef.eval() << std::endl);
-
-        /* Store the optimized parameters */
-        for( size_t i = 0; i < solution.size(); ++i ){
-            this->optimal_parameters->at( i ) = solution.at( i );
-        }
-
-        ef.get_network_instance()->copy_parameter_space(this->optimal_parameters);
-    }
-
-    KalmanFilter::KalmanFilter(double tol, int max_it, unsigned long bs) {
-        COUT_INFO("This method is in development! If used in optimization, the results will be incorrect!" << std::endl);
-        this->tolerance = tol;
-        this->max_iters = max_it;
-        this->batch_size = bs;
-        this->optimal_parameters = new std::vector<double>();
-    }
-
-    KalmanFilter::~KalmanFilter() {
-        if( this->optimal_parameters ){
-            delete this->optimal_parameters;
-        }
-    }
-
-}
\ No newline at end of file
diff --git a/src/LearningMethods/KalmanFilter.h b/src/LearningMethods/KalmanFilter.h
deleted file mode 100644
index 9dd055141398211c045ed8b88274a37af5e655ed..0000000000000000000000000000000000000000
--- a/src/LearningMethods/KalmanFilter.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * DESCRIPTION OF THE FILE
- *
- * @author Michal Kravčenko
- * @date 25.2.19 -
- */
-
-#ifndef LIB4NEURO_KALLMANFILTER_H
-#define LIB4NEURO_KALLMANFILTER_H
-
-#include <memory>
-
-#include "LearningMethod.h"
-#include "../ErrorFunction/ErrorFunctions.h"
-#include "../DataSet/DataSet.h"
-#include "../message.h"
-
-namespace lib4neuro {
-
-    class KalmanFilter : public lib4neuro::GradientLearningMethod {
-
-    private:
-
-        double tolerance;
-
-        int max_iters;
-
-        std::vector<double> *optimal_parameters = nullptr;
-
-        unsigned long batch_size;
-
-    protected:
-    public:
-        KalmanFilter( double tol, int max_it, unsigned long bs );
-
-        ~KalmanFilter();
-
-        virtual void optimize(ErrorFunction &ef,
-                              std::ofstream *ofs = nullptr) override;
-
-        virtual std::vector<double> *get_parameters() override;
-    };
-
-}
-
-
-#endif //LIB4NEURO_KALLMANFILTER_H