diff --git a/src/LearningMethods/GradientDescent.cpp b/src/LearningMethods/GradientDescent.cpp
index 5727490f88d5dba8d59a8ea929f3de2fcdf2ae25..3832a2328fa836a6015fa8e5533da1a88b58af7b 100644
--- a/src/LearningMethods/GradientDescent.cpp
+++ b/src/LearningMethods/GradientDescent.cpp
@@ -13,16 +13,10 @@ namespace lib4neuro {
     GradientDescent::GradientDescent(double epsilon, size_t n_to_restart, int max_iters, size_t batch) {
         this->tolerance = epsilon;
         this->restart_frequency = n_to_restart;
-        this->optimal_parameters = new std::vector<double>(0);
         this->maximum_niters = max_iters;
         this->batch = batch;
     }
 
-    GradientDescent::~GradientDescent() {
-        if (this->optimal_parameters) {
-            delete this->optimal_parameters;
-            this->optimal_parameters = nullptr;
-        }
-    }
+    GradientDescent::~GradientDescent() = default;
 
     void GradientDescent::eval_step_size_mk(double &gamma,
@@ -295,8 +289,8 @@
 #endif
         }
 
-        *this->optimal_parameters = *params_current;
-        ef.get_network_instance()->copy_parameter_space( this->optimal_parameters );
+        this->optimal_parameters = *params_current;
+        ef.get_network_instance()->copy_parameter_space( &this->optimal_parameters );
 
         delete gradient_current;
         delete gradient_prev;
@@ -305,6 +299,6 @@
     }
 
     std::vector<double> *GradientDescent::get_parameters() {
-        return this->optimal_parameters;
+        return &this->optimal_parameters;
     }
 }
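Reviewer note: the change above follows a single pattern, repeated in every file of this patch. An owning `std::vector<double>*` becomes a plain value member, assignment through the pointer (`*this->optimal_parameters = ...`) becomes ordinary copy-assignment, and call sites that still expect a pointer receive `&this->optimal_parameters`. A minimal standalone sketch of the idiom (the class below is an illustrative stand-in, not the lib4neuro API):

    #include <vector>

    class Optimizer {
        std::vector<double> optimal_parameters;   // value member: default-constructed empty, freed automatically
    public:
        void store(const std::vector<double>& params) {
            this->optimal_parameters = params;    // deep copy, replaces the old "*ptr = *src" idiom
        }
        std::vector<double>* get_parameters() {
            return &this->optimal_parameters;     // valid exactly as long as *this is alive
        }
    };

With the member held by value there is nothing left for the destructor to free, which is why the constructor and destructor bodies in the hunk above reduce to compiler-generated defaults.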
diff --git a/src/LearningMethods/GradientDescent.h b/src/LearningMethods/GradientDescent.h
index 352def273f96e59c5098d0e977d0a6548493eaa6..325078b00dc6017d9919258e8e2f677040c4f293 100644
--- a/src/LearningMethods/GradientDescent.h
+++ b/src/LearningMethods/GradientDescent.h
@@ -44,7 +44,7 @@ namespace lib4neuro {
         /**
          * Vector of minima coordinates
          */
-        std::vector<double> *optimal_parameters;
+        std::vector<double> optimal_parameters;
 
         /**
          * Adaptive calculation of the step-size based on several historical characteristics.
diff --git a/src/LearningMethods/LearningSequence.cpp b/src/LearningMethods/LearningSequence.cpp
index 8d5f9ce7f88e6fe31ff6fb69e6b8a03fb1248b25..05569b85de4b68e797a1e4db4928f777c017f68f 100644
--- a/src/LearningMethods/LearningSequence.cpp
+++ b/src/LearningMethods/LearningSequence.cpp
@@ -13,13 +13,9 @@ namespace lib4neuro {
     LearningSequence::LearningSequence( double tolerance, int max_n_cycles ){
         this->tol = tolerance;
         this->max_number_of_cycles = max_n_cycles;
-        this->best_parameters = new std::vector<double>();
     }
 
-    LearningSequence::~LearningSequence() {
-
-
-    }
+    LearningSequence::~LearningSequence() = default;
 
     std::vector<double>* LearningSequence::get_parameters() {
         if( this->learning_sequence.size() > 0 ){
@@ -28,7 +24,7 @@
         return nullptr;
     }
 
-    void LearningSequence::add_learning_method(LearningMethod *method) {
+    void LearningSequence::add_learning_method(std::shared_ptr<LearningMethod> method) {
         this->learning_sequence.push_back( method );
     }
 
@@ -49,16 +45,16 @@
 
                 if( error < the_best_error ){
                     the_best_error = error;
-                    *this->best_parameters = *ef.get_parameters();
+                    this->best_parameters = *ef.get_parameters();
                 }
 
                 if( error <= this->tol ){
-                    ef.get_network_instance()->copy_parameter_space( this->best_parameters );
+                    ef.get_network_instance()->copy_parameter_space( &this->best_parameters );
                     return;
                 }
             }
             COUT_DEBUG("Cycle: " << cycle_idx << ", the lowest error: " << the_best_error << std::endl );
         }
-        ef.get_network_instance()->copy_parameter_space( this->best_parameters );
+        ef.get_network_instance()->copy_parameter_space( &this->best_parameters );
     }
 }
\ No newline at end of file
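Reviewer note: holding `best_parameters` by value also makes the `= default` destructor above correct by construction. Once no member is an owning raw pointer, the compiler-generated destructor releases everything (the rule of zero). A sketch of the resulting ownership layout, with illustrative stand-in names:

    #include <memory>
    #include <vector>

    struct Method { };                                 // stand-in for LearningMethod

    struct SequenceLike {                              // stand-in for LearningSequence
        std::vector<std::shared_ptr<Method>> sequence; // shared_ptrs release their targets automatically
        std::vector<double> best_parameters;           // value member, destroyed with the object
        ~SequenceLike() = default;                     // rule of zero: nothing to delete by hand
    };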
diff --git a/src/LearningMethods/LearningSequence.h b/src/LearningMethods/LearningSequence.h
index bb97591093d0504099fb1bf078b4ab3629679297..3f3f4b5d0c01c0861c365c05451006ddaf227395 100644
--- a/src/LearningMethods/LearningSequence.h
+++ b/src/LearningMethods/LearningSequence.h
@@ -24,7 +24,7 @@ namespace lib4neuro {
         /**
          *
          */
-        std::vector<LearningMethod*> learning_sequence;
+        std::vector<std::shared_ptr<LearningMethod>> learning_sequence;
 
         /**
          *
@@ -34,7 +34,7 @@ namespace lib4neuro {
         /**
          *
          */
-        std::vector<double> *best_parameters = nullptr;
+        std::vector<double> best_parameters;
 
         /**
          *
@@ -71,7 +71,7 @@ namespace lib4neuro {
          *
          * @param method
          */
-        LIB4NEURO_API void add_learning_method( LearningMethod * method );
+        LIB4NEURO_API void add_learning_method( std::shared_ptr<LearningMethod> method );
     };
 
 }
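Reviewer note: one follow-up the hunks above do not show is that `LearningSequence.h` now names `std::shared_ptr`, so it needs `#include <memory>` unless that header already arrives transitively (the include block is outside these hunks). Call sites then hand ownership over with `std::make_shared`. A hedged usage sketch built only from the constructor signatures visible in this patch, `LearningSequence(double tolerance, int max_n_cycles)` and `GradientDescent(double epsilon, size_t n_to_restart, int max_iters, size_t batch)`; the argument values and include paths are illustrative:

    #include <memory>
    #include "LearningMethods/LearningSequence.h"   // illustrative include paths
    #include "LearningMethods/GradientDescent.h"

    void build_sequence() {
        lib4neuro::LearningSequence seq(1e-4, 10);  // tolerance, max cycles (illustrative values)
        seq.add_learning_method(
            std::make_shared<lib4neuro::GradientDescent>(1e-6, 200, 1000, 0));
    }

The `shared_ptr<GradientDescent>` converts implicitly to `shared_ptr<LearningMethod>`, so heterogeneous methods can share one sequence without manual lifetime management.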
diff --git a/src/LearningMethods/LevenbergMarquardt.cpp b/src/LearningMethods/LevenbergMarquardt.cpp
index b9db3539c49bc56087f010fc34fd908b41b91cc2..d10eab7b2207ad46844378b67729edac82e11df8 100644
--- a/src/LearningMethods/LevenbergMarquardt.cpp
+++ b/src/LearningMethods/LevenbergMarquardt.cpp
@@ -31,7 +31,7 @@ struct lib4neuro::LevenbergMarquardt::LevenbergMarquardtImpl {
     /**
      * Vector of minimum coordinates
      */
-    std::vector<double> *optimal_parameters;
+    std::vector<double> optimal_parameters;
 
     /**
      * Returns Jacobian matrix of the residual function using the backpropagation algorithm
@@ -92,7 +92,6 @@ namespace lib4neuro {
         this->p_impl->lambda_increase = lambda_increase;
         this->p_impl->lambda_decrease = lambda_decrease;
         this->p_impl->maximum_niters = max_iters;
-        this->p_impl->optimal_parameters = new std::vector<double>();
     }
 
     void LevenbergMarquardt::optimize(lib4neuro::ErrorFunction& ef,
@@ -229,16 +228,16 @@
         COUT_DEBUG("Iteration: " << iter_counter << " Current error: " << current_err << ", Current gradient norm: " << gradient_norm << ", Direction norm: " << update_norm << std::endl);
 
         /* Store the optimized parameters */
-        *this->p_impl->optimal_parameters = *params_current;
+        this->p_impl->optimal_parameters = *params_current;
 
-        ef.get_network_instance()->copy_parameter_space(this->p_impl->optimal_parameters);
+        ef.get_network_instance()->copy_parameter_space(&this->p_impl->optimal_parameters);
 
         delete params_tmp;
 
     }
 
     std::vector<double>* LevenbergMarquardt::get_parameters() {
-        return this->p_impl->optimal_parameters;
+        return &this->p_impl->optimal_parameters;
     }
 
     LevenbergMarquardt::~LevenbergMarquardt() = default;
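Reviewer note: a behavioral consequence of the new `get_parameters()` is that it returns the address of a member inside the pimpl, so the pointer is only meaningful while the optimizer object is alive. Callers that need the result to outlive the optimizer should copy it. A hedged sketch; the constructor and optimize arguments are elided because their full signatures are not shown in these hunks:

    #include <vector>
    // ... lib4neuro headers elided ...

    void example(lib4neuro::ErrorFunction& ef) {   // hypothetical caller
        std::vector<double> snapshot;
        {
            lib4neuro::LevenbergMarquardt lm(/* constructor arguments elided */);
            lm.optimize(ef /*, ... */);
            snapshot = *lm.get_parameters();       // deep copy; the pointer itself dies with lm
        }
        // snapshot remains usable here
    }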
diff --git a/src/LearningMethods/RandomSolution.cpp b/src/LearningMethods/RandomSolution.cpp
index 84cabc9e347cf39da3bac71a24e6ec1a8c87ef78..f5362566b1ff8c99f2ccd10628e174dba7caea01 100644
--- a/src/LearningMethods/RandomSolution.cpp
+++ b/src/LearningMethods/RandomSolution.cpp
@@ -11,21 +11,17 @@
 namespace lib4neuro {
 
-    RandomSolution::RandomSolution() {
-        this->optimal_parameters =  new std::vector<double>();
-    }
+    RandomSolution::RandomSolution() = default;
 
-    RandomSolution::~RandomSolution() {
-
-    }
+    RandomSolution::~RandomSolution() = default;
 
-    std::vector<double> *RandomSolution::get_parameters() {
-        return this->optimal_parameters;
+    std::vector<double>* RandomSolution::get_parameters() {
+        return &this->optimal_parameters;
     }
 
     void RandomSolution::optimize(lib4neuro::ErrorFunction &ef, std::ofstream *ofs) {
         ef.get_network_instance()->randomize_parameters();
-        *this->optimal_parameters = *ef.get_parameters();
-        COUT_INFO("Producing a random solution... error: " << ef.eval(this->optimal_parameters) << std::endl);
+        this->optimal_parameters = *ef.get_parameters();
+        COUT_INFO("Producing a random solution... error: " << ef.eval(&this->optimal_parameters) << std::endl);
     }
 
 }
\ No newline at end of file
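Reviewer note: the `&this->optimal_parameters` arguments above show how a value member still satisfies the existing pointer-taking APIs (`ef.eval(...)`, `copy_parameter_space(...)`). A self-contained analogue; the `eval` below is a stand-in, not `ErrorFunction::eval`:

    #include <iostream>
    #include <vector>

    double eval(std::vector<double>* params) {               // stand-in for a pointer-taking API
        double sum = 0.0;
        for (double p : *params) sum += p * p;               // sum of squares as a dummy "error"
        return sum;
    }

    int main() {
        std::vector<double> optimal_parameters{0.5, -1.0};   // value member analogue
        std::cout << eval(&optimal_parameters) << "\n";      // &member matches the T* parameter
        return 0;
    }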
diff --git a/src/LearningMethods/RandomSolution.h b/src/LearningMethods/RandomSolution.h
index 5a18b9dcd6464679ed80d8b301325236178b49a0..38fbdfdfdf982abd989a0ac6022c7c2682cbf1c1 100644
--- a/src/LearningMethods/RandomSolution.h
+++ b/src/LearningMethods/RandomSolution.h
@@ -17,7 +17,7 @@ namespace lib4neuro {
     class RandomSolution : public lib4neuro::LearningMethod {
 
     private:
-        std::vector<double> *optimal_parameters;
+        std::vector<double> optimal_parameters;
 
     protected:
     public: