diff --git a/src/DataSet/DataSet.cpp b/src/DataSet/DataSet.cpp
index 3d4a6d6d97f5d3e9247382dcbda1c82fe032cdf3..69b4b86c279cd91982a8e74d99bedb8b7ffdcf0c 100644
--- a/src/DataSet/DataSet.cpp
+++ b/src/DataSet/DataSet.cpp
@@ -69,8 +69,7 @@ namespace lib4neuro {
         this->output_dim = 1;
 
         if(ns) {
-            std::shared_ptr<NormalizationStrategy> ns_tmp;
-            ns_tmp.reset(ns);
+            std::shared_ptr<NormalizationStrategy> ns_tmp(ns);
             this->normalization_strategy = ns_tmp;
 //            this->max_min_inp_val.emplace_back(this->normalization_strategy->get_max_value());
 //            this->max_min_inp_val.emplace_back(this->normalization_strategy->get_min_value());
diff --git a/src/ErrorFunction/ErrorFunctions.cpp b/src/ErrorFunction/ErrorFunctions.cpp
index b57e7014c4771b188705a45a886b6978cd08ff72..02744c67aa09a52d5dfa72aa8997b8e08f495b3e 100644
--- a/src/ErrorFunction/ErrorFunctions.cpp
+++ b/src/ErrorFunction/ErrorFunctions.cpp
@@ -75,8 +75,10 @@ namespace lib4neuro {
         return this->ds_test;
     }
 
-    std::vector<double>* ErrorFunction::get_parameters() {
-        std::vector<double>* output = new std::vector<double>(this->net->get_n_weights() + this->net->get_n_biases());
+    std::shared_ptr<std::vector<double>> ErrorFunction::get_parameters() {
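+        /* fresh buffer sized for every weight and bias of the associated network */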
+        auto output = std::make_shared<std::vector<double>>(
+                this->net->get_n_weights() + this->net->get_n_biases());
 
         size_t i = 0;
 
@@ -378,7 +380,7 @@ namespace lib4neuro {
         //TODO check input vector sizes - they HAVE TO be allocated before calling this function
 
         size_t n_parameters = this->get_dimension();
-        std::vector<double>* parameters = this->get_parameters();
+        std::shared_ptr<std::vector<double>> parameters = this->get_parameters();
 
         double delta;  // Complete step size
         double former_parameter_value;
@@ -392,11 +394,11 @@ namespace lib4neuro {
             if(delta != 0) {
                 /* Computation of f_val1 = f(x + delta) */
                 parameters->at(i) = former_parameter_value + delta;
-                f_val1 = this->calculate_single_residual(input, output, parameters);
+                f_val1 = this->calculate_single_residual(input, output, parameters.get());
 
                 /* Computation of f_val2 = f(x - delta) */
                 parameters->at(i) = former_parameter_value - delta;
-                f_val2 = this->calculate_single_residual(input, output, parameters);
+                f_val2 = this->calculate_single_residual(input, output, parameters.get());
 
                 gradient->at(i) = (f_val1 - f_val2) / (2*delta);
             }
@@ -676,7 +678,7 @@ namespace lib4neuro {
         return this->dimension;
     }
 
-    std::vector<double>* ErrorSum::get_parameters() {
+    std::shared_ptr<std::vector<double>> ErrorSum::get_parameters() {
         return this->summand->at(0)->get_parameters();
     }
 
diff --git a/src/ErrorFunction/ErrorFunctions.h b/src/ErrorFunction/ErrorFunctions.h
index 87fcfc2d8313bba4d1b8b1ae083f9cdbcf12c2ed..ef799c5c51ac1ef2e167ce84a50cb9ab3b1bf059 100644
--- a/src/ErrorFunction/ErrorFunctions.h
+++ b/src/ErrorFunction/ErrorFunctions.h
@@ -27,7 +27,7 @@ namespace lib4neuro {
          * @param weights
          * @return
          */
-        virtual double eval(std::vector<double>* weights = nullptr, bool denormalize_data=false,
+        virtual double eval(std::shared_ptr<std::vector<double>> weights = nullptr, bool denormalize_data=false,
                 bool verbose = false) = 0;
 
         /**
@@ -66,7 +66,7 @@ namespace lib4neuro {
          *
          * @return
          */
-        virtual std::vector<double>* get_parameters();
+        virtual std::shared_ptr<std::vector<double>> get_parameters();
 
         /**
          * @return
@@ -533,7 +533,7 @@ namespace lib4neuro {
          *
          * @return
          */
-        LIB4NEURO_API std::vector<double>* get_parameters() override;
+        LIB4NEURO_API std::shared_ptr<std::vector<double>> get_parameters() override;
 
         /**
          *
diff --git a/src/LearningMethods/GradientDescent.cpp b/src/LearningMethods/GradientDescent.cpp
index 3832a2328fa836a6015fa8e5533da1a88b58af7b..e041bcd3854b0013f03e8c2fbc9b5ef15df6c940 100644
--- a/src/LearningMethods/GradientDescent.cpp
+++ b/src/LearningMethods/GradientDescent.cpp
@@ -111,11 +111,11 @@ namespace lib4neuro {
         size_t n_parameters = ef.get_dimension();
 
 
-        std::vector<double> *gradient_current = new std::vector<double>(n_parameters);
-        std::vector<double> *gradient_prev = new std::vector<double>(n_parameters);
-        std::vector<double> *params_current = ef.get_parameters();
-        std::vector<double> *params_prev = new std::vector<double>(n_parameters);
-        std::vector<double> *ptr_mem;
+        auto gradient_current = std::make_shared<std::vector<double>>(n_parameters);
+        auto gradient_prev = std::make_shared<std::vector<double>>(n_parameters);
+        std::shared_ptr<std::vector<double>> params_current = ef.get_parameters();
+        auto params_prev = std::make_shared<std::vector<double>>(n_parameters);
+        std::shared_ptr<std::vector<double>> ptr_mem;
 
 //    std::vector<double> gradient_mem( n_parameters );
 //    std::vector<double> parameters_analytical( n_parameters );
@@ -298,13 +298,11 @@
         this->optimal_parameters = *params_current;
         ef.get_network_instance()->copy_parameter_space( &this->optimal_parameters );
 
-        delete gradient_current;
-        delete gradient_prev;
-        delete params_current;
-        delete params_prev;
     }
 
-    std::vector<double> *GradientDescent::get_parameters() {
-        return &this->optimal_parameters;
+    std::shared_ptr<std::vector<double>> GradientDescent::get_parameters() {
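+        /* non-owning alias: the no-op deleter stops callers from freeing the member vector */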
+        return std::shared_ptr<std::vector<double>>(&this->optimal_parameters,
+                                                    [](std::vector<double>*) {});
     }
 }
diff --git a/src/LearningMethods/GradientDescent.h b/src/LearningMethods/GradientDescent.h
index 325078b00dc6017d9919258e8e2f677040c4f293..96b6429bc666fe796f77c497127ed6b53fb2db86 100644
--- a/src/LearningMethods/GradientDescent.h
+++ b/src/LearningMethods/GradientDescent.h
@@ -116,7 +116,7 @@ namespace lib4neuro {
          *
          * @return
          */
-        LIB4NEURO_API std::vector<double> *get_parameters() override;
+        LIB4NEURO_API std::shared_ptr<std::vector<double>> get_parameters() override;
     };
 }
 
diff --git a/src/LearningMethods/GradientDescentBB.cpp b/src/LearningMethods/GradientDescentBB.cpp
index 3f86452e017cf2602cef36479f0f534c2f82f689..86d67b6d8f9f972e6cf9850d0c470c9c927d8a98 100644
--- a/src/LearningMethods/GradientDescentBB.cpp
+++ b/src/LearningMethods/GradientDescentBB.cpp
@@ -12,16 +12,11 @@
     GradientDescentBB::GradientDescentBB(double epsilon, size_t n_to_restart, int max_iters, size_t batch) {
         this->tolerance = epsilon;
         this->restart_frequency = n_to_restart;
-        this->optimal_parameters = new std::vector<double>(0);
         this->maximum_niters = max_iters;
         this->batch = batch;
     }
 
     GradientDescentBB::~GradientDescentBB() {
-        if (this->optimal_parameters) {
-            delete this->optimal_parameters;
-            this->optimal_parameters = nullptr;
-        }
     }
 
 
@@ -53,13 +48,13 @@
         size_t n_parameters = ef.get_dimension();
 
 
-        std::vector<double> *gradient_current = new std::vector<double>(n_parameters);
-        std::vector<double> *gradient_prev = new std::vector<double>(n_parameters);
-        std::vector<double> *params_current = ef.get_parameters();
-        std::vector<double> *params_prev = new std::vector<double>(n_parameters);
-        std::vector<double> *params_best = new std::vector<double>(*params_current);
+        auto gradient_current = std::make_shared<std::vector<double>>(n_parameters);
+        auto gradient_prev = std::make_shared<std::vector<double>>(n_parameters);
+        std::shared_ptr<std::vector<double>> params_current = ef.get_parameters();
+        auto params_prev = std::make_shared<std::vector<double>>(n_parameters);
+        auto params_best = std::make_shared<std::vector<double>>(*params_current);
 
-        std::vector<double> *ptr_mem;
+        std::shared_ptr<std::vector<double>> ptr_mem;
 
         double alpha = -1.0, cc, gg;
         std::vector<double> dot__( 3 );
@@ -209,21 +204,16 @@
 #endif
         }
 
-        *this->optimal_parameters = *params_best;
-
+        this->optimal_parameters = *params_best;
 
 //        ef.analyze_error_gradient(*params_current, *gradient_current, 1.0, this->batch);
-        ef.get_network_instance()->copy_parameter_space(this->optimal_parameters);
-
-        delete gradient_current;
-        delete gradient_prev;
-        delete params_current;
-        delete params_prev;
-        delete params_best;
+        ef.get_network_instance()->copy_parameter_space(&this->optimal_parameters);
     }
 
-    std::vector<double> *GradientDescentBB::get_parameters() {
-        return this->optimal_parameters;
+    std::shared_ptr<std::vector<double>> GradientDescentBB::get_parameters() {
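+        /* non-owning alias of the member vector; the no-op deleter makes that explicit */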
+        return std::shared_ptr<std::vector<double>>(&this->optimal_parameters,
+                                                    [](std::vector<double>*) {});
     }
 
 }
diff --git a/src/LearningMethods/GradientDescentBB.h b/src/LearningMethods/GradientDescentBB.h
index 0874fa074bbb19d42d916fae60715216bfae9dfa..568c856525c4132b4c31eb4d3b3622502a8351b0 100644
--- a/src/LearningMethods/GradientDescentBB.h
+++ b/src/LearningMethods/GradientDescentBB.h
@@ -55,7 +55,7 @@ namespace lib4neuro {
         /**
          * Vector of minima coordinates
          */
-        std::vector<double> *optimal_parameters;
+        std::vector<double> optimal_parameters;
 
     public:
 
@@ -82,7 +82,7 @@ namespace lib4neuro {
          *
          * @return
          */
-        LIB4NEURO_API std::vector<double> *get_parameters() override;
+        LIB4NEURO_API std::shared_ptr<std::vector<double>> get_parameters() override;
     };
 
 }
diff --git a/src/LearningMethods/GradientDescentSingleItem.cpp b/src/LearningMethods/GradientDescentSingleItem.cpp
index 490e6f5d335986297d6213e93de9f38495113809..ea6b41a4a84d0bd7327b85380f09522054da1a94 100644
--- a/src/LearningMethods/GradientDescentSingleItem.cpp
+++ b/src/LearningMethods/GradientDescentSingleItem.cpp
@@ -14,16 +14,11 @@
     GradientDescentSingleItem::GradientDescentSingleItem(double epsilon, size_t n_to_restart, int max_iters, size_t batch) {
         this->tolerance = epsilon;
         this->restart_frequency = n_to_restart;
-        this->optimal_parameters = new std::vector<double>(0);
         this->maximum_niters = max_iters;
         this->batch = batch;
     }
 
     GradientDescentSingleItem::~GradientDescentSingleItem() {
-        if (this->optimal_parameters) {
-            delete this->optimal_parameters;
-            this->optimal_parameters = nullptr;
-        }
     }
 
 
@@ -98,13 +93,15 @@
         }
         COUT_DEBUG("Iteration: " << iter << ", Total elements in train set: " << total_elements << ", # of elements with high error: " << updated_elements << ", max. error: " << max_error << std::endl);
 
-        *this->optimal_parameters = parameter_vector;
-        ef.get_network_instance()->copy_parameter_space( this->optimal_parameters );
+        this->optimal_parameters = parameter_vector;
+        ef.get_network_instance()->copy_parameter_space( &this->optimal_parameters );
 
     }
 
-    std::vector<double> *GradientDescentSingleItem::get_parameters() {
-        return this->optimal_parameters;
+    std::shared_ptr<std::vector<double>> GradientDescentSingleItem::get_parameters() {
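+        /* non-owning alias: the no-op deleter prevents deletion of the member vector */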
+        return std::shared_ptr<std::vector<double>>(&this->optimal_parameters,
+                                                    [](std::vector<double>*) {});
     }
 
 }
diff --git a/src/LearningMethods/GradientDescentSingleItem.h b/src/LearningMethods/GradientDescentSingleItem.h
index b27d1e62ad884aa48001b1f3398a84a75e9b55a3..bc57967f08173ae3abb5dc9ebbf65aa11a4d61c6 100644
--- a/src/LearningMethods/GradientDescentSingleItem.h
+++ b/src/LearningMethods/GradientDescentSingleItem.h
@@ -56,7 +56,7 @@ namespace lib4neuro {
         /**
          * Vector of minima coordinates
          */
-        std::vector<double> *optimal_parameters;
+        std::vector<double> optimal_parameters;
 
 
     protected:
@@ -97,7 +97,7 @@ namespace lib4neuro {
          *
          * @return
          */
-        LIB4NEURO_API std::vector<double> *get_parameters() override;
+        LIB4NEURO_API std::shared_ptr<std::vector<double>> get_parameters() override;
     };
 
 }
diff --git a/src/LearningMethods/LearningMethod.h b/src/LearningMethods/LearningMethod.h
index 7173ce75870c1df8b58dd4dd2d0a31ec96de8507..87c129ae2ff4a52ed0a04a7a55fcbf74d8920720 100644
--- a/src/LearningMethods/LearningMethod.h
+++ b/src/LearningMethods/LearningMethod.h
@@ -24,7 +24,7 @@ namespace lib4neuro {
         /**
          * Updates the optimal weight&bias settings in the passed vector
          */
-        virtual std::vector<double>* get_parameters() = 0;
+        virtual std::shared_ptr<std::vector<double>> get_parameters() = 0;
     };
 
     class GradientLearningMethod : public LearningMethod {
diff --git a/src/LearningMethods/LearningSequence.cpp b/src/LearningMethods/LearningSequence.cpp
index 7109a7797e9bfb01e93629303dd74698e54e3272..6bd31a5179c87a6ea4cd9f7a862d90ed2696d740 100644
--- a/src/LearningMethods/LearningSequence.cpp
+++ b/src/LearningMethods/LearningSequence.cpp
@@ -18,11 +18,11 @@
 
     LearningSequence::~LearningSequence() = default;
 
-    std::vector<double>* LearningSequence::get_parameters() {
+    std::shared_ptr<std::vector<double>> LearningSequence::get_parameters() {
         if( this->learning_sequence.size() > 0 ){
-            return this->learning_sequence[0]->get_parameters( );
+            return this->learning_sequence.at(0)->get_parameters();
         }
         return nullptr;
     }
 
     void LearningSequence::add_learning_method(std::shared_ptr<LearningMethod> method) {
@@ -51,11 +51,11 @@
                 puts("*********************** 8");
 
                 m->optimize( ef, ofs );
                 error = ef.eval(m->get_parameters());
 
                 puts("*********************** 9");
 
-                ef.get_network_instance()->copy_parameter_space(m->get_parameters());
+                ef.get_network_instance()->copy_parameter_space(m->get_parameters().get());
 
                 puts("*********************** 10");
 
diff --git a/src/LearningMethods/LearningSequence.h b/src/LearningMethods/LearningSequence.h
index 3f3f4b5d0c01c0861c365c05451006ddaf227395..ee102f36c293bcf77f32d433e1e2939357c15209 100644
--- a/src/LearningMethods/LearningSequence.h
+++ b/src/LearningMethods/LearningSequence.h
@@ -65,7 +65,7 @@ namespace lib4neuro {
          *
          * @return
          */
-        LIB4NEURO_API std::vector<double> *get_parameters() override;
+        LIB4NEURO_API std::shared_ptr<std::vector<double>> get_parameters() override;
 
         /**
          *
diff --git a/src/LearningMethods/LevenbergMarquardt.cpp b/src/LearningMethods/LevenbergMarquardt.cpp
index d10eab7b2207ad46844378b67729edac82e11df8..29873b0ab87e8f1c847b2aff2c6ffa10cdd4d83d 100644
--- a/src/LearningMethods/LevenbergMarquardt.cpp
+++ b/src/LearningMethods/LevenbergMarquardt.cpp
@@ -122,8 +122,8 @@
         if( this->p_impl->batch_size > 0 ){
             n_data_points = this->p_impl->batch_size;
         }
-        std::vector<double> *params_current = ef.get_parameters();
-        std::vector<double> *params_tmp = new std::vector<double>(n_parameters);
+        std::shared_ptr<std::vector<double>> params_current = ef.get_parameters();
+        auto params_tmp = std::make_shared<std::vector<double>>(n_parameters);
         arma::Mat<double> J(n_data_points, n_parameters);  // Jacobian matrix
         arma::Mat<double> H(n_data_points, n_parameters);  // Hessian matrix
         arma::Mat<double> H_new(n_data_points, n_parameters);
@@ -233,12 +233,13 @@
 
         ef.get_network_instance()->copy_parameter_space(&this->p_impl->optimal_parameters);
 
-        delete params_tmp;
 
     }
 
-    std::vector<double>* LevenbergMarquardt::get_parameters() {
-        return &this->p_impl->optimal_parameters;
+    std::shared_ptr<std::vector<double>> LevenbergMarquardt::get_parameters() {
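+        /* non-owning alias into the pimpl-held vector; the no-op deleter avoids a double free */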
+        return std::shared_ptr<std::vector<double>>(&this->p_impl->optimal_parameters,
+                                                    [](std::vector<double>*) {});
     }
 
     LevenbergMarquardt::~LevenbergMarquardt() = default;
diff --git a/src/LearningMethods/LevenbergMarquardt.h b/src/LearningMethods/LevenbergMarquardt.h
index 5cc301d63f0a011dc50b9caf223ef7b9e4f20ff1..f55459c6fbb1068325464b215825928f68965ba1 100644
--- a/src/LearningMethods/LevenbergMarquardt.h
+++ b/src/LearningMethods/LevenbergMarquardt.h
@@ -47,7 +47,7 @@ namespace lib4neuro {
                       LM_UPDATE_TYPE update_type,
                       std::ofstream* ofs = nullptr);
 
-        std::vector<double>* get_parameters() override;
+        std::shared_ptr<std::vector<double>> get_parameters() override;
 
         ~LevenbergMarquardt();
     };
diff --git a/src/LearningMethods/ParticleSwarm.cpp b/src/LearningMethods/ParticleSwarm.cpp
index ae93cf6af7533232653ca6b5b7422118f4069ce0..d1306543ce86a38b3e7e286266a6131af878d1ae 100644
--- a/src/LearningMethods/ParticleSwarm.cpp
+++ b/src/LearningMethods/ParticleSwarm.cpp
@@ -255,23 +255,10 @@
 
     ParticleSwarm::~ParticleSwarm() {
 
-        if (this->particle_swarm) {
-            for (size_t i = 0; i < this->n_particles; ++i) {
-                delete this->particle_swarm->at(i);
-            }
-
-            delete this->particle_swarm;
-            this->particle_swarm = nullptr;
-        }
-
-        if( this->domain_bounds ){
-            delete this->domain_bounds;
-        }
-
-        if (this->p_min_glob) {
-            delete this->p_min_glob;
-            this->p_min_glob = nullptr;
-        }
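+        /* the swarm still owns its Particle instances through raw pointers; release them here */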
+        for( Particle *p : this->particle_swarm ){
+            delete p;
+        }
 
     }
 
@@ -300,18 +287,14 @@
 
         /* initialize the particles */
         for (size_t pi = 0; pi < this->n_particles; ++pi) {
-            if (this->particle_swarm->at(pi)) {
-                delete this->particle_swarm->at(pi);
+            if (this->particle_swarm.at(pi)) {
+                delete this->particle_swarm.at(pi);
             }
-            this->particle_swarm->at(pi) = new Particle(&ef, ef.get_parameters(), this->radius_factor);
+            this->particle_swarm.at(pi) = new Particle(&ef, ef.get_parameters().get(), this->radius_factor);
         }
         this->radius_factor *= 1.25;
 
-        if (!this->p_min_glob) {
-            this->p_min_glob = new std::vector<double>(this->func_dim);
-        } else {
-            this->p_min_glob->resize(this->func_dim);
-        }
+        this->p_min_glob.resize(this->func_dim);
 
         size_t outer_it = 0;
         Particle *particle;
@@ -330,7 +313,7 @@
         double euclidean_dist;
         double current_err = -1;
 
-        this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value);
+        this->determine_optimal_coordinate_and_value(this->p_min_glob, optimal_value);
 //    for(unsigned int i = 0; i < this->n_particles; ++i){
 //        this->particle_swarm[i]->print_coordinate();
 //    }
@@ -343,10 +326,10 @@
             //////////////////////////////////////////////////
             // Clustering algorithm - termination condition //
             //////////////////////////////////////////////////
-            particle = this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value);
+            particle = this->determine_optimal_coordinate_and_value(this->p_min_glob, optimal_value);
 
-            if (std::find(global_best_vec.begin(), global_best_vec.end(), *this->p_min_glob) == global_best_vec.end()) {
-                global_best_vec.emplace_back(*this->p_min_glob); // TODO rewrite as std::set
+            if (std::find(global_best_vec.begin(), global_best_vec.end(), this->p_min_glob) == global_best_vec.end()) {
+                global_best_vec.emplace_back(this->p_min_glob); // TODO rewrite as std::set
             }
 
             cluster.insert(particle);
@@ -369,8 +352,8 @@
             }
 
             for (size_t pi = 0; pi < this->n_particles; pi++) {
-                particle = this->particle_swarm->at(pi);
-                tmp_velocity = particle->change_coordinate(this->w, this->c1, this->c2, *this->p_min_glob,
+                particle = this->particle_swarm.at(pi);
+                tmp_velocity = particle->change_coordinate(this->w, this->c1, this->c2, this->p_min_glob,
                                                            global_best_vec);
 //                particle->print_coordinate();
 
@@ -410,7 +393,7 @@
 //            }
 //        }
 
-            current_err = ef.eval(this->p_min_glob);
+            current_err = ef.eval(this->get_parameters());
 
             COUT_DEBUG(std::string("Iteration: ") << (unsigned int)(outer_it)
                                                   << ". Total error: " << current_err
@@ -443,7 +426,7 @@
                                               << ". Objective function value: " << optimal_value
                                               << "." << std::endl );
 
-        this->determine_optimal_coordinate_and_value(*this->p_min_glob, optimal_value);
+        this->determine_optimal_coordinate_and_value(this->p_min_glob, optimal_value);
         //TODO rewrite following output using COUT_INFO
         if (outer_it < this->iter_max) {
             /* Convergence reached */
@@ -453,7 +436,7 @@
             COUT_INFO( std::endl << "Max number of iterations reached ("  <<  outer_it << ")!  Objective function value: " << optimal_value <<std:: endl);
         }
 
-        ef.get_network_instance()->copy_parameter_space(this->p_min_glob);
+        ef.get_network_instance()->copy_parameter_space(&this->p_min_glob);
 
         delete centroid;
     }
@@ -481,18 +464,18 @@
 
         Particle *p;
 
-        val = this->particle_swarm->at(0)->get_optimal_value();
-        this->particle_swarm->at(0)->get_optimal_coordinate(coord);
-        p = this->particle_swarm->at(0);
+        val = this->particle_swarm.at(0)->get_optimal_value();
+        this->particle_swarm.at(0)->get_optimal_coordinate(coord);
+        p = this->particle_swarm.at(0);
 
         for (size_t i = 1; i < this->n_particles; ++i) {
 
-            double val_m = this->particle_swarm->at(i)->get_optimal_value();
+            double val_m = this->particle_swarm.at(i)->get_optimal_value();
 
             if (val_m < val) {
                 val = val_m;
-                this->particle_swarm->at(i)->get_optimal_coordinate(coord);
-                p = this->particle_swarm->at(i);
+                this->particle_swarm.at(i)->get_optimal_coordinate(coord);
+                p = this->particle_swarm.at(i);
             }
         }
 
@@ -504,7 +487,7 @@
         std::vector<double> *tmp;
 
         for (size_t pi = 0; pi < this->n_particles; pi++) {
-            tmp = this->particle_swarm->at(pi)->get_coordinate();
+            tmp = this->particle_swarm.at(pi)->get_coordinate();
 
             for (size_t di = 0; di < this->func_dim; di++) {
                 (*coords)[di] += (*tmp)[di];
@@ -528,8 +511,10 @@
         return std::sqrt(dist);
     }
 
-    std::vector<double> *ParticleSwarm::get_parameters() {
-        return this->p_min_glob;
+    std::shared_ptr<std::vector<double>> ParticleSwarm::get_parameters() {
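+        /* non-owning alias of p_min_glob; the no-op deleter guards the value member */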
+        return std::shared_ptr<std::vector<double>>(&this->p_min_glob,
+                                                    [](std::vector<double>*) {});
     }
 
     void ParticleSwarm::init_constructor(std::vector<double>* domain_bounds,
@@ -544,9 +529,10 @@
         this->w = w;
         this->n_particles = n_particles;
         this->iter_max = iter_max;
-        this->particle_swarm = new std::vector<Particle *>(this->n_particles);
-        this->domain_bounds = new std::vector<double>(*domain_bounds);
-        std::fill(this->particle_swarm->begin(), this->particle_swarm->end(), nullptr);
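+        /* value members replace the former heap allocations: size the swarm, copy the bounds */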
+        this->particle_swarm.resize(this->n_particles);
+        this->domain_bounds = *domain_bounds;
+        std::fill(this->particle_swarm.begin(), this->particle_swarm.end(), nullptr);
     }
 
 }
\ No newline at end of file
diff --git a/src/LearningMethods/ParticleSwarm.h b/src/LearningMethods/ParticleSwarm.h
index 45ed15002dea8ff222d42cbc0b277bc62e6785db..80da9d60bc3525491b4c6d98a41ed0f575da811d 100644
--- a/src/LearningMethods/ParticleSwarm.h
+++ b/src/LearningMethods/ParticleSwarm.h
@@ -123,7 +123,7 @@ namespace lib4neuro {
         /**
          * Vector of particles contained in the swarm
          */
-        std::vector<Particle *> *particle_swarm = nullptr;
+        std::vector<Particle *> particle_swarm;
 
         /**
          * Dimension of the optimized function
@@ -195,12 +195,12 @@ namespace lib4neuro {
         /**
          * Bounds for every optimized parameter (p1_lower, p1_upper, p2_lower, p2_upper...)
          */
-        std::vector<double> *domain_bounds = nullptr;
+        std::vector<double> domain_bounds;
 
         /**
          * Coordinates of the found global minima
          */
-        std::vector<double> *p_min_glob = nullptr;
+        std::vector<double> p_min_glob;
 
     protected:
         /**
@@ -309,7 +309,7 @@ namespace lib4neuro {
          *
          * @return
          */
-        LIB4NEURO_API std::vector<double> *get_parameters() override;
+        LIB4NEURO_API std::shared_ptr<std::vector<double>> get_parameters() override;
     };
 
 }
diff --git a/src/LearningMethods/RandomSolution.cpp b/src/LearningMethods/RandomSolution.cpp
index f5362566b1ff8c99f2ccd10628e174dba7caea01..01e7c013a14786b12bb948bd38b854392dc968e9 100644
--- a/src/LearningMethods/RandomSolution.cpp
+++ b/src/LearningMethods/RandomSolution.cpp
@@ -18,8 +18,10 @@ namespace lib4neuro {
 
     }
 
-    std::vector<double>* RandomSolution::get_parameters() {
-        return &this->optimal_parameters;
+    std::shared_ptr<std::vector<double>> RandomSolution::get_parameters() {
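+        /* non-owning alias of the member vector, guarded by a no-op deleter */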
+        return std::shared_ptr<std::vector<double>>(&this->optimal_parameters,
+                                                    [](std::vector<double>*) {});
     }
 
     void RandomSolution::optimize(lib4neuro::ErrorFunction &ef, std::ofstream *ofs) {
diff --git a/src/LearningMethods/RandomSolution.h b/src/LearningMethods/RandomSolution.h
index 38fbdfdfdf982abd989a0ac6022c7c2682cbf1c1..e7088f55def238b60e019b98e169cda3511bddee 100644
--- a/src/LearningMethods/RandomSolution.h
+++ b/src/LearningMethods/RandomSolution.h
@@ -29,7 +29,7 @@ namespace lib4neuro {
         virtual void optimize(lib4neuro::ErrorFunction &ef,
                               std::ofstream *ofs = nullptr) override;
 
-        virtual std::vector<double> *get_parameters() override;
+        virtual std::shared_ptr<std::vector<double>> get_parameters() override;
 
     };
 
diff --git a/src/Solvers/DESolver.cpp b/src/Solvers/DESolver.cpp
index 0b38436b6169dbf102c5c5bc0162e5d7eab63114..262bb66759ba611bcf897d3f7443f76f364ed64a 100644
--- a/src/Solvers/DESolver.cpp
+++ b/src/Solvers/DESolver.cpp
@@ -408,7 +408,7 @@ namespace lib4neuro {
         printf("error before optimization: %f\n", total_error.eval(nullptr));
 
         learning_method.optimize(total_error);
-        this->solution->copy_parameter_space(learning_method.get_parameters());
+        this->solution->copy_parameter_space(learning_method.get_parameters().get());
 
         printf("error after optimization: %f\n", total_error.eval(nullptr));
     }
diff --git a/src/examples/net_test_1.cpp b/src/examples/net_test_1.cpp
index 0327c10d3a84cd5663036c60727c0fa2f3734ebe..fa98dc05c7ec994eb77addb07dfd6a55a6293c9c 100644
--- a/src/examples/net_test_1.cpp
+++ b/src/examples/net_test_1.cpp
@@ -49,7 +49,7 @@ void optimize_via_particle_swarm( l4n::NeuralNetwork &net, l4n::ErrorFunction &e
     );
     swarm_01.optimize( ef );
 
-    std::vector<double> *parameters = swarm_01.get_parameters();
-    net.copy_parameter_space(parameters);
+    std::shared_ptr<std::vector<double>> parameters = swarm_01.get_parameters();
+    net.copy_parameter_space(parameters.get());
 
     /* ERROR CALCULATION */
@@ -64,7 +64,7 @@ void optimize_via_gradient_descent( l4n::NeuralNetwork &net, l4n::ErrorFunction
 
     gd.optimize( ef );
 
-    std::vector<double> *parameters = gd.get_parameters();
-    net.copy_parameter_space(parameters);
+    std::shared_ptr<std::vector<double>> parameters = gd.get_parameters();
+    net.copy_parameter_space(parameters.get());
 
     /* ERROR CALCULATION */
diff --git a/src/examples/net_test_2.cpp b/src/examples/net_test_2.cpp
index ab3f3dd993e94d5e8bbe175ebc6e73b5dc8ba89c..54c356e6817b22292ca791c8c9f1c4ab8791c4c1 100644
--- a/src/examples/net_test_2.cpp
+++ b/src/examples/net_test_2.cpp
@@ -48,7 +48,7 @@ void optimize_via_particle_swarm( l4n::NeuralNetwork &net, l4n::ErrorFunction &e
     );
     swarm_01.optimize( ef );
 
-    std::vector<double> *parameters = swarm_01.get_parameters();
-    net.copy_parameter_space(parameters);
+    std::shared_ptr<std::vector<double>> parameters = swarm_01.get_parameters();
+    net.copy_parameter_space(parameters.get());
 
     std::cout << "Run finished! Error of the network[Particle swarm]: " << ef.eval( nullptr ) << std::endl;
@@ -61,7 +61,7 @@ void optimize_via_gradient_descent( l4n::NeuralNetwork &net, l4n::ErrorFunction
 
     gd.optimize( ef );
 
-    std::vector<double> *parameters = gd.get_parameters();
-    net.copy_parameter_space(parameters);
+    std::shared_ptr<std::vector<double>> parameters = gd.get_parameters();
+    net.copy_parameter_space(parameters.get());
 
     /* ERROR CALCULATION */
diff --git a/src/examples/network_serialization.cpp b/src/examples/network_serialization.cpp
index 30f0523d14e8d03727704ef78dd5b9c2d6f347fd..75890c6af505c56dd87eb216d83fbebc1947a44d 100644
--- a/src/examples/network_serialization.cpp
+++ b/src/examples/network_serialization.cpp
@@ -116,7 +116,7 @@ int main() {
     );
     swarm_01.optimize( mse );
 
-    std::vector<double> *parameters = swarm_01.get_parameters();
-    net.copy_parameter_space(parameters);
+    std::shared_ptr<std::vector<double>> parameters = swarm_01.get_parameters();
+    net.copy_parameter_space(parameters.get());
 
     printf("w1 = %10.7f\n", parameters->at( 0 ));
diff --git a/src/examples/seminar.cpp b/src/examples/seminar.cpp
index 615292135dee3232a56e5fff442d234eab7b7640..ff36d49630e4888ec18f36bf9d8375d9628ea4d0 100644
--- a/src/examples/seminar.cpp
+++ b/src/examples/seminar.cpp
@@ -116,7 +116,7 @@ int main() {
     );
     swarm_01.optimize( mse );
 
-    std::vector<double> *parameters = swarm_01.get_parameters( );
-    XOR.copy_parameter_space(parameters);
+    std::shared_ptr<std::vector<double>> parameters = swarm_01.get_parameters();
+    XOR.copy_parameter_space(parameters.get());
 
     /* ERROR CALCULATION */