diff --git a/docs.it4i/anselm/software/numerical-languages/matlab.md b/docs.it4i/anselm/software/numerical-languages/matlab.md
index ac1b0cc5e6b5728f0079b57b771ec17a219f4d8d..f39ca4d3cc46ea7ec6589a1cda2715e155f5af5d 100644
--- a/docs.it4i/anselm/software/numerical-languages/matlab.md
+++ b/docs.it4i/anselm/software/numerical-languages/matlab.md
@@ -92,26 +92,26 @@ r1i0n17$ matlab &
 To run matlab in batch mode, write a matlab script, then write a bash jobscript and execute it via the qsub command. By default, matlab will execute one matlab worker instance per allocated core.
 
 ```bash
-    #!/bin/bash
-    #PBS -A PROJECT ID
-    #PBS -q qprod
-    #PBS -l select=1:ncpus=16:mpiprocs=16:ompthreads=1
+#!/bin/bash
+#PBS -A PROJECT_ID
+#PBS -q qprod
+#PBS -l select=1:ncpus=16:mpiprocs=16:ompthreads=1
 
-    # change to shared scratch directory
-    SCR=/scratch/work/user/$USER/$PBS_JOBID
-    mkdir -p $SCR ; cd $SCR || exit
+# change to shared scratch directory
+SCR=/scratch/work/user/$USER/$PBS_JOBID
+mkdir -p $SCR ; cd $SCR || exit
 
-    # copy input file to scratch
-    cp $PBS_O_WORKDIR/matlabcode.m .
+# copy input file to scratch
+cp $PBS_O_WORKDIR/matlabcode.m .
 
-    # load modules
-    module load MATLAB/2015a-EDU
+# load modules
+module load MATLAB/2015a-EDU
 
-    # execute the calculation
-    matlab -nodisplay -r matlabcode > output.out
+# execute the calculation
+matlab -nodisplay -r matlabcode > output.out
 
-    # copy output file to home
-    cp output.out $PBS_O_WORKDIR/.
+# copy output file to home
+cp output.out $PBS_O_WORKDIR/.
 ```
 
 This script may be submitted directly to the PBS workload manager via the qsub command. The inputs and matlab script are in the matlabcode.m file, outputs in the output.out file. Note the missing .m extension in the matlab -r matlabcode call; **the .m must not be included**. Note that the **shared /scratch must be used**. Further, it is **important to include the quit** statement at the end of the matlabcode.m script.
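+
+A minimal matlabcode.m might look like the following sketch (illustrative only; the pool size of 16 matches the ncpus requested above, and the final quit is what allows the non-interactive MATLAB session to terminate):
+
+```console
+% matlabcode.m - illustrative sketch
+parpool('local', 16);        % one worker per allocated core (16 on Anselm)
+s = 0;
+parfor i = 1:16
+    s = s + i^2;             % trivial parallel loop
+end
+disp(s)                      % printed output is captured in output.out
+delete(gcp('nocreate'));     % shut down the parallel pool
+quit                         % required, otherwise the session does not end
+```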
@@ -138,40 +138,40 @@ This script creates scheduler object "cluster" of type "local" that starts worke
 The last step is to start the parallel pool with the "cluster" object and the correct number of workers. Anselm has 16 cores per node, so we start 16 workers.
 
 ```console
-    parpool(cluster,16);
+parpool(cluster,16);
 
 
-    ... parallel code ...
+... parallel code ...
 
 
-    parpool close
+delete(gcp('nocreate'))   % shut down the parallel pool
 ```
 
 The complete example showing how to use the Distributed Computing Toolbox in local mode is shown below.
 
 ```console
-    cluster = parcluster('local');
-    cluster
+cluster = parcluster('local');
+cluster
 
-    parpool(cluster,24);
+parpool(cluster,16);
 
-    n=2000;
+n=2000;
 
-    W = rand(n,n);
-    W = distributed(W);
-    x = (1:n)';
-    x = distributed(x);
-    spmd
-    [~, name] = system('hostname')
+W = rand(n,n);
+W = distributed(W);
+x = (1:n)';
+x = distributed(x);
+spmd
+[~, name] = system('hostname')
 
-        T = W*x; % Calculation performed on labs, in parallel.
-                 % T and W are both codistributed arrays here.
-    end
-    T;
-    whos         % T and W are both distributed arrays here.
+    T = W*x; % Calculation performed on labs, in parallel.
+             % T and W are both codistributed arrays here.
+end
+T;
+whos         % T and W are both distributed arrays here.
 
-    parpool close
-    quit
+delete(gcp('nocreate'))   % shut down the parallel pool
+quit
 ```
 
 You can copy and paste the example into a .m file and execute it. Note that the parpool size should correspond to the **total number of cores** available on the allocated nodes.
@@ -183,29 +183,29 @@ This mode uses PBS scheduler to launch the parallel pool. It uses the SalomonPBS
 This is an example of an m-script using PBS mode:
 
 ```console
-    cluster = parcluster('SalomonPBSPro');
-    set(cluster, 'SubmitArguments', '-A OPEN-0-0');
-    set(cluster, 'ResourceTemplate', '-q qprod -l select=10:ncpus=16');
-    set(cluster, 'NumWorkers', 160);
+cluster = parcluster('SalomonPBSPro');
+set(cluster, 'SubmitArguments', '-A OPEN-0-0');
+set(cluster, 'ResourceTemplate', '-q qprod -l select=10:ncpus=16');
+set(cluster, 'NumWorkers', 160);
 
-    pool = parpool(cluster, 160);
+pool = parpool(cluster, 160);
 
-    n=2000;
+n=2000;
 
-    W = rand(n,n);
-    W = distributed(W);
-    x = (1:n)';
-    x = distributed(x);
-    spmd
-    [~, name] = system('hostname')
+W = rand(n,n);
+W = distributed(W);
+x = (1:n)';
+x = distributed(x);
+spmd
+[~, name] = system('hostname')
 
-        T = W*x; % Calculation performed on labs, in parallel.
-                 % T and W are both codistributed arrays here.
-    end
-    whos         % T and W are both distributed arrays here.
+    T = W*x; % Calculation performed on labs, in parallel.
+             % T and W are both codistributed arrays here.
+end
+whos         % T and W are both distributed arrays here.
 
-    % shut down parallel pool
-    delete(pool)
+% shut down parallel pool
+delete(pool)
 ```
 
 Note that we first construct a cluster object using the imported profile, then set some important options, namely SubmitArguments, where you need to specify the accounting ID, and ResourceTemplate, where you need to specify the number of nodes to run the job.
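+
+For example, to run the pool on 4 Anselm nodes (4 x 16 = 64 workers) instead, the same options would be adjusted as follows (a sketch; OPEN-0-0 stands for your own project ID):
+
+```console
+set(cluster, 'SubmitArguments', '-A OPEN-0-0');
+set(cluster, 'ResourceTemplate', '-q qprod -l select=4:ncpus=16');
+set(cluster, 'NumWorkers', 64);
+
+pool = parpool(cluster, 64);
+```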
@@ -224,28 +224,28 @@ For this method, you need to use SalomonDirect profile, import it using [the sam
 This is an example of an m-script using direct mode:
 
 ```console
-    parallel.importProfile('/apps/all/MATLAB/2015a-EDU/SalomonDirect.settings')
-    cluster = parcluster('SalomonDirect');
-    set(cluster, 'NumWorkers', 48);
+parallel.importProfile('/apps/all/MATLAB/2015a-EDU/SalomonDirect.settings')
+cluster = parcluster('SalomonDirect');
+set(cluster, 'NumWorkers', 48);
 
-    pool = parpool(cluster, 48);
+pool = parpool(cluster, 48);
 
-    n=2000;
+n=2000;
 
-    W = rand(n,n);
-    W = distributed(W);
-    x = (1:n)';
-    x = distributed(x);
-    spmd
-    [~, name] = system('hostname')
+W = rand(n,n);
+W = distributed(W);
+x = (1:n)';
+x = distributed(x);
+spmd
+[~, name] = system('hostname')
 
-        T = W*x; % Calculation performed on labs, in parallel.
-                 % T and W are both codistributed arrays here.
-    end
-    whos         % T and W are both distributed arrays here.
+    T = W*x; % Calculation performed on labs, in parallel.
+             % T and W are both codistributed arrays here.
+end
+whos         % T and W are both distributed arrays here.
 
-    % shut down parallel pool
-    delete(pool)
+% shut down parallel pool
+delete(pool)
 ```
 
 ### Non-Interactive Session and Licenses
diff --git a/docs.it4i/anselm/software/numerical-languages/octave.md b/docs.it4i/anselm/software/numerical-languages/octave.md
index 4fbb52979a38da23ec3a9a3c93e456383f99ab22..12651d9f0243fb670bd92d1bcee735fdd639bc64 100644
--- a/docs.it4i/anselm/software/numerical-languages/octave.md
+++ b/docs.it4i/anselm/software/numerical-languages/octave.md
@@ -29,25 +29,25 @@ $ octave
 To run octave in batch mode, write an octave script, then write a bash jobscript and execute it via the qsub command. By default, octave will use 16 threads when running MKL kernels.
 
 ```bash
-    #!/bin/bash
+#!/bin/bash
 
-    # change to local scratch directory
-    cd /lscratch/$PBS_JOBID || exit
+# change to local scratch directory
+cd /lscratch/$PBS_JOBID || exit
 
-    # copy input file to scratch
-    cp $PBS_O_WORKDIR/octcode.m .
+# copy input file to scratch
+cp $PBS_O_WORKDIR/octcode.m .
 
-    # load octave module
-    module load octave
+# load octave module
+module load octave
 
-    # execute the calculation
-    octave -q --eval octcode > output.out
+# execute the calculation
+octave -q --eval octcode > output.out
 
-    # copy output file to home
-    cp output.out $PBS_O_WORKDIR/.
+# copy output file to home
+cp output.out $PBS_O_WORKDIR/.
 
-    #exit
-    exit
+#exit
+exit
 ```
 
 This script may be submitted directly to the PBS workload manager via the qsub command. The inputs are in the octcode.m file, outputs in the output.out file. See the single node jobscript example in the [Job execution section](../../job-submission-and-execution/).
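+
+For example, assuming the jobscript above is saved as ./jobscript (a sketch; substitute your own project ID and adjust the resource request as needed):
+
+```console
+$ qsub -A PROJECT_ID -q qprod -l select=1:ncpus=16 ./jobscript
+```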
diff --git a/docs.it4i/anselm/software/numerical-languages/r.md b/docs.it4i/anselm/software/numerical-languages/r.md
index 8916ccb7cc21a1e9bf7de6bda24d1a38bdf82263..9d189e85b8d5033435826d45e7f541f5defd5b13 100644
--- a/docs.it4i/anselm/software/numerical-languages/r.md
+++ b/docs.it4i/anselm/software/numerical-languages/r.md
@@ -45,25 +45,25 @@ To run R in batch mode, write an R script, then write a bash jobscript and execu
 Example jobscript:
 
 ```bash
-    #!/bin/bash
+#!/bin/bash
 
-    # change to local scratch directory
-    cd /lscratch/$PBS_JOBID || exit
+# change to local scratch directory
+cd /lscratch/$PBS_JOBID || exit
 
-    # copy input file to scratch
-    cp $PBS_O_WORKDIR/rscript.R .
+# copy input file to scratch
+cp $PBS_O_WORKDIR/rscript.R .
 
-    # load R module
-    module load R
+# load R module
+module load R
 
-    # execute the calculation
-    R CMD BATCH rscript.R routput.out
+# execute the calculation
+R CMD BATCH rscript.R routput.out
 
-    # copy output file to home
-    cp routput.out $PBS_O_WORKDIR/.
+# copy output file to home
+cp routput.out $PBS_O_WORKDIR/.
 
-    #exit
-    exit
+#exit
+exit
 ```
 
 This script may be submitted directly to the PBS workload manager via the qsub command. The inputs are in the rscript.R file, outputs in the routput.out file. See the single node jobscript example in the [Job execution section](../../job-submission-and-execution/).
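+
+For example, assuming the jobscript above is saved as ./jobscript (a sketch; substitute your own project ID), submit it and inspect the result once the job has finished:
+
+```console
+$ qsub -A PROJECT_ID -q qprod -l select=1:ncpus=16 ./jobscript
+$ cat routput.out
+```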
@@ -105,35 +105,35 @@ The forking is the most simple to use. Forking family of functions provide paral
 Forking example:
 
 ```r
-    library(parallel)
+library(parallel)
 
-    #integrand function
-    f <- function(i,h) {
-    x <- h*(i-0.5)
-    return (4/(1 + x*x))
-    }
+#integrand function
+f <- function(i,h) {
+x <- h*(i-0.5)
+return (4/(1 + x*x))
+}
 
-    #initialize
-    size <- detectCores()
+#initialize
+size <- detectCores()
 
-    while (TRUE)
-    {
-      #read number of intervals
-      cat("Enter the number of intervals: (0 quits) ")
-      fp<-file("stdin"); n<-scan(fp,nmax=1); close(fp)
+while (TRUE)
+{
+  #read number of intervals
+  cat("Enter the number of intervals: (0 quits) ")
+  fp<-file("stdin"); n<-scan(fp,nmax=1); close(fp)
 
-      if(n<=0) break
+  if(n<=0) break
 
-      #run the calculation
-      n <- max(n,size)
-      h <-   1.0/n
+  #run the calculation
+  n <- max(n,size)
+  h <-   1.0/n
 
-      i <- seq(1,n);
-      pi3 <- h*sum(simplify2array(mclapply(i,f,h,mc.cores=size)));
+  i <- seq(1,n);
+  pi3 <- h*sum(simplify2array(mclapply(i,f,h,mc.cores=size)));
 
-      #print results
-      cat(sprintf("Value of PI %16.14f, diff= %16.14fn",pi3,pi3-pi))
-    }
+  #print results
+  cat(sprintf("Value of PI %16.14f, diff= %16.14f\n",pi3,pi3-pi))
+}
 ```
 
 The above example is the classic parallel example for calculating the number π. Note the **detectCores()** and **mclapply()** functions. Execute the example as:
@@ -169,47 +169,47 @@ Static Rmpi programs are executed via mpiexec, as any other MPI programs. Number
 Static Rmpi example:
 
 ```r
-    library(Rmpi)
+library(Rmpi)
 
-    #integrand function
-    f <- function(i,h) {
-    x <- h*(i-0.5)
-    return (4/(1 + x*x))
-    }
+#integrand function
+f <- function(i,h) {
+x <- h*(i-0.5)
+return (4/(1 + x*x))
+}
 
-    #initialize
-    invisible(mpi.comm.dup(0,1))
-    rank <- mpi.comm.rank()
-    size <- mpi.comm.size()
-    n<-0
+#initialize
+invisible(mpi.comm.dup(0,1))
+rank <- mpi.comm.rank()
+size <- mpi.comm.size()
+n<-0
 
-    while (TRUE)
-    {
-      #read number of intervals
-      if (rank==0) {
-       cat("Enter the number of intervals: (0 quits) ")
-       fp<-file("stdin"); n<-scan(fp,nmax=1); close(fp)
-      }
+while (TRUE)
+{
+  #read number of intervals
+  if (rank==0) {
+   cat("Enter the number of intervals: (0 quits) ")
+   fp<-file("stdin"); n<-scan(fp,nmax=1); close(fp)
+  }
 
-      #broadcat the intervals
-      n <- mpi.bcast(as.integer(n),type=1)
+  #broadcast the intervals
+  n <- mpi.bcast(as.integer(n),type=1)
 
-      if(n<=0) break
+  if(n<=0) break
 
-      #run the calculation
-      n <- max(n,size)
-      h <-   1.0/n
+  #run the calculation
+  n <- max(n,size)
+  h <-   1.0/n
 
-      i <- seq(rank+1,n,size);
-      mypi <- h*sum(sapply(i,f,h));
+  i <- seq(rank+1,n,size);
+  mypi <- h*sum(sapply(i,f,h));
 
-      pi3 <- mpi.reduce(mypi)
+  pi3 <- mpi.reduce(mypi)
 
-      #print results
-      if (rank==0) cat(sprintf("Value of PI %16.14f, diff= %16.14fn",pi3,pi3-pi))
-    }
+  #print results
+  if (rank==0) cat(sprintf("Value of PI %16.14f, diff= %16.14f\n",pi3,pi3-pi))
+}
 
-    mpi.quit()
+mpi.quit()
 ```
 
 The above is the static MPI example for calculating the number π. Note the **library(Rmpi)** and **mpi.comm.dup()** function calls.
@@ -227,61 +227,61 @@ Dynamic Rmpi programs are executed by calling the R directly. openmpi module mus
 Dynamic Rmpi example:
 
 ```r
-    #integrand function
-    f <- function(i,h) {
-    x <- h*(i-0.5)
-    return (4/(1 + x*x))
-    }
+#integrand function
+f <- function(i,h) {
+x <- h*(i-0.5)
+return (4/(1 + x*x))
+}
 
-    #the worker function
-    workerpi <- function()
-    {
-    #initialize
-    rank <- mpi.comm.rank()
-    size <- mpi.comm.size()
-    n<-0
+#the worker function
+workerpi <- function()
+{
+#initialize
+rank <- mpi.comm.rank()
+size <- mpi.comm.size()
+n<-0
 
-    while (TRUE)
-    {
-      #read number of intervals
-      if (rank==0) {
-       cat("Enter the number of intervals: (0 quits) ")
-       fp<-file("stdin"); n<-scan(fp,nmax=1); close(fp)
-      }
+while (TRUE)
+{
+  #read number of intervals
+  if (rank==0) {
+   cat("Enter the number of intervals: (0 quits) ")
+   fp<-file("stdin"); n<-scan(fp,nmax=1); close(fp)
+  }
 
-      #broadcat the intervals
-      n <- mpi.bcast(as.integer(n),type=1)
+  #broadcast the intervals
+  n <- mpi.bcast(as.integer(n),type=1)
 
-      if(n<=0) break
+  if(n<=0) break
 
-      #run the calculation
-      n <- max(n,size)
-      h <-   1.0/n
+  #run the calculation
+  n <- max(n,size)
+  h <-   1.0/n
 
-      i <- seq(rank+1,n,size);
-      mypi <- h*sum(sapply(i,f,h));
+  i <- seq(rank+1,n,size);
+  mypi <- h*sum(sapply(i,f,h));
 
-      pi3 <- mpi.reduce(mypi)
+  pi3 <- mpi.reduce(mypi)
 
-      #print results
-      if (rank==0) cat(sprintf("Value of PI %16.14f, diff= %16.14fn",pi3,pi3-pi))
-    }
-    }
+  #print results
+  if (rank==0) cat(sprintf("Value of PI %16.14f, diff= %16.14f\n",pi3,pi3-pi))
+}
+}
 
-    #main
-    library(Rmpi)
+#main
+library(Rmpi)
 
-    cat("Enter the number of slaves: ")
-    fp<-file("stdin"); ns<-scan(fp,nmax=1); close(fp)
+cat("Enter the number of slaves: ")
+fp<-file("stdin"); ns<-scan(fp,nmax=1); close(fp)
 
-    mpi.spawn.Rslaves(nslaves=ns)
-    mpi.bcast.Robj2slave(f)
-    mpi.bcast.Robj2slave(workerpi)
+mpi.spawn.Rslaves(nslaves=ns)
+mpi.bcast.Robj2slave(f)
+mpi.bcast.Robj2slave(workerpi)
 
-    mpi.bcast.cmd(workerpi())
-    workerpi()
+mpi.bcast.cmd(workerpi())
+workerpi()
 
-    mpi.quit()
+mpi.quit()
 ```
 
 The above example is the dynamic MPI example for calculating the number π. Both master and slave processes carry out the calculation. Note the **mpi.spawn.Rslaves()**, **mpi.bcast.Robj2slave()** and **mpi.bcast.cmd()** function calls.
@@ -304,51 +304,51 @@ Execution is identical to other dynamic Rmpi programs.
 mpi.apply Rmpi example:
 
 ```r
-    #integrand function
-    f <- function(i,h) {
-    x <- h*(i-0.5)
-    return (4/(1 + x*x))
-    }
-
-    #the worker function
-    workerpi <- function(rank,size,n)
-    {
-      #run the calculation
-      n <- max(n,size)
-      h <- 1.0/n
-
-      i <- seq(rank,n,size);
-      mypi <- h*sum(sapply(i,f,h));
-
-      return(mypi)
-    }
-
-    #main
-    library(Rmpi)
-
-    cat("Enter the number of slaves: ")
-    fp<-file("stdin"); ns<-scan(fp,nmax=1); close(fp)
-
-    mpi.spawn.Rslaves(nslaves=ns)
-    mpi.bcast.Robj2slave(f)
-    mpi.bcast.Robj2slave(workerpi)
-
-    while (TRUE)
-    {
-      #read number of intervals
-      cat("Enter the number of intervals: (0 quits) ")
-      fp<-file("stdin"); n<-scan(fp,nmax=1); close(fp)
-      if(n<=0) break
-
-      #run workerpi
-      i=seq(1,2*ns)
-      pi3=sum(mpi.parSapply(i,workerpi,2*ns,n))
-
-      #print results
-      cat(sprintf("Value of PI %16.14f, diff= %16.14fn",pi3,pi3-pi))
-    }
-
-    mpi.quit()
+#integrand function
+f <- function(i,h) {
+x <- h*(i-0.5)
+return (4/(1 + x*x))
+}
+
+#the worker function
+workerpi <- function(rank,size,n)
+{
+  #run the calculation
+  n <- max(n,size)
+  h <- 1.0/n
+
+  i <- seq(rank,n,size);
+  mypi <- h*sum(sapply(i,f,h));
+
+  return(mypi)
+}
+
+#main
+library(Rmpi)
+
+cat("Enter the number of slaves: ")
+fp<-file("stdin"); ns<-scan(fp,nmax=1); close(fp)
+
+mpi.spawn.Rslaves(nslaves=ns)
+mpi.bcast.Robj2slave(f)
+mpi.bcast.Robj2slave(workerpi)
+
+while (TRUE)
+{
+  #read number of intervals
+  cat("Enter the number of intervals: (0 quits) ")
+  fp<-file("stdin"); n<-scan(fp,nmax=1); close(fp)
+  if(n<=0) break
+
+  #run workerpi
+  i=seq(1,2*ns)
+  pi3=sum(mpi.parSapply(i,workerpi,2*ns,n))
+
+  #print results
+  cat(sprintf("Value of PI %16.14f, diff= %16.14f\n",pi3,pi3-pi))
+}
+
+mpi.quit()
 ```
 
 The above is the mpi.apply MPI example for calculating the number π. Only the slave processes carry out the calculation. Note the **mpi.parSapply()** function call. The package parallel [example above](r/#package-parallel) may be trivially adapted (for much better performance) to this structure, using mclapply() in place of mpi.parSapply().
@@ -370,30 +370,30 @@ The R parallel jobs are executed via the PBS queue system exactly as any other p
 Example jobscript for [static Rmpi](r/#static-rmpi) parallel R execution, running 1 process per core:
 
 ```bash
-    #!/bin/bash
-    #PBS -q qprod
-    #PBS -N Rjob
-    #PBS -l select=100:ncpus=16:mpiprocs=16:ompthreads=1
+#!/bin/bash
+#PBS -q qprod
+#PBS -N Rjob
+#PBS -l select=100:ncpus=16:mpiprocs=16:ompthreads=1
 
-    # change to scratch directory
-    SCRDIR=/scratch/$USER/myjob
-    cd $SCRDIR || exit
+# create scratch directory and change to it
+SCRDIR=/scratch/$USER/myjob
+mkdir -p $SCRDIR ; cd $SCRDIR || exit
 
-    # copy input file to scratch
-    cp $PBS_O_WORKDIR/rscript.R .
+# copy input file to scratch
+cp $PBS_O_WORKDIR/rscript.R .
 
-    # load R and openmpi module
-    module load R
-    module load openmpi
+# load R and openmpi module
+module load R
+module load openmpi
 
-    # execute the calculation
-    mpiexec -bycore -bind-to-core R --slave --no-save --no-restore -f rscript.R
+# execute the calculation
+mpiexec -bycore -bind-to-core R --slave --no-save --no-restore -f rscript.R > routput.out
 
-    # copy output file to home
-    cp routput.out $PBS_O_WORKDIR/.
+# copy output file to home
+cp routput.out $PBS_O_WORKDIR/.
 
-    #exit
-    exit
+#exit
+exit
 ```
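+
+Since this jobscript does not set a project ID, supply it at submission time (a sketch; PROJECT_ID is a placeholder):
+
+```console
+$ qsub -A PROJECT_ID ./jobscript
+```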
 
 For more information about the jobscript and MPI execution, refer to the [Job submission](../../job-submission-and-execution/) and general [MPI](../mpi/mpi/) sections.
diff --git a/docs.it4i/salomon/software/numerical-languages/matlab.md b/docs.it4i/salomon/software/numerical-languages/matlab.md
index e08bf9099ee9d5175a8579afe2fc9d6d32b1aa8f..5a64b1ff1ca604aac8f0833d54f8932a991e2c04 100644
--- a/docs.it4i/salomon/software/numerical-languages/matlab.md
+++ b/docs.it4i/salomon/software/numerical-languages/matlab.md
@@ -50,11 +50,11 @@ Delete previously used file mpiLibConf.m, we have observed crashes when using In
 To use Distributed Computing, you first need to set up a parallel profile. We have provided the profile for you; you can either import it on the MATLAB command line:
 
 ```console
-    > parallel.importProfile('/apps/all/MATLAB/2015b-EDU/SalomonPBSPro.settings')
+> parallel.importProfile('/apps/all/MATLAB/2015b-EDU/SalomonPBSPro.settings')
 
-    ans =
+ans =
 
-    SalomonPBSPro
+SalomonPBSPro
 ```
 
 Or in the GUI, go to tab HOME -> Parallel -> Manage Cluster Profiles..., click Import and navigate to:
@@ -79,8 +79,8 @@ The second part of the command shows how to request all necessary licenses. In t
 Once access to the compute nodes is granted by PBS, the user can load the following modules and start Matlab:
 
 ```console
-    r1i0n17$ ml MATLAB/2015a-EDU
-    r1i0n17$ matlab &
+r1i0n17$ ml MATLAB/2015a-EDU
+r1i0n17$ matlab &
 ```
 
 ### Parallel Matlab Batch Job in Local Mode
@@ -88,26 +88,26 @@ Once the access to compute nodes is granted by PBS, user can load following modu
 To run matlab in batch mode, write a matlab script, then write a bash jobscript and execute it via the qsub command. By default, matlab will execute one matlab worker instance per allocated core.
 
 ```bash
-    #!/bin/bash
+#PBS -A PROJECT_ID
-    #PBS -q qprod
-    #PBS -l select=1:ncpus=24:mpiprocs=24:ompthreads=1
+#!/bin/bash
+#PBS -A PROJECT ID
+#PBS -q qprod
+#PBS -l select=1:ncpus=24:mpiprocs=24:ompthreads=1
 
-    # change to shared scratch directory
-    SCR=/scratch/work/user/$USER/$PBS_JOBID
-    mkdir -p $SCR ; cd $SCR || exit
+# change to shared scratch directory
+SCR=/scratch/work/user/$USER/$PBS_JOBID
+mkdir -p $SCR ; cd $SCR || exit
 
-    # copy input file to scratch
-    cp $PBS_O_WORKDIR/matlabcode.m .
+# copy input file to scratch
+cp $PBS_O_WORKDIR/matlabcode.m .
 
-    # load modules
-    module load MATLAB/2015a-EDU
+# load modules
+module load MATLAB/2015a-EDU
 
-    # execute the calculation
-    matlab -nodisplay -r matlabcode > output.out
+# execute the calculation
+matlab -nodisplay -r matlabcode > output.out
 
-    # copy output file to home
-    cp output.out $PBS_O_WORKDIR/.
+# copy output file to home
+cp output.out $PBS_O_WORKDIR/.
 ```
 
 This script may be submitted directly to the PBS workload manager via the qsub command. The inputs and matlab script are in the matlabcode.m file, outputs in the output.out file. Note the missing .m extension in the matlab -r matlabcode call; **the .m must not be included**. Note that the **shared /scratch must be used**. Further, it is **important to include the quit** statement at the end of the matlabcode.m script.
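+
+A minimal matlabcode.m might look like the following sketch (illustrative only; the pool size of 24 matches the ncpus requested above, and the final quit is what allows the non-interactive MATLAB session to terminate):
+
+```console
+% matlabcode.m - illustrative sketch
+parpool('local', 24);        % one worker per allocated core (24 on Salomon)
+s = 0;
+parfor i = 1:24
+    s = s + i^2;             % trivial parallel loop
+end
+disp(s)                      % printed output is captured in output.out
+delete(gcp('nocreate'));     % shut down the parallel pool
+quit                         % required, otherwise the session does not end
+```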
@@ -123,7 +123,7 @@ $ qsub ./jobscript
 The last part of the configuration is done directly in the user's Matlab script, before the Distributed Computing Toolbox is started.
 
 ```console
-    cluster = parcluster('local')
+cluster = parcluster('local')
 ```
 
 This script creates the scheduler object "cluster" of type "local", which starts workers locally.
@@ -134,40 +134,40 @@ This script creates scheduler object "cluster" of type "local" that starts worke
 The last step is to start the parallel pool with the "cluster" object and the correct number of workers. We have 24 cores per node, so we start 24 workers.
 
 ```console
-    parpool(cluster,24);
+parpool(cluster,24);
 
 
-    ... parallel code ...
+... parallel code ...
 
 
-    parpool close
+delete(gcp('nocreate'))   % shut down the parallel pool
 ```
 
 The complete example showing how to use the Distributed Computing Toolbox in local mode is shown below.
 
 ```console
-    cluster = parcluster('local');
-    cluster
+cluster = parcluster('local');
+cluster
 
-    parpool(cluster,24);
+parpool(cluster,24);
 
-    n=2000;
+n=2000;
 
-    W = rand(n,n);
-    W = distributed(W);
-    x = (1:n)';
-    x = distributed(x);
-    spmd
-    [~, name] = system('hostname')
+W = rand(n,n);
+W = distributed(W);
+x = (1:n)';
+x = distributed(x);
+spmd
+[~, name] = system('hostname')
 
-        T = W*x; % Calculation performed on labs, in parallel.
-                 % T and W are both codistributed arrays here.
-    end
-    T;
-    whos         % T and W are both distributed arrays here.
+    T = W*x; % Calculation performed on labs, in parallel.
+             % T and W are both codistributed arrays here.
+end
+T;
+whos         % T and W are both distributed arrays here.
 
-    parpool close
-    quit
+delete(gcp('nocreate'))   % shut down the parallel pool
+quit
 ```
 
 You can copy and paste the example into a .m file and execute it. Note that the parpool size should correspond to the **total number of cores** available on the allocated nodes.
@@ -179,29 +179,29 @@ This mode uses PBS scheduler to launch the parallel pool. It uses the SalomonPBS
 This is an example of an m-script using PBS mode:
 
 ```console
-    cluster = parcluster('SalomonPBSPro');
-    set(cluster, 'SubmitArguments', '-A OPEN-0-0');
-    set(cluster, 'ResourceTemplate', '-q qprod -l select=10:ncpus=24');
-    set(cluster, 'NumWorkers', 240);
+cluster = parcluster('SalomonPBSPro');
+set(cluster, 'SubmitArguments', '-A OPEN-0-0');
+set(cluster, 'ResourceTemplate', '-q qprod -l select=10:ncpus=24');
+set(cluster, 'NumWorkers', 240);
 
-    pool = parpool(cluster,240);
+pool = parpool(cluster,240);
 
-    n=2000;
+n=2000;
 
-    W = rand(n,n);
-    W = distributed(W);
-    x = (1:n)';
-    x = distributed(x);
-    spmd
-    [~, name] = system('hostname')
+W = rand(n,n);
+W = distributed(W);
+x = (1:n)';
+x = distributed(x);
+spmd
+[~, name] = system('hostname')
 
-        T = W*x; % Calculation performed on labs, in parallel.
-                 % T and W are both codistributed arrays here.
-    end
-    whos         % T and W are both distributed arrays here.
+    T = W*x; % Calculation performed on labs, in parallel.
+             % T and W are both codistributed arrays here.
+end
+whos         % T and W are both distributed arrays here.
 
-    % shut down parallel pool
-    delete(pool)
+% shut down parallel pool
+delete(pool)
 ```
 
 Note that we first construct a cluster object using the imported profile, then set some important options, namely SubmitArguments, where you need to specify the accounting ID, and ResourceTemplate, where you need to specify the number of nodes to run the job.
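+
+For example, to run the pool on 4 Salomon nodes (4 x 24 = 96 workers) instead, the same options would be adjusted as follows (a sketch; OPEN-0-0 stands for your own project ID):
+
+```console
+set(cluster, 'SubmitArguments', '-A OPEN-0-0');
+set(cluster, 'ResourceTemplate', '-q qprod -l select=4:ncpus=24');
+set(cluster, 'NumWorkers', 96);
+
+pool = parpool(cluster, 96);
+```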
@@ -220,28 +220,28 @@ For this method, you need to use SalomonDirect profile, import it using [the sam
 This is an example of an m-script using direct mode:
 
 ```console
-    parallel.importProfile('/apps/all/MATLAB/2015b-EDU/SalomonDirect.settings')
-    cluster = parcluster('SalomonDirect');
-    set(cluster, 'NumWorkers', 48);
+parallel.importProfile('/apps/all/MATLAB/2015b-EDU/SalomonDirect.settings')
+cluster = parcluster('SalomonDirect');
+set(cluster, 'NumWorkers', 48);
 
-    pool = parpool(cluster, 48);
+pool = parpool(cluster, 48);
 
-    n=2000;
+n=2000;
 
-    W = rand(n,n);
-    W = distributed(W);
-    x = (1:n)';
-    x = distributed(x);
-    spmd
-    [~, name] = system('hostname')
+W = rand(n,n);
+W = distributed(W);
+x = (1:n)';
+x = distributed(x);
+spmd
+[~, name] = system('hostname')
 
-        T = W*x; % Calculation performed on labs, in parallel.
-                 % T and W are both codistributed arrays here.
-    end
-    whos         % T and W are both distributed arrays here.
+    T = W*x; % Calculation performed on labs, in parallel.
+             % T and W are both codistributed arrays here.
+end
+whos         % T and W are both distributed arrays here.
 
-    % shut down parallel pool
-    delete(pool)
+% shut down parallel pool
+delete(pool)
 ```
 
 ### Non-Interactive Session and Licenses
diff --git a/docs.it4i/salomon/software/numerical-languages/r.md b/docs.it4i/salomon/software/numerical-languages/r.md
index 6df515adad043a581ce3da7855737194b1c250ae..3af38b31824b55cf634ef55a76e31f6bcb035425 100644
--- a/docs.it4i/salomon/software/numerical-languages/r.md
+++ b/docs.it4i/salomon/software/numerical-languages/r.md
@@ -166,47 +166,47 @@ Static Rmpi programs are executed via mpiexec, as any other MPI programs. Number
 Static Rmpi example:
 
 ```r
-    library(Rmpi)
+library(Rmpi)
 
-    #integrand function
-    f <- function(i,h) {
-    x <- h*(i-0.5)
-    return (4/(1 + x*x))
-    }
+#integrand function
+f <- function(i,h) {
+x <- h*(i-0.5)
+return (4/(1 + x*x))
+}
 
-    #initialize
-    invisible(mpi.comm.dup(0,1))
-    rank <- mpi.comm.rank()
-    size <- mpi.comm.size()
-    n<-0
+#initialize
+invisible(mpi.comm.dup(0,1))
+rank <- mpi.comm.rank()
+size <- mpi.comm.size()
+n<-0
 
-    while (TRUE)
-    {
-      #read number of intervals
-      if (rank==0) {
-       cat("Enter the number of intervals: (0 quits) ")
-       fp<-file("stdin"); n<-scan(fp,nmax=1); close(fp)
-      }
+while (TRUE)
+{
+  #read number of intervals
+  if (rank==0) {
+   cat("Enter the number of intervals: (0 quits) ")
+   fp<-file("stdin"); n<-scan(fp,nmax=1); close(fp)
+  }
 
-      #broadcat the intervals
-      n <- mpi.bcast(as.integer(n),type=1)
+  #broadcast the intervals
+  n <- mpi.bcast(as.integer(n),type=1)
 
-      if(n<=0) break
+  if(n<=0) break
 
-      #run the calculation
-      n <- max(n,size)
-      h <-   1.0/n
+  #run the calculation
+  n <- max(n,size)
+  h <-   1.0/n
 
-      i <- seq(rank+1,n,size);
-      mypi <- h*sum(sapply(i,f,h));
+  i <- seq(rank+1,n,size);
+  mypi <- h*sum(sapply(i,f,h));
 
-      pi3 <- mpi.reduce(mypi)
+  pi3 <- mpi.reduce(mypi)
 
-      #print results
-      if (rank==0) cat(sprintf("Value of PI %16.14f, diff= %16.14fn",pi3,pi3-pi))
-    }
+  #print results
+  if (rank==0) cat(sprintf("Value of PI %16.14f, diff= %16.14f\n",pi3,pi3-pi))
+}
 
-    mpi.quit()
+mpi.quit()
 ```
 
 The above is the static MPI example for calculating the number π. Note the **library(Rmpi)** and **mpi.comm.dup()** function calls. Execute the example as:
@@ -222,61 +222,61 @@ Dynamic Rmpi programs are executed by calling the R directly. OpenMPI module mus
 Dynamic Rmpi example:
 
 ```r
-    #integrand function
-    f <- function(i,h) {
-    x <- h*(i-0.5)
-    return (4/(1 + x*x))
-    }
+#integrand function
+f <- function(i,h) {
+x <- h*(i-0.5)
+return (4/(1 + x*x))
+}
 
-    #the worker function
-    workerpi <- function()
-    {
-    #initialize
-    rank <- mpi.comm.rank()
-    size <- mpi.comm.size()
-    n<-0
+#the worker function
+workerpi <- function()
+{
+#initialize
+rank <- mpi.comm.rank()
+size <- mpi.comm.size()
+n<-0
 
-    while (TRUE)
-    {
-      #read number of intervals
-      if (rank==0) {
-       cat("Enter the number of intervals: (0 quits) ")
-       fp<-file("stdin"); n<-scan(fp,nmax=1); close(fp)
-      }
+while (TRUE)
+{
+  #read number of intervals
+  if (rank==0) {
+   cat("Enter the number of intervals: (0 quits) ")
+   fp<-file("stdin"); n<-scan(fp,nmax=1); close(fp)
+  }
 
-      #broadcat the intervals
-      n <- mpi.bcast(as.integer(n),type=1)
+  #broadcast the intervals
+  n <- mpi.bcast(as.integer(n),type=1)
 
-      if(n<=0) break
+  if(n<=0) break
 
-      #run the calculation
-      n <- max(n,size)
-      h <-   1.0/n
+  #run the calculation
+  n <- max(n,size)
+  h <-   1.0/n
 
-      i <- seq(rank+1,n,size);
-      mypi <- h*sum(sapply(i,f,h));
+  i <- seq(rank+1,n,size);
+  mypi <- h*sum(sapply(i,f,h));
 
-      pi3 <- mpi.reduce(mypi)
+  pi3 <- mpi.reduce(mypi)
 
-      #print results
-      if (rank==0) cat(sprintf("Value of PI %16.14f, diff= %16.14fn",pi3,pi3-pi))
-    }
-    }
+  #print results
+  if (rank==0) cat(sprintf("Value of PI %16.14f, diff= %16.14f\n",pi3,pi3-pi))
+}
+}
 
-    #main
-    library(Rmpi)
+#main
+library(Rmpi)
 
-    cat("Enter the number of slaves: ")
-    fp<-file("stdin"); ns<-scan(fp,nmax=1); close(fp)
+cat("Enter the number of slaves: ")
+fp<-file("stdin"); ns<-scan(fp,nmax=1); close(fp)
 
-    mpi.spawn.Rslaves(nslaves=ns)
-    mpi.bcast.Robj2slave(f)
-    mpi.bcast.Robj2slave(workerpi)
+mpi.spawn.Rslaves(nslaves=ns)
+mpi.bcast.Robj2slave(f)
+mpi.bcast.Robj2slave(workerpi)
 
-    mpi.bcast.cmd(workerpi())
-    workerpi()
+mpi.bcast.cmd(workerpi())
+workerpi()
 
-    mpi.quit()
+mpi.quit()
 ```
 
 The above example is the dynamic MPI example for calculating the number π. Both master and slave processes carry out the calculation. Note the **mpi.spawn.Rslaves()**, **mpi.bcast.Robj2slave()** and **mpi.bcast.cmd()** function calls.