From ff2a5b30295a6e6e659d1dd42c0976985c7c34d6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Krup=C4=8D=C3=ADk?= <lukas.krupcik@vsb.cz>
Date: Mon, 2 Jul 2018 13:06:43 +0200
Subject: [PATCH] Cleanup

---
 README.md                                     |   5 -
 docs.it4i/anselm/capacity-computing.md        |   2 +-
 docs.it4i/anselm/compute-nodes.md             |   2 +-
 docs.it4i/anselm/hardware-overview.md         |   2 +-
 .../anselm/job-submission-and-execution.md    |   2 +-
 .../anselm/software/debuggers/allinea-ddt.md  |   6 +-
 .../debuggers/allinea-performance-reports.md  |   4 +-
 .../software/intel-suite/intel-compilers.md   |   2 +-
 .../software/intel-suite/intel-debugger.md    |  10 +-
 docs.it4i/anselm/software/mpi/mpi.md          |  10 +-
 .../software/numerical-languages/matlab.md    | 164 +++++++++---------
 docs.it4i/anselm/storage.md                   |  12 +-
 .../graphical-user-interface/vnc.md           |   2 +-
 .../certificates-faq.md                       |  10 +-
 .../obtaining-login-credentials.md            |   4 +-
 docs.it4i/index.md                            |   2 +-
 docs.it4i/prace.md                            |  14 +-
 docs.it4i/salomon/capacity-computing.md       |   2 +-
 docs.it4i/salomon/hardware-overview.md        |   2 +-
 .../salomon/job-submission-and-execution.md   |   6 +-
 docs.it4i/salomon/software/ansys/ansys-cfx.md |   2 +-
 .../salomon/software/ansys/ansys-fluent.md    |   4 +-
 .../salomon/software/ansys/ansys-ls-dyna.md   |   2 +-
 .../software/ansys/ansys-mechanical-apdl.md   |   2 +-
 docs.it4i/salomon/software/ansys/ansys.md     |   4 +-
 .../salomon/software/chemistry/nwchem.md      |   2 +-
 .../salomon/software/debuggers/valgrind.md    |   4 +-
 .../software/numerical-languages/octave.md    |   6 +-
 docs.it4i/salomon/storage.md                  |   8 +-
 .../cae/comsol/comsol-multiphysics.md         |  10 +-
 docs.it4i/software/compilers.md               |  16 +-
 docs.it4i/software/debuggers/aislinn.md       |   2 +-
 .../debuggers/intel-vtune-amplifier.md        |   2 +-
 docs.it4i/software/debuggers/papi.md          |   2 +-
 docs.it4i/software/debuggers/score-p.md       |   2 +-
 docs.it4i/software/debuggers/valgrind.md      |   4 +-
 .../software/intel/intel-xeon-phi-anselm.md   |  12 +-
 .../software/intel/intel-xeon-phi-salomon.md  |  12 +-
 docs.it4i/software/isv_licenses.md            |   2 +-
 docs.it4i/software/lang/java.md               |   6 +-
 docs.it4i/software/modules/lmod.md            |   8 +-
 docs.it4i/software/mpi/mpi.md                 |   2 +-
 .../software/mpi/mpi4py-mpi-for-python.md     |   2 +-
 docs.it4i/software/mpi/ompi-examples.md       |   6 +-
 .../software/numerical-languages/matlab.md    |  12 +-
 .../numerical-languages/matlab_1314.md        |  16 +-
 .../software/numerical-languages/octave.md    |   4 +-
 .../numerical-languages/opencoarrays.md       |   2 +-
 docs.it4i/software/numerical-languages/r.md   |  14 +-
 .../software/numerical-libraries/hdf5.md      |   4 +-
 docs.it4i/software/orca.md                    |   4 +-
 docs.it4i/software/tools/ansys/ansys-cfx.md   |   2 +-
 .../software/tools/ansys/ansys-fluent.md      |   4 +-
 .../software/tools/ansys/ansys-ls-dyna.md     |   2 +-
 .../tools/ansys/ansys-mechanical-apdl.md      |   2 +-
 docs.it4i/software/tools/ansys/ansys.md       |   2 +-
 docs.it4i/software/tools/ansys/ls-dyna.md     |   2 +-
 docs.it4i/software/tools/ansys/workbench.md   |   2 +-
 docs.it4i/software/tools/easybuild-images.md  |   2 +-
 docs.it4i/software/viz/openfoam.md            |  12 +-
 60 files changed, 234 insertions(+), 237 deletions(-)

diff --git a/README.md b/README.md
index 92333675f..82d31ceb8 100644
--- a/README.md
+++ b/README.md
@@ -12,11 +12,6 @@ This is project contain IT4Innovations user documentation source.
 * [http://facelessuser.github.io/pymdown-extensions/](http://facelessuser.github.io/pymdown-extensions/)
 * [http://squidfunk.github.io/mkdocs-material/](http://squidfunk.github.io/mkdocs-material/)
 
-## Rules
-
-* [spellcheck https://github.com/lukeapage/node-markdown-spellcheck](spellcheck https://github.com/lukeapage/node-markdown-spellcheck)
-* [SI units http://physics.nist.gov/cuu/Units/checklist.html](SI units http://physics.nist.gov/cuu/Units/checklist.html)
-
 ```
 fair-share
 InfiniBand
diff --git a/docs.it4i/anselm/capacity-computing.md b/docs.it4i/anselm/capacity-computing.md
index 4b191d815..41bb930fb 100644
--- a/docs.it4i/anselm/capacity-computing.md
+++ b/docs.it4i/anselm/capacity-computing.md
@@ -83,7 +83,7 @@ $ qsub -N JOBNAME -J 1-900 jobscript
 12345[].dm2
 ```
 
-In this example, we submit a job array of 900 subjobs. Each subjob will run on one full node and is assumed to take less than 2 hours (please note the #PBS directives in the beginning of the jobscript file, don't forget to set your valid PROJECT_ID and desired queue).
+In this example, we submit a job array of 900 subjobs. Each subjob will run on one full node and is assumed to take less than 2 hours (note the #PBS directives at the beginning of the jobscript file; don't forget to set your valid PROJECT_ID and desired queue).
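+
+For reference, the #PBS header referred to above might look like this (a sketch; PROJECT_ID, the queue, and the walltime are placeholders to adjust):
+
+```bash
+#!/bin/bash
+#PBS -A PROJECT_ID
+#PBS -q qprod
+#PBS -l select=1:ncpus=16
+#PBS -l walltime=02:00:00
+```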
 
 Sometimes for testing purposes, you may need to submit a one-element only array. This is not allowed by PBSPro, but there's a workaround:
 
diff --git a/docs.it4i/anselm/compute-nodes.md b/docs.it4i/anselm/compute-nodes.md
index 9e5ed7135..2ffd49193 100644
--- a/docs.it4i/anselm/compute-nodes.md
+++ b/docs.it4i/anselm/compute-nodes.md
@@ -55,7 +55,7 @@ Anselm is cluster of x86-64 Intel based nodes built with Bull Extreme Computing
 | Node type                    | Count | Range       | Memory | Cores       | [Access](resources-allocation-policy/)    |
 | ---------------------------- | ----- | ----------- | ------ | ----------- | --------------------------------------    |
 | Nodes without an accelerator | 180   | cn[1-180]   | 64GB   | 16 @ 2.4GHz | qexp, qprod, qlong, qfree, qprace, qatlas |
-| Nodes with a GPU accelerator | 23    | cn[181-203] | 96GB   | 16 @ 2.3GHz | qgpu, qexp                                |
+| Nodes with a GPU accelerator | 23    | cn[181-203] | 96GB   | 16 @ 2.3GHz | qnvidia, qexp                             |
 | Nodes with a MIC accelerator | 4     | cn[204-207] | 96GB   | 16 @ 2.3GHz | qmic, qexp                                |
 | Fat compute nodes            | 2     | cn[208-209] | 512GB  | 16 @ 2.4GHz | qfat, qexp                                |
 
diff --git a/docs.it4i/anselm/hardware-overview.md b/docs.it4i/anselm/hardware-overview.md
index f91c5bc70..5f10e6500 100644
--- a/docs.it4i/anselm/hardware-overview.md
+++ b/docs.it4i/anselm/hardware-overview.md
@@ -57,4 +57,4 @@ The parameters are summarized in the following tables:
 | MIC accelerated  | 2 x Intel Sandy Bridge E5-2470, 2.3 GHz | 96 GB  | Intel Xeon Phi 5110P |
 | Fat compute node | 2 x Intel Sandy Bridge E5-2665, 2.4 GHz | 512 GB | -                    |
 
-For more details please refer to [Compute nodes](compute-nodes/), [Storage](storage/), and [Network](network/).
+For more details refer to [Compute nodes](compute-nodes/), [Storage](storage/), and [Network](network/).
diff --git a/docs.it4i/anselm/job-submission-and-execution.md b/docs.it4i/anselm/job-submission-and-execution.md
index 8b1e201f3..0d5321f82 100644
--- a/docs.it4i/anselm/job-submission-and-execution.md
+++ b/docs.it4i/anselm/job-submission-and-execution.md
@@ -2,7 +2,7 @@
 
 ## Job Submission
 
-When allocating computational resources for the job, please specify
+When allocating computational resources for the job, specify:
 
 1. a suitable queue for your job (the default is qprod)
 1. the number of computational nodes required
diff --git a/docs.it4i/anselm/software/debuggers/allinea-ddt.md b/docs.it4i/anselm/software/debuggers/allinea-ddt.md
index a5dd069ba..a1f26a9fb 100644
--- a/docs.it4i/anselm/software/debuggers/allinea-ddt.md
+++ b/docs.it4i/anselm/software/debuggers/allinea-ddt.md
@@ -25,14 +25,14 @@ In case of debugging on accelerators:
 Load all necessary modules to compile the code. For example:
 
 ```bash
-    $ module load intel
-    $ module load impi   ... or ... module load openmpi/X.X.X-icc
+    $ ml intel
+    $ ml impi   ... or ... ml openmpi/X.X.X-icc
 ```
 
 Load the Allinea DDT module:
 
 ```bash
-    $ module load Forge
+    $ ml Forge
 ```
 
 Compile the code:
diff --git a/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md b/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md
index 7b519b85c..92602c6b3 100644
--- a/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md
+++ b/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md
@@ -13,7 +13,7 @@ Our license is limited to 64 MPI processes.
 Allinea Performance Reports version 6.0 is available
 
 ```bash
-    $ module load PerformanceReports/6.0
+    $ ml PerformanceReports/6.0
 ```
 
-The module sets up environment variables, required for using the Allinea Performance Reports. This particular command loads the default module, which is performance reports version 4.2.
+The module sets up the environment variables required for using Allinea Performance Reports. This particular command loads version 6.0 of the module.
@@ -46,7 +46,7 @@ First, we allocate some nodes via the express queue:
 Then we load the modules and run the program the usual way:
 
 ```bash
-    $ module load intel impi allinea-perf-report/4.2
+    $ ml intel impi allinea-perf-report/4.2
     $ mpirun ./mympiprog.x
 ```
 
diff --git a/docs.it4i/anselm/software/intel-suite/intel-compilers.md b/docs.it4i/anselm/software/intel-suite/intel-compilers.md
index 9cc1f1e0a..71708a733 100644
--- a/docs.it4i/anselm/software/intel-suite/intel-compilers.md
+++ b/docs.it4i/anselm/software/intel-suite/intel-compilers.md
@@ -3,7 +3,7 @@
 The Intel compilers version 13.1.1 are available, via module Intel. The compilers include the ICC C and C++ compiler and the IFORT Fortran 77/90/95 compiler.
 
 ```bash
-    $ module load intel
+    $ ml intel
     $ icc -v
     $ ifort -v
 ```
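+
+For example, to compile a simple program with optimizations enabled (a sketch; the file names and flags are illustrative):
+
+```bash
+    $ icc -O2 -xHost hello.c -o hello.x
+    $ ifort -O2 -xHost hello.f90 -o hello.x
+```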
diff --git a/docs.it4i/anselm/software/intel-suite/intel-debugger.md b/docs.it4i/anselm/software/intel-suite/intel-debugger.md
index d3acd2c51..c8919a1d0 100644
--- a/docs.it4i/anselm/software/intel-suite/intel-debugger.md
+++ b/docs.it4i/anselm/software/intel-suite/intel-debugger.md
@@ -5,7 +5,7 @@
-The intel debugger version 13.0 is available, via module intel. The debugger works for applications compiled with C and C++ compiler and the ifort fortran 77/90/95 compiler. The debugger provides java GUI environment. Use X display for running the GUI.
+The intel debugger version 13.0 is available, via module intel. The debugger works for applications compiled with the C and C++ compilers and the ifort fortran 77/90/95 compiler. The debugger provides a java GUI environment. Use X display for running the GUI.
 
 ```bash
-    $ module load intel
+    $ ml intel
     $ idb
 ```
 
@@ -24,8 +24,8 @@ Example:
     qsub: waiting for job 19654.srv11 to start
     qsub: job 19654.srv11 ready
 
-    $ module load intel
-    $ module load java
+    $ ml intel
+    $ ml java
     $ icc -O0 -g myprog.c -o myprog.x
     $ idb ./myprog.x
 ```
@@ -45,7 +45,7 @@ For debugging small number of MPI ranks, you may execute and debug each rank in
     qsub: waiting for job 19654.srv11 to start
     qsub: job 19655.srv11 ready
 
-    $ module load intel impi
+    $ ml intel impi
     $ mpirun -ppn 1 -hostfile $PBS_NODEFILE --enable-x xterm -e idbc ./mympiprog.x
 ```
 
@@ -60,7 +60,7 @@ Run the idb debugger from within the MPI debug option. This will cause the debug
     qsub: waiting for job 19654.srv11 to start
     qsub: job 19655.srv11 ready
 
-    $ module load intel impi
+    $ ml intel impi
     $ mpirun -n 32 -idb ./mympiprog.x
 ```
 
diff --git a/docs.it4i/anselm/software/mpi/mpi.md b/docs.it4i/anselm/software/mpi/mpi.md
index 08be4bce6..2990cf222 100644
--- a/docs.it4i/anselm/software/mpi/mpi.md
+++ b/docs.it4i/anselm/software/mpi/mpi.md
@@ -14,10 +14,10 @@ The Anselm cluster provides several implementations of the MPI library:
 
 MPI libraries are activated via the environment modules.
 
-Look up section modulefiles/mpi in module avail
+Look up the modulefiles/mpi section in the `ml av` output:
 
 ```bash
-    $ module avail
+    $ ml av
     ------------------------- /opt/modules/modulefiles/mpi -------------------------
     bullxmpi/bullxmpi-1.2.4.1 mvapich2/1.9-icc
     impi/4.0.3.008             openmpi/1.6.5-gcc(default)
@@ -44,7 +44,7 @@ There are default compilers associated with any particular MPI implementation. T
 Examples:
 
 ```bash
-    $ module load openmpi
+    $ ml openmpi
 ```
 
 In this example, we activate the latest openmpi with latest GNU compilers
@@ -52,8 +52,8 @@ In this example, we activate the latest openmpi with latest GNU compilers
 To use openmpi with the intel compiler suite, use
 
 ```bash
-    $ module load intel
-    $ module load openmpi/1.6.5-icc
+    $ ml intel
+    $ ml openmpi/1.6.5-icc
 ```
 
 In this example, the openmpi 1.6.5 using intel compilers is activated
diff --git a/docs.it4i/anselm/software/numerical-languages/matlab.md b/docs.it4i/anselm/software/numerical-languages/matlab.md
index cfc958b7e..847b32d8b 100644
--- a/docs.it4i/anselm/software/numerical-languages/matlab.md
+++ b/docs.it4i/anselm/software/numerical-languages/matlab.md
@@ -10,31 +10,31 @@ Matlab is available in versions R2015a and R2015b. There are always two variants
 To load the latest version of Matlab load the module
 
 ```bash
-    $ module load MATLAB
+$ ml MATLAB
 ```
 
-By default the EDU variant is marked as default. If you need other version or variant, load the particular version. To obtain the list of available versions use
+The EDU variant is marked as default. If you need another version or variant, load the particular version. To obtain the list of available versions use
 
 ```bash
-    $ module avail MATLAB
+$ ml av MATLAB
 ```
 
 If you need to use the Matlab GUI to prepare your Matlab programs, you can use Matlab directly on the login nodes. But for all computations use Matlab on the compute nodes via PBS Pro scheduler.
 
-If you require the Matlab GUI, please follow the general information about [running graphical applications](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/).
+If you require the Matlab GUI, follow the general information about [running graphical applications](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/).
 
-Matlab GUI is quite slow using the X forwarding built in the PBS (qsub -X), so using X11 display redirection either via SSH or directly by xauth (please see the "GUI Applications on Compute Nodes over VNC" part [here](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/x-window-system/)) is recommended.
+The Matlab GUI is quite slow when using the X forwarding built into PBS (qsub -X), so using X11 display redirection either via SSH or directly by xauth (see the "GUI Applications on Compute Nodes over VNC" part [here](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/x-window-system/)) is recommended.
 
 To run Matlab with GUI, use
 
 ```bash
-    $ matlab
+$ matlab
 ```
 
 To run Matlab in text mode, without the Matlab Desktop GUI environment, use
 
 ```bash
-    $ matlab -nodesktop -nosplash
+$ matlab -nodesktop -nosplash
 ```
 
-plots, images, etc... will be still available.
+Plots, images, etc. will still be available.
@@ -72,8 +72,8 @@ With the new mode, MATLAB itself launches the workers via PBS, so you can either
 Following example shows how to start interactive session with support for Matlab GUI. For more information about GUI based applications on Anselm see [this page](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/x-window-system/).
 
 ```bash
-    $ xhost +
-    $ qsub -I -v DISPLAY=$(uname -n):$(echo $DISPLAY | cut -d ':' -f 2) -A NONE-0-0 -q qexp -l select=1 -l walltime=00:30:00
+$ xhost +
+$ qsub -I -v DISPLAY=$(uname -n):$(echo $DISPLAY | cut -d ':' -f 2) -A NONE-0-0 -q qexp -l select=1 -l walltime=00:30:00
-    -l feature__matlab__MATLAB=1
+-l feature__matlab__MATLAB=1
 ```
 
@@ -84,8 +84,8 @@ The second part of the command shows how to request all necessary licenses. In t
 Once the access to compute nodes is granted by PBS, user can load following modules and start Matlab:
 
 ```bash
-    r1i0n17$ module load MATLAB/2015b-EDU
-    r1i0n17$ matlab &
+r1i0n17$ ml MATLAB/2015b-EDU
+r1i0n17$ matlab &
 ```
 
 ### Parallel Matlab Batch Job in Local Mode
@@ -93,26 +93,26 @@ Once the access to compute nodes is granted by PBS, user can load following modu
-To run matlab in batch mode, write an matlab script, then write a bash jobscript and execute via the qsub command. By default, matlab will execute one matlab worker instance per allocated core.
+To run matlab in batch mode, write a matlab script, then write a bash jobscript and execute it via the qsub command. By default, matlab will execute one matlab worker instance per allocated core.
 
 ```bash
-    #!/bin/bash
-    #PBS -A PROJECT ID
-    #PBS -q qprod
-    #PBS -l select=1:ncpus=16:mpiprocs=16:ompthreads=1
+#!/bin/bash
+#PBS -A PROJECT_ID
+#PBS -q qprod
+#PBS -l select=1:ncpus=16:mpiprocs=16:ompthreads=1
 
-    # change to shared scratch directory
-    SCR=/scratch/work/user/$USER/$PBS_JOBID
-    mkdir -p $SCR ; cd $SCR || exit
+# change to shared scratch directory
+SCR=/scratch/work/user/$USER/$PBS_JOBID
+mkdir -p $SCR ; cd $SCR || exit
 
-    # copy input file to scratch
-    cp $PBS_O_WORKDIR/matlabcode.m .
+# copy input file to scratch
+cp $PBS_O_WORKDIR/matlabcode.m .
 
-    # load modules
-    module load MATLAB/2015a-EDU
+# load modules
+ml MATLAB/2015a-EDU
 
-    # execute the calculation
-    matlab -nodisplay -r matlabcode > output.out
+# execute the calculation
+matlab -nodisplay -r matlabcode > output.out
 
-    # copy output file to home
-    cp output.out $PBS_O_WORKDIR/.
+# copy output file to home
+cp output.out $PBS_O_WORKDIR/.
 ```
 
-This script may be submitted directly to the PBS workload manager via the qsub command.  The inputs and matlab script are in matlabcode.m file, outputs in output.out file. Note the missing .m extension in the matlab -r matlabcodefile call, **the .m must not be included**. Note that the **shared /scratch must be used**. Further, it is **important to include quit** statement at the end of the matlabcode.m script.
+This script may be submitted directly to the PBS workload manager via the qsub command. The inputs and matlab script are in the matlabcode.m file, outputs in the output.out file. Note the missing .m extension in the matlab -r matlabcode call, **the .m must not be included**. Note that the **shared /scratch must be used**. Further, it is **important to include the quit** statement at the end of the matlabcode.m script.
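+
+A minimal matlabcode.m sketch (the content is illustrative; note the required quit at the end):
+
+```bash
+% matlabcode.m
+disp('Hello from the Matlab batch job')
+quit
+```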
@@ -120,7 +120,7 @@ This script may be submitted directly to the PBS workload manager via the qsub c
 Submit the jobscript using qsub
 
 ```bash
-    $ qsub ./jobscript
+$ qsub ./jobscript
 ```
 
 ### Parallel Matlab Local Mode Program Example
@@ -128,7 +128,7 @@ Submit the jobscript using qsub
 The last part of the configuration is done directly in the user Matlab script before Distributed Computing Toolbox is started.
 
 ```bash
-    cluster = parcluster('local')
+cluster = parcluster('local')
 ```
 
 This script creates scheduler object "cluster" of type "local" that starts workers locally.
@@ -151,28 +151,28 @@ The last step is to start matlabpool with "cluster" object and correct number of
 The complete example showing how to use Distributed Computing Toolbox in local mode is shown here.
 
 ```bash
-    cluster = parcluster('local');
-    cluster
+cluster = parcluster('local');
+cluster
 
-    parpool(cluster,24);
+parpool(cluster,24);
 
-    n=2000;
+n=2000;
 
-    W = rand(n,n);
-    W = distributed(W);
-    x = (1:n)';
-    x = distributed(x);
-    spmd
-    [~, name] = system('hostname')
+W = rand(n,n);
+W = distributed(W);
+x = (1:n)';
+x = distributed(x);
+spmd
+[~, name] = system('hostname')
 
-        T = W*x; % Calculation performed on labs, in parallel.
-                 % T and W are both codistributed arrays here.
-    end
-    T;
-    whos         % T and W are both distributed arrays here.
+    T = W*x; % Calculation performed on labs, in parallel.
+             % T and W are both codistributed arrays here.
+end
+T;
+whos         % T and W are both distributed arrays here.
 
-    parpool close
-    quit
+parpool close
+quit
 ```
 
-You can copy and paste the example in a .m file and execute. Note that the parpool size should correspond to **total number of cores** available on allocated nodes.
+You can copy and paste the example into a .m file and execute it. Note that the parpool size should correspond to the **total number of cores** available on the allocated nodes.
@@ -184,29 +184,29 @@ This mode uses PBS scheduler to launch the parallel pool. It uses the SalomonPBS
 This is an example of m-script using PBS mode:
 
 ```bash
-    cluster = parcluster('SalomonPBSPro');
-    set(cluster, 'SubmitArguments', '-A OPEN-0-0');
-    set(cluster, 'ResourceTemplate', '-q qprod -l select=10:ncpus=16');
-    set(cluster, 'NumWorkers', 160);
+cluster = parcluster('SalomonPBSPro');
+set(cluster, 'SubmitArguments', '-A OPEN-0-0');
+set(cluster, 'ResourceTemplate', '-q qprod -l select=10:ncpus=16');
+set(cluster, 'NumWorkers', 160);
 
-    pool = parpool(cluster, 160);
+pool = parpool(cluster, 160);
 
-    n=2000;
+n=2000;
 
-    W = rand(n,n);
-    W = distributed(W);
-    x = (1:n)';
-    x = distributed(x);
-    spmd
-    [~, name] = system('hostname')
+W = rand(n,n);
+W = distributed(W);
+x = (1:n)';
+x = distributed(x);
+spmd
+[~, name] = system('hostname')
 
-        T = W*x; % Calculation performed on labs, in parallel.
-                 % T and W are both codistributed arrays here.
-    end
-    whos         % T and W are both distributed arrays here.
+    T = W*x; % Calculation performed on labs, in parallel.
+             % T and W are both codistributed arrays here.
+end
+whos         % T and W are both distributed arrays here.
 
-    % shut down parallel pool
-    delete(pool)
+% shut down parallel pool
+delete(pool)
 ```
 
 Note that we first construct a cluster object using the imported profile, then set some important options, namely: SubmitArguments, where you need to specify accounting id, and ResourceTemplate, where you need to specify number of nodes to run the job.
@@ -225,39 +225,39 @@ For this method, you need to use SalomonDirect profile, import it using [the sam
 This is an example of m-script using direct mode:
 
 ```bash
-    parallel.importProfile('/apps/all/MATLAB/2015a-EDU/SalomonDirect.settings')
-    cluster = parcluster('SalomonDirect');
-    set(cluster, 'NumWorkers', 48);
+parallel.importProfile('/apps/all/MATLAB/2015a-EDU/SalomonDirect.settings')
+cluster = parcluster('SalomonDirect');
+set(cluster, 'NumWorkers', 48);
 
-    pool = parpool(cluster, 48);
+pool = parpool(cluster, 48);
 
-    n=2000;
+n=2000;
 
-    W = rand(n,n);
-    W = distributed(W);
-    x = (1:n)';
-    x = distributed(x);
-    spmd
-    [~, name] = system('hostname')
+W = rand(n,n);
+W = distributed(W);
+x = (1:n)';
+x = distributed(x);
+spmd
+[~, name] = system('hostname')
 
-        T = W*x; % Calculation performed on labs, in parallel.
-                 % T and W are both codistributed arrays here.
-    end
-    whos         % T and W are both distributed arrays here.
+    T = W*x; % Calculation performed on labs, in parallel.
+             % T and W are both codistributed arrays here.
+end
+whos         % T and W are both distributed arrays here.
 
-    % shut down parallel pool
-    delete(pool)
+% shut down parallel pool
+delete(pool)
 ```
 
 ### Non-Interactive Session and Licenses
 
-If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least the `-l _feature_matlab_MATLAB=1` for EDU variant of Matlab. More information about how to check the license features states and how to request them with PBS Pro, please [look here](../isv_licenses/).
+If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least the `-l _feature_matlab_MATLAB=1` for the EDU variant of Matlab. For more information on how to check license feature states and how to request them with PBS Pro, [look here](../isv_licenses/).
 
-In case of non-interactive session please read the [following information](../isv_licenses/) on how to modify the qsub command to test for available licenses prior getting the resource allocation.
+In case of a non-interactive session, read the [following information](../isv_licenses/) on how to modify the qsub command to test for available licenses prior to getting the resource allocation.
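+
+For example, a batch submission requesting the Matlab license feature might look like this (a sketch; substitute your own project ID and resources, using the feature syntax from the interactive example above):
+
+```bash
+$ qsub -A PROJECT_ID -q qprod -l select=1:ncpus=16 -l feature__matlab__MATLAB=1 ./jobscript
+```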
 
 ### Matlab Distributed Computing Engines Start Up Time
 
-Starting Matlab workers is an expensive process that requires certain amount of time. For your information please see the following table:
+Starting Matlab workers is an expensive process that requires a certain amount of time. For your information, see the following table:
 
 | compute nodes | number of workers | start-up time[s] |
 | ------------- | ----------------- | ---------------- |
diff --git a/docs.it4i/anselm/storage.md b/docs.it4i/anselm/storage.md
index 5587dccc7..4842ee179 100644
--- a/docs.it4i/anselm/storage.md
+++ b/docs.it4i/anselm/storage.md
@@ -98,7 +98,7 @@ The architecture of Lustre on Anselm is composed of two metadata servers (MDS) a
 
 ### HOME File System
 
-The HOME filesystem is mounted in directory /home. Users home directories /home/username reside on this filesystem. Accessible capacity is 320TB, shared among all users. Individual users are restricted by filesystem usage quotas, set to 250GB per user. If 250GB should prove as insufficient for particular user, please contact [support](https://support.it4i.cz/rt), the quota may be lifted upon request.
+The HOME filesystem is mounted in directory /home. Users' home directories /home/username reside on this filesystem. Accessible capacity is 320TB, shared among all users. Individual users are restricted by filesystem usage quotas, set to 250GB per user. If 250GB should prove insufficient for a particular user, contact [support](https://support.it4i.cz/rt); the quota may be lifted upon request.
 
 !!! note
     The HOME filesystem is intended for preparation, evaluation, processing and storage of data generated by active Projects.
@@ -127,7 +127,7 @@ Default stripe size is 1MB, stripe count is 1. There are 22 OSTs dedicated for t
 
 ### SCRATCH File System
 
-The SCRATCH filesystem is mounted in directory /scratch. Users may freely create subdirectories and files on the filesystem. Accessible capacity is 146TB, shared among all users. Individual users are restricted by filesystem usage quotas, set to 100TB per user. The purpose of this quota is to prevent runaway programs from filling the entire filesystem and deny service to other users. If 100TB should prove as insufficient for particular user, please contact [support](https://support.it4i.cz/rt), the quota may be lifted upon request.
+The SCRATCH filesystem is mounted in directory /scratch. Users may freely create subdirectories and files on the filesystem. Accessible capacity is 146TB, shared among all users. Individual users are restricted by filesystem usage quotas, set to 100TB per user. The purpose of this quota is to prevent runaway programs from filling the entire filesystem and denying service to other users. If 100TB should prove insufficient for a particular user, contact [support](https://support.it4i.cz/rt); the quota may be lifted upon request.
 
 !!! note
     The Scratch filesystem is intended for temporary scratch data generated during the calculation as well as for high performance access to input and output files. All I/O intensive jobs must use the SCRATCH filesystem as their working directory.
@@ -324,7 +324,7 @@ User of data storage CESNET (DU) association can become organizations or an indi
 
 User may only use data storage CESNET for data transfer and storage which are associated with activities in science, research, development, the spread of education, culture and prosperity. In detail see “Acceptable Use Policy CESNET Large Infrastructure (Acceptable Use Policy, AUP)”.
 
-The service is documented [here](https://du.cesnet.cz/en/start). For special requirements please contact directly CESNET Storage Department via e-mail [du-support(at)cesnet.cz](mailto:du-support@cesnet.cz).
+The service is documented [here](https://du.cesnet.cz/en/start). For special requirements, contact the CESNET Storage Department directly via e-mail [du-support(at)cesnet.cz](mailto:du-support@cesnet.cz).
 
 The procedure to obtain the CESNET access is quick and trouble-free.
 
@@ -378,7 +378,7 @@ $ cp -a mydir cesnet/.
 $ cp cesnet/myfile .
 ```
 
-Once done, please remember to unmount the storage
+Once done, remember to unmount the storage
 
 ```console
 $ fusermount -u cesnet
@@ -386,8 +386,8 @@ $ fusermount -u cesnet
 
 ### RSYNC Access
 
-!!! Note "Note"
-	RSYNC provides delta transfer for best performance, can resume interrupted transfers
+!!! info
+    RSYNC provides delta transfer for best performance and can resume interrupted transfers
 
 RSYNC is a fast and extraordinarily versatile file copying tool. It is famous for its delta-transfer algorithm, which reduces the amount of data sent over the network by sending only the differences between the source files and the existing files in the destination.  RSYNC is widely used for backups and mirroring and as an improved copy command for everyday use.
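+
+A minimal sketch of such a transfer (the server name ssh.du1.cesnet.cz and the target path are illustrative; use the endpoint assigned to you by CESNET):
+
+```console
+$ rsync --progress mydatafile username@ssh.du1.cesnet.cz:.
+```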
 
diff --git a/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md
index 986502bda..538ed5c5b 100644
--- a/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md
+++ b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md
@@ -91,7 +91,7 @@ At your machine, create the tunnel:
 local $  ssh -TN -f username@login2.cluster-name.it4i.cz -L 5961:localhost:5961
 ```
 
-Issue the following command to check the tunnel is established (please note the PID 2022 in the last column, you'll need it for closing the tunnel):
+Issue the following command to check that the tunnel is established (note the PID 2022 in the last column; you'll need it for closing the tunnel):
 
 ```console
 local $ netstat -natp | grep 5961
diff --git a/docs.it4i/general/obtaining-login-credentials/certificates-faq.md b/docs.it4i/general/obtaining-login-credentials/certificates-faq.md
index 4d2315dd2..815344b2d 100644
--- a/docs.it4i/general/obtaining-login-credentials/certificates-faq.md
+++ b/docs.it4i/general/obtaining-login-credentials/certificates-faq.md
@@ -31,7 +31,7 @@ Yes, provided that the CA which provides this service is also a member of IGTF.
 
 ## Q: Does IT4Innovations Support the TERENA Certificate Service?
 
- Yes, ITInnovations supports TERENA eScience personal certificates. For more information, please visit [TCS - Trusted Certificate Service](https://tcs-escience-portal.terena.org/), where you also can find if your organisation/country can use this service
+ Yes, IT4Innovations supports TERENA eScience personal certificates. For more information, visit [TCS - Trusted Certificate Service](https://tcs-escience-portal.terena.org/), where you can also find out whether your organisation/country can use this service.
 
 ## Q: What Format Should My Certificate Take?
 
@@ -53,7 +53,7 @@ Certification Authority (CA) certificates are used to verify the link between yo
 
-To assist users, SURFsara (a member of PRACE) provides a complete and up-to-date bundle of all the CA certificates that any PRACE user (or IT4Innovations grid services user) will require. Bundle of certificates, in either p12, PEM or JKS formats, are [available here](https://winnetou.surfsara.nl/prace/certs/).
+To assist users, SURFsara (a member of PRACE) provides a complete and up-to-date bundle of all the CA certificates that any PRACE user (or IT4Innovations grid services user) will require. Bundles of certificates, in either p12, PEM or JKS format, are [available here](https://winnetou.surfsara.nl/prace/certs/).
 
-It is worth noting that gsissh-term and DART automatically updates their CA certificates from this SURFsara website. In other cases, if you receive a warning that a server’s certificate can not be validated (not trusted), then please update your CA certificates via the SURFsara website. If this fails, then please contact the IT4Innovations helpdesk.
+It is worth noting that gsissh-term and DART automatically update their CA certificates from this SURFsara website. In other cases, if you receive a warning that a server’s certificate cannot be validated (not trusted), update your CA certificates via the SURFsara website. If this fails, contact the IT4Innovations helpdesk.
 
 Lastly, if you need the CA certificates for a personal Globus 5 installation, then you can install the CA certificates from a MyProxy server with the following command.
 
@@ -104,7 +104,7 @@ To check your certificate (e.g., DN, validity, issuer, public key algorithm, etc
     openssl x509 -in usercert.pem -text -noout
 ```
 
-To download openssl if not pre-installed, [please visit](https://www.openssl.org/source/). On Macintosh Mac OS X computers openssl is already pre-installed and can be used immediately.
+To download openssl if not pre-installed, see [here](https://www.openssl.org/source/). On Mac OS X computers, openssl is already pre-installed and can be used immediately.
 
 ## Q: How Do I Create and Then Manage a Keystore?
 
@@ -134,7 +134,7 @@ Most grid services require the use of your certificate; however, the format of y
 
 If employing the PRACE version of GSISSH-term (also a Java Web Start Application), you may use either the PEM or p12 formats. Note that this service automatically installs up-to-date PRACE CA certificates.
 
-If the grid service is UNICORE, then you bind your certificate, in either the p12 format or JKS, to UNICORE during the installation of the client on your local machine. For more information, please visit [UNICORE6 in PRACE](http://www.prace-ri.eu/UNICORE6-in-PRACE)
+If the grid service is UNICORE, then you bind your certificate, in either the p12 or JKS format, to UNICORE during the installation of the client on your local machine. For more information, visit [UNICORE6 in PRACE](http://www.prace-ri.eu/UNICORE6-in-PRACE).
 
-If the grid service is part of Globus, such as GSI-SSH, GriFTP or GRAM5, then the certificates can be in either p12 or PEM format and must reside in the "$HOME/.globus" directory for Linux and Mac users or %HOMEPATH%.globus for Windows users. (Windows users will have to use the DOS command ’cmd’ to create a directory which starts with a ’.’). Further, user certificates should be named either "usercred.p12" or "usercert.pem" and "userkey.pem", and the CA certificates must be kept in a pre-specified directory as follows. For Linux and Mac users, this directory is either $HOME/.globus/certificates or /etc/grid-security/certificates. For Windows users, this directory is %HOMEPATH%.globuscertificates. (If you are using GSISSH-Term from prace-ri.eu then you do not have to create the .globus directory nor install CA certificates to use this tool alone).
+If the grid service is part of Globus, such as GSI-SSH, GridFTP or GRAM5, then the certificates can be in either p12 or PEM format and must reside in the "$HOME/.globus" directory for Linux and Mac users or %HOMEPATH%\.globus for Windows users. (Windows users will have to use the DOS command ’cmd’ to create a directory which starts with a ’.’). Further, user certificates should be named either "usercred.p12" or "usercert.pem" and "userkey.pem", and the CA certificates must be kept in a pre-specified directory as follows. For Linux and Mac users, this directory is either $HOME/.globus/certificates or /etc/grid-security/certificates. For Windows users, this directory is %HOMEPATH%\.globus\certificates. (If you are using GSISSH-Term from prace-ri.eu then you do not have to create the .globus directory nor install CA certificates to use this tool alone).
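+
+For Linux and Mac users, the expected layout can be created as follows (a sketch; Globus tools also require the private key to be readable only by the owner):
+
+```
+    mkdir -p $HOME/.globus/certificates
+    cp usercert.pem userkey.pem $HOME/.globus/
+    chmod 400 $HOME/.globus/userkey.pem
+```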
 
@@ -156,7 +156,7 @@ A proxy certificate is a short-lived certificate which may be employed by UNICOR
 
 ## Q: Someone May Have Copied or Had Access to the Private Key of My Certificate Either in a Separate File or in the Browser. What Should I Do?
 
-Please ask the CA that issued your certificate to revoke this certificate and to supply you with a new one. In addition, please report this to IT4Innovations by contacting [the support team](https://support.it4i.cz/rt).
+Ask the CA that issued your certificate to revoke this certificate and to supply you with a new one. In addition, report this to IT4Innovations by contacting [the support team](https://support.it4i.cz/rt).
 
 ## Q: My Certificate Expired. What Should I Do?
 
diff --git a/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md b/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md
index b8168ecff..4218af925 100644
--- a/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md
+++ b/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md
@@ -117,13 +117,13 @@ Certificate generation process for academic purposes, utilizing the CESNET certi
 !!! note
-    Certificate file can be installed into your email client. Web-based email interfaces cannot be used for secure communication, external application, such as Thunderbird or Outlook must be used. This way, your new credentials will be visible only in applications, that have access to your certificate.
+    The certificate file can be installed into your email client. Web-based email interfaces cannot be used for secure communication; an external application, such as Thunderbird or Outlook, must be used. This way, your new credentials will be visible only in applications that have access to your certificate.
 
-If you are not able to obtain certificate from any of the respected certification authorities, please follow the Alternative Way bellow.
+If you are not able to obtain a certificate from any of the respected certification authorities, follow the Alternative Way below.
 
 A FAQ about certificates can be found here: [Certificates FAQ](certificates-faq/).
 
 ## Alternative Way to Personal Certificate
 
-Follow these steps **only** if you can not obtain your certificate in a standard way. In case you choose this procedure, please attach a **scan of photo ID** (personal ID or passport or drivers license) when applying for login credentials.
+Follow these steps **only** if you cannot obtain your certificate in a standard way. In case you choose this procedure, attach a **scan of photo ID** (personal ID, passport, or driver's license) when applying for login credentials.
 
 !!! warning
-    Please use Firefox (clone) for following steps. Other browsers, like Chrome, are not compatible.
+    Use Firefox (or a clone) for the following steps. Other browsers, like Chrome, are not compatible.
diff --git a/docs.it4i/index.md b/docs.it4i/index.md
index 326416a60..650646963 100644
--- a/docs.it4i/index.md
+++ b/docs.it4i/index.md
@@ -60,7 +60,7 @@ local $
 ## Errors
 
 Although we have taken every care to ensure the accuracy of the content, mistakes do happen.
-If you find an inconsistency or error, please report it by visiting <http://support.it4i.cz/rt>, creating a new ticket, and entering the details.
+If you find an inconsistency or error, report it by visiting <http://support.it4i.cz/rt>, creating a new ticket, and entering the details.
 By doing so, you can save other readers from frustration and help us improve.
 
 !!! tip
diff --git a/docs.it4i/prace.md b/docs.it4i/prace.md
index 261489971..ceb5b53d8 100644
--- a/docs.it4i/prace.md
+++ b/docs.it4i/prace.md
@@ -8,7 +8,7 @@ All general [PRACE User Documentation](http://www.prace-ri.eu/user-documentation
 
 ## Help and Support
 
-If you have any troubles, need information, request support or want to install additional software, please use [PRACE Helpdesk](http://www.prace-ri.eu/helpdesk-guide264/).
+If you have any trouble, need information, require support, or want to install additional software, use the [PRACE Helpdesk](http://www.prace-ri.eu/helpdesk-guide264/).
 
-Information about the local services are provided in the [introduction of general user documentation Salomon](salomon/introduction/) and [introduction of general user documentation Anselm](anselm/introduction/). Please keep in mind, that standard PRACE accounts don't have a password to access the web interface of the local (IT4Innovations) request tracker and thus a new ticket should be created by sending an e-mail to support[at]it4i.cz.
+Information about the local services is provided in the [introduction of general user documentation Salomon](salomon/introduction/) and [introduction of general user documentation Anselm](anselm/introduction/). Keep in mind that standard PRACE accounts don't have a password to access the web interface of the local (IT4Innovations) request tracker, and thus a new ticket should be created by sending an e-mail to support[at]it4i.cz.
 
@@ -16,7 +16,7 @@ Information about the local services are provided in the [introduction of genera
 
-In general PRACE users already have a PRACE account setup through their HOMESITE (institution from their country) as a result of rewarded PRACE project proposal. This includes signed PRACE AuP, generated and registered certificates, etc.
+In general, PRACE users already have a PRACE account set up through their HOMESITE (institution from their country) as a result of a rewarded PRACE project proposal. This includes a signed PRACE AuP, generated and registered certificates, etc.
 
-If there's a special need a PRACE user can get a standard (local) account at IT4Innovations. To get an account on a cluster, the user needs to obtain the login credentials. The procedure is the same as for general users of the cluster, so please see the corresponding [section of the general documentation here](general/obtaining-login-credentials/obtaining-login-credentials/).
+If there's a special need, a PRACE user can get a standard (local) account at IT4Innovations. To get an account on a cluster, the user needs to obtain the login credentials. The procedure is the same as for general users of the cluster, so see the corresponding [section of the general documentation here](general/obtaining-login-credentials/obtaining-login-credentials/).
 
 ## Accessing the Cluster
 
@@ -24,7 +24,7 @@ If there's a special need a PRACE user can get a standard (local) account at IT4
 
 For all PRACE users the method for interactive access (login) and data transfer based on grid services from Globus Toolkit (GSI SSH and GridFTP) is supported.
 
-The user will need a valid certificate and to be present in the PRACE LDAP (please contact your HOME SITE or the primary investigator of your project for LDAP account creation).
+The user will need a valid certificate and to be present in the PRACE LDAP (contact your HOME SITE or the primary investigator of your project for LDAP account creation).
 
 Most of the information needed by PRACE users accessing the TIER-1 systems can be found here:
 
@@ -147,7 +147,7 @@ $ gsiscp -P 2222 anselm-prace.it4i.cz:_ANSELM_PATH_TO_YOUR_FILE_ _LOCAL_PATH_TO_
 
 ### Access to X11 Applications (VNC)
 
-If the user needs to run X11 based graphical application and does not have a X11 server, the applications can be run using VNC service. If the user is using regular SSH based access, please see the [section in general documentation](general/accessing-the-clusters/graphical-user-interface/x-window-system/).
+If the user needs to run an X11 based graphical application and does not have an X11 server, the applications can be run using the VNC service. If the user is using regular SSH based access, see the [section in general documentation](general/accessing-the-clusters/graphical-user-interface/x-window-system/).
 
 If the user uses GSI SSH based access, then the procedure is similar to the SSH based access ([look here](general/accessing-the-clusters/graphical-user-interface/x-window-system/)), only the port forwarding must be done using GSI SSH:
 
@@ -157,11 +157,11 @@ $ gsissh -p 2222 salomon.it4i.cz -L 5961:localhost:5961
 
 ### Access With SSH
 
-After successful obtainment of login credentials for the local IT4Innovations account, the PRACE users can access the cluster as regular users using SSH. For more information please see [the section in general documentation for Salomon](salomon/shell-and-data-access/) and [the section in general documentation for Anselm](anselm/shell-and-data-access/).
+After successfully obtaining login credentials for the local IT4Innovations account, PRACE users can access the cluster as regular users using SSH. For more information, see [the section in the general documentation for Salomon](salomon/shell-and-data-access/) and [the section in the general documentation for Anselm](anselm/shell-and-data-access/).
 
 ## File Transfers
 
-PRACE users can use the same transfer mechanisms as regular users (if they've undergone the full registration procedure). For information about this, please see [the section in the general documentation for Salomon](salomon/shell-and-data-access/) and [the section in general documentation for Anselm](anselm/shell-and-data-access/).
+PRACE users can use the same transfer mechanisms as regular users (if they've undergone the full registration procedure). For information about this, see [the section in the general documentation for Salomon](salomon/shell-and-data-access/) and [the section in the general documentation for Anselm](anselm/shell-and-data-access/).
 
 Apart from the standard mechanisms, for PRACE users to transfer data to/from Salomon cluster, a GridFTP server running Globus Toolkit GridFTP service is available. The service is available from public Internet as well as from the internal PRACE network (accessible only from other PRACE partners).
 
@@ -381,4 +381,4 @@ $ quota
 $ lfs quota -u USER_LOGIN /scratch
 ```
 
-If the quota is insufficient, please contact the [support](prace/#help-and-support) and request an increase.
+If the quota is insufficient, contact [support](prace/#help-and-support) and request an increase.
diff --git a/docs.it4i/salomon/capacity-computing.md b/docs.it4i/salomon/capacity-computing.md
index 8b0a331fb..e65e5ad99 100644
--- a/docs.it4i/salomon/capacity-computing.md
+++ b/docs.it4i/salomon/capacity-computing.md
@@ -83,7 +83,7 @@ $ qsub -N JOBNAME -J 1-900 jobscript
 506493[].isrv5
 ```
 
-In this example, we submit a job array of 900 subjobs. Each subjob will run on full node and is assumed to take less than 2 hours (please note the #PBS directives in the beginning of the jobscript file, dont' forget to set your valid PROJECT_ID and desired queue).
+In this example, we submit a job array of 900 subjobs. Each subjob will run on one full node and is assumed to take less than 2 hours (note the #PBS directives at the beginning of the jobscript file; don't forget to set your valid PROJECT_ID and desired queue).
 
 Sometimes for testing purposes, you may need to submit only one-element array. This is not allowed by PBSPro, but there's a workaround:
 
diff --git a/docs.it4i/salomon/hardware-overview.md b/docs.it4i/salomon/hardware-overview.md
index d1bd65a81..4569358cf 100644
--- a/docs.it4i/salomon/hardware-overview.md
+++ b/docs.it4i/salomon/hardware-overview.md
@@ -36,7 +36,7 @@ The parameters are summarized in the following tables:
 | w/o accelerator | 576   | 2 x Intel Xeon E5-2680v3, 2.5 GHz | 24    | 128 GB | -                                             |
 | MIC accelerated | 432   | 2 x Intel Xeon E5-2680v3, 2.5 GHz | 24    | 128 GB | 2 x Intel Xeon Phi 7120P, 61 cores, 16 GB RAM |
 
-For more details please refer to the [Compute nodes](compute-nodes/).
+For more details refer to the [Compute nodes](compute-nodes/).
 
 ## Remote Visualization Nodes
 
diff --git a/docs.it4i/salomon/job-submission-and-execution.md b/docs.it4i/salomon/job-submission-and-execution.md
index 8330fdd3e..0018ea9e3 100644
--- a/docs.it4i/salomon/job-submission-and-execution.md
+++ b/docs.it4i/salomon/job-submission-and-execution.md
@@ -2,7 +2,7 @@
 
 ## Job Submission
 
-When allocating computational resources for the job, please specify
+When allocating computational resources for the job, specify:
 
 1. suitable queue for your job (default is qprod)
 1. number of computational nodes required
@@ -460,7 +460,7 @@ cp $PBS_O_WORKDIR/input .
 cp $PBS_O_WORKDIR/mympiprog.x .
 
 # load the MPI module
-module load OpenMPI
+ml OpenMPI
 
 # execute the calculation
 mpiexec -pernode ./mympiprog.x
@@ -498,7 +498,7 @@ SCRDIR=/scratch/work/user/$USER/myjob
 cd $SCRDIR || exit
 
 # load the MPI module
-module load OpenMPI
+ml OpenMPI
 
 # execute the calculation
 mpiexec ./mympiprog.x
diff --git a/docs.it4i/salomon/software/ansys/ansys-cfx.md b/docs.it4i/salomon/software/ansys/ansys-cfx.md
index 2cf29101d..ce25a028b 100644
--- a/docs.it4i/salomon/software/ansys/ansys-cfx.md
+++ b/docs.it4i/salomon/software/ansys/ansys-cfx.md
@@ -25,7 +25,7 @@ echo Directory is `pwd`
-echo This jobs runs on the following processors:
+echo This job runs on the following processors:
 echo `cat $PBS_NODEFILE`
 
-module load ansys
+ml ansys
 
 #### Set number of processors per host listing
 #### (set to 1 as $PBS_NODEFILE lists each node twice if :ppn=2)
diff --git a/docs.it4i/salomon/software/ansys/ansys-fluent.md b/docs.it4i/salomon/software/ansys/ansys-fluent.md
index f4867b5c7..27afebf82 100644
--- a/docs.it4i/salomon/software/ansys/ansys-fluent.md
+++ b/docs.it4i/salomon/software/ansys/ansys-fluent.md
-echo This jobs runs on the following processors:
+echo This job runs on the following processors:
 echo `cat $PBS_NODEFILE`
 
 #### Load ansys module so that we find the cfx5solve command
-module load ansys
+ml ansys
 
 # Use following line to specify MPI for message-passing instead
 NCORES=`wc -l $PBS_NODEFILE |awk '{print $1}'`
@@ -82,7 +82,7 @@ input is the name of the input file.
 
 case is the name of the .cas file that the input file will utilize.
 
-fluent_args are extra ANSYS FLUENT arguments. As shown in the previous example, you can specify the interconnect by using the  -p interconnect command. The available interconnects include ethernet (the default), myrinet, infiniband,  vendor, altix, and crayx. The MPI is selected automatically, based on the specified interconnect.
+fluent_args are extra ANSYS FLUENT arguments. As shown in the previous example, you can specify the interconnect by using the -p interconnect option. The available interconnects include ethernet (the default), myrinet, InfiniBand, vendor, altix, and crayx. The MPI is selected automatically, based on the specified interconnect.
 
 outfile is the name of the file to which the standard output will be sent.
 
diff --git a/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md b/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md
index e9fda41a9..55be78c14 100644
--- a/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md
+++ b/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md
@@ -30,7 +30,7 @@ NPROCS=`wc -l < $PBS_NODEFILE`
 
 echo This job has allocated $NPROCS nodes
 
-module load ansys
+ml ansys
 
 #### Set number of processors per host listing
 #### (set to 1 as $PBS_NODEFILE lists each node twice if :ppn=2)
diff --git a/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md b/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md
index 0bde6f3a1..450c9750d 100644
--- a/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md
+++ b/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md
@@ -26,7 +26,7 @@ echo Directory is `pwd`
-echo This jobs runs on the following processors:
+echo This job runs on the following processors:
 echo `cat $PBS_NODEFILE`
 
-module load ansys
+ml ansys
 
 #### Set number of processors per host listing
 #### (set to 1 as $PBS_NODEFILE lists each node twice if :ppn=2)
diff --git a/docs.it4i/salomon/software/ansys/ansys.md b/docs.it4i/salomon/software/ansys/ansys.md
index a5cac322d..79fca741f 100644
--- a/docs.it4i/salomon/software/ansys/ansys.md
+++ b/docs.it4i/salomon/software/ansys/ansys.md
@@ -1,13 +1,13 @@
 # Overview of ANSYS Products
 
-**[SVS FEM](http://www.svsfem.cz/)** as **[ANSYS Channel partner](http://www.ansys.com/)** for Czech Republic provided all ANSYS licenses for ANSELM cluster and supports of all ANSYS Products (Multiphysics, Mechanical, MAPDL, CFX, Fluent, Maxwell, LS-DYNA...) to IT staff and ANSYS users. If you are challenging to problem of ANSYS functionality contact please [hotline@svsfem.cz](mailto:hotline@svsfem.cz?subject=Ostrava%20-%20ANSELM)
+**[SVS FEM](http://www.svsfem.cz/)**, the **[ANSYS Channel partner](http://www.ansys.com/)** for the Czech Republic, provided all ANSYS licenses for the ANSELM cluster and supports all ANSYS Products (Multiphysics, Mechanical, MAPDL, CFX, Fluent, Maxwell, LS-DYNA...) for IT staff and ANSYS users. If you encounter a problem with ANSYS functionality, contact [hotline@svsfem.cz](mailto:hotline@svsfem.cz?subject=Ostrava%20-%20ANSELM).
 
-Anselm provides as commercial as academic variants. Academic variants are distinguished by "**Academic...**" word in the name of license or by two letter preposition "**aa\_**" in the license feature name. Change of license is realized on command line respectively directly in user's PBS file (see individual products). [More about licensing here](licensing/)
+Anselm provides both commercial and academic variants. Academic variants are distinguished by the "**Academic...**" word in the license name or by the two-letter prefix "**aa\_**" in the license feature name. The license is changed on the command line or directly in the user's PBS file (see individual products). [More about licensing here](licensing/)
 
 To load the latest version of any ANSYS product (Mechanical, Fluent, CFX, MAPDL,...) load the module:
 
 ```bash
-    $ module load ansys
+    $ ml ansys
 ```
 
-ANSYS supports interactive regime, but due to assumed solution of extremely difficult tasks it is not recommended.
+ANSYS supports an interactive regime, but due to the assumed solution of extremely difficult tasks, it is not recommended.
diff --git a/docs.it4i/salomon/software/chemistry/nwchem.md b/docs.it4i/salomon/software/chemistry/nwchem.md
index 465adf204..bbabdd3ee 100644
--- a/docs.it4i/salomon/software/chemistry/nwchem.md
+++ b/docs.it4i/salomon/software/chemistry/nwchem.md
@@ -16,7 +16,7 @@ The following versions are currently installed:
 For a current list of installed versions, execute:
 
 ```bash
-    module avail NWChem
+    ml av NWChem
 ```
 
-The recommend to use version 6.5. Version 6.3 fails on Salomon nodes with accelerator, because it attempts to communicate over scif0 interface. In 6.5 this is avoided by setting ARMCI_OPENIB_DEVICE=mlx4_0, this setting is included in the module.
+We recommend using version 6.5. Version 6.3 fails on Salomon nodes with an accelerator, because it attempts to communicate over the scif0 interface. In 6.5 this is avoided by setting ARMCI_OPENIB_DEVICE=mlx4_0; this setting is included in the module.
diff --git a/docs.it4i/salomon/software/debuggers/valgrind.md b/docs.it4i/salomon/software/debuggers/valgrind.md
index 5e6a2c8e8..5a7cf7db0 100644
--- a/docs.it4i/salomon/software/debuggers/valgrind.md
+++ b/docs.it4i/salomon/software/debuggers/valgrind.md
@@ -13,7 +13,7 @@ The main tools available in Valgrind are :
-* **Hellgrind** and **DRD** can detect race conditions in multi-threaded applications.
+* **Helgrind** and **DRD** can detect race conditions in multi-threaded applications.
 * **Cachegrind**, a cache profiler.
 * **Callgrind**, a callgraph analyzer.
-* For a full list and detailed documentation, please refer to the [official Valgrind documentation](http://valgrind.org/docs/).
+* For a full list and detailed documentation, refer to the [official Valgrind documentation](http://valgrind.org/docs/).
 
 ## Installed Versions
 
@@ -21,7 +21,7 @@ There are two versions of Valgrind available on the cluster.
 
-* Version 3.8.1, installed by operating system vendor in /usr/bin/valgrind. This version is available by default, without the need to load any module. This version however does not provide additional MPI support. Also, it does not support AVX2 instructions, debugging of an AVX2-enabled executable with this version will fail
+* Version 3.8.1, installed by the operating system vendor in /usr/bin/valgrind. This version is available by default, without the need to load any module. This version however does not provide additional MPI support. Also, it does not support AVX2 instructions; debugging of an AVX2-enabled executable with this version will fail.
 * Version 3.11.0 built by ICC with support for Intel MPI, available in module Valgrind/3.11.0-intel-2015b. After loading the module, this version replaces the default valgrind.
-* Version 3.11.0 built by GCC with support for Open MPI, module Valgrind/3.11.0-foss-2015b
+* Version 3.11.0 built by GCC with support for OpenMPI, module Valgrind/3.11.0-foss-2015b
 
 ## Usage
 
diff --git a/docs.it4i/salomon/software/numerical-languages/octave.md b/docs.it4i/salomon/software/numerical-languages/octave.md
index 7484d97a3..787e6a325 100644
--- a/docs.it4i/salomon/software/numerical-languages/octave.md
+++ b/docs.it4i/salomon/software/numerical-languages/octave.md
@@ -9,7 +9,7 @@ Two versions of octave are available on the cluster, via module
 | **Stable** | Octave 3.8.2 | Octave |
 
 ```bash
-    $ module load Octave
+    $ ml Octave
 ```
 
-The octave on the cluster is linked to highly optimized MKL mathematical library. This provides threaded parallelization to many octave kernels, notably the linear algebra subroutines. Octave runs these heavy calculation kernels without any penalty. By default, octave would parallelize to 24 threads. You may control the threads by setting the OMP_NUM_THREADS environment variable.
+Octave on the cluster is linked to the highly optimized MKL mathematical library. This provides threaded parallelization to many octave kernels, notably the linear algebra subroutines. Octave runs these heavy calculation kernels without any penalty. By default, octave parallelizes to 24 threads. You may control the threads by setting the OMP_NUM_THREADS environment variable.
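+
+For example, to restrict octave to 8 threads for a session (the value is illustrative):
+
+```bash
+    $ export OMP_NUM_THREADS=8
+    $ octave
+```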
@@ -33,7 +33,7 @@ To run octave in batch mode, write an octave script, then write a bash jobscript
     cp $PBS_O_WORKDIR/octcode.m .
 
     # load octave module
-    module load Octave
+    ml Octave
 
     # execute the calculation
     octave -q --eval octcode > output.out
@@ -53,4 +53,4 @@ The octave c compiler mkoctfile calls the GNU gcc 4.8.1 for compiling native c c
     $ mkoctfile -v
 ```
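+
+For example, to compile a native C source into an oct-file (the file name is illustrative):
+
+```bash
+    $ mkoctfile myfunction.c
+```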
 
-Octave may use MPI for inter-process communication This functionality is currently not supported on the cluster cluster. In case you require the octave interface to MPI, please contact our [cluster support](https://support.it4i.cz/rt/).
+Octave may use MPI for inter-process communication. This functionality is currently not supported on the cluster. In case you require the octave interface to MPI, contact our [cluster support](https://support.it4i.cz/rt/).
diff --git a/docs.it4i/salomon/storage.md b/docs.it4i/salomon/storage.md
index 61adc35e7..a592c1058 100644
--- a/docs.it4i/salomon/storage.md
+++ b/docs.it4i/salomon/storage.md
@@ -228,7 +228,7 @@ Default ACL mechanism can be used to replace setuid/setgid permissions on direct
 
 ### Home
 
-Users home directories /home/username reside on HOME file system. Accessible capacity is 0.5 PB, shared among all users. Individual users are restricted by file system usage quotas, set to 250 GB per user. If 250 GB should prove as insufficient for particular user, please contact [support](https://support.it4i.cz/rt), the quota may be lifted upon request.
+Users' home directories /home/username reside on the HOME file system. Accessible capacity is 0.5 PB, shared among all users. Individual users are restricted by file system usage quotas, set to 250 GB per user. If 250 GB should prove insufficient for a particular user, contact [support](https://support.it4i.cz/rt); the quota may be lifted upon request.
 
 !!! note
     The HOME file system is intended for preparation, evaluation, processing and storage of data generated by active Projects.
@@ -274,7 +274,7 @@ The WORK workspace is hosted on SCRATCH file system. The SCRATCH is realized as
 
 ### Temp
 
-The TEMP workspace resides on SCRATCH file system. The TEMP workspace accesspoint is  /scratch/temp.  Users may freely create subdirectories and files on the workspace. Accessible capacity is 1.6 PB, shared among all users on TEMP and WORK. Individual users are restricted by file system usage quotas, set to 100 TB per user. The purpose of this quota is to prevent runaway programs from filling the entire file system and deny service to other users. >If 100 TB should prove as insufficient for particular user, please contact [support](https://support.it4i.cz/rt), the quota may be lifted upon request.
+The TEMP workspace resides on the SCRATCH file system. The TEMP workspace accesspoint is /scratch/temp. Users may freely create subdirectories and files on the workspace. Accessible capacity is 1.6 PB, shared among all users on TEMP and WORK. Individual users are restricted by file system usage quotas, set to 100 TB per user. The purpose of this quota is to prevent runaway programs from filling the entire file system and denying service to other users. If 100 TB should prove insufficient for a particular user, contact [support](https://support.it4i.cz/rt); the quota may be lifted upon request.
 
 !!! note
     The TEMP workspace is intended for temporary scratch data generated during the calculation as well as for high performance access to input and output files. All I/O intensive jobs must use the TEMP workspace as their working directory.
@@ -403,7 +403,7 @@ User of data storage CESNET (DU) association can become organizations or an indi
 
 User may only use data storage CESNET for data transfer and storage which are associated with activities in science, research, development, the spread of education, culture and prosperity. In detail see “Acceptable Use Policy CESNET Large Infrastructure (Acceptable Use Policy, AUP)”.
 
-The service is documented [here](https://du.cesnet.cz/en/start). For special requirements please contact directly CESNET Storage Department via e-mail [du-support(at)cesnet.cz](mailto:du-support@cesnet.cz).
+The service is documented [here](https://du.cesnet.cz/en/start). For special requirements, contact the CESNET Storage Department directly via e-mail at [du-support(at)cesnet.cz](mailto:du-support@cesnet.cz).
 
 The procedure to obtain the CESNET access is quick and trouble-free.
 
@@ -457,7 +457,7 @@ $ cp -a mydir cesnet/.
 $ cp cesnet/myfile .
 ```
 
-Once done, please remember to unmount the storage
+Once done, remember to unmount the storage:
 
 ```console
 $ fusermount -u cesnet
diff --git a/docs.it4i/software/cae/comsol/comsol-multiphysics.md b/docs.it4i/software/cae/comsol/comsol-multiphysics.md
index 7a5d7d11f..dfe984124 100644
--- a/docs.it4i/software/cae/comsol/comsol-multiphysics.md
+++ b/docs.it4i/software/cae/comsol/comsol-multiphysics.md
@@ -62,8 +62,8 @@ echo '**PBS_NODEFILE***END*********'
 
 text_nodes < cat $PBS_NODEFILE
 
-module load COMSOL
-# module load COMSOL/51-EDU
+ml COMSOL
+# ml COMSOL/51-EDU
 
-ntask=$(wc -l $PBS_NODEFILE)
+# count tasks; redirect so wc prints only the number
+ntask=$(wc -l < $PBS_NODEFILE)
 
@@ -76,7 +76,7 @@ Working directory has to be created before sending the (comsol.pbs) job script i
 
 COMSOL is the software package for the numerical solution of the partial differential equations. LiveLink for MATLAB allows connection to the COMSOL API (Application Programming Interface) with the benefits of the programming language and computing environment of the MATLAB.
 
-LiveLink for MATLAB is available in both **EDU** and **COM** **variant** of the COMSOL release. On the clusters 1 commercial (**COM**) license and the 5 educational (**EDU**) licenses of LiveLink for MATLAB (please see the [ISV Licenses](../isv_licenses/)) are available. Following example shows how to start COMSOL model from MATLAB via LiveLink in the interactive mode (on Anselm use 16 threads).
+LiveLink for MATLAB is available in both **EDU** and **COM** **variants** of the COMSOL release. On the clusters, one commercial (**COM**) license and five educational (**EDU**) licenses of LiveLink for MATLAB are available (see the [ISV Licenses](../isv_licenses/)). The following example shows how to start a COMSOL model from MATLAB via LiveLink in interactive mode (on Anselm, use 16 threads).
 
 ```console
 $ xhost +
@@ -107,8 +107,8 @@ echo '**PBS_NODEFILE***END*********'
 
 text_nodes < cat $PBS_NODEFILE
 
-module load MATLAB
-module load COMSOL/51-EDU
+ml MATLAB
+ml COMSOL/51-EDU
 
-ntask=$(wc -l $PBS_NODEFILE)
+# count tasks; redirect so wc prints only the number
+ntask=$(wc -l < $PBS_NODEFILE)
 
diff --git a/docs.it4i/software/compilers.md b/docs.it4i/software/compilers.md
index f35b01f25..293926cbe 100644
--- a/docs.it4i/software/compilers.md
+++ b/docs.it4i/software/compilers.md
@@ -24,14 +24,14 @@ Commercial licenses:
 
 ## Intel Compilers
 
-For information about the usage of Intel Compilers and other Intel products, please read the [Intel Parallel studio](intel-suite/intel-compilers/) page.
+For information about the usage of Intel Compilers and other Intel products, read the [Intel Parallel Studio](intel-suite/intel-compilers/) page.
 
 ## PGI Compilers (Only on Salomon)
 
 The Portland Group Cluster Development Kit (PGI CDK) is available on Salomon.
 
 ```console
-$ module load PGI
+$ ml PGI
 $ pgcc -v
 $ pgc++ -v
 $ pgf77 -v
@@ -45,8 +45,8 @@ The PGI CDK also incudes tools for debugging and profiling.
 PGDBG OpenMP/MPI debugger and PGPROF OpenMP/MPI profiler are available
 
 ```console
-$ module load PGI
-$ module load Java
+$ ml PGI
+$ ml Java
 $ pgdbg &
 $ pgprof &
 ```
@@ -60,7 +60,7 @@ For compatibility reasons there are still available the original (old 4.4.7-11)
 It is strongly recommended to use the up to date version which comes with the module GCC:
 
 ```console
-$ module load GCC
+$ ml GCC
 $ gcc -v
 $ g++ -v
 $ gfortran -v
@@ -76,7 +76,7 @@ $ echo $DEBUGFLAGS
 -O0 -g
 ```
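+
+These module-provided flags can be passed straight to the compiler, e.g. for a debug build (a minimal sketch, assuming a source file example.c):
+
+```console
+$ gcc $DEBUGFLAGS example.c -o example_debug
+```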
 
-For more information about the possibilities of the compilers, please see the man pages.
+For more information about the possibilities of the compilers, see the man pages.
 
 ## Unified Parallel C
 
@@ -187,8 +187,8 @@ For more information see the man pages.
 
 ## Java
 
-For information how to use Java (runtime and/or compiler), please read the [Java page](java/).
+For information on how to use Java (runtime and/or compiler), read the [Java page](java/).
 
 ## NVIDIA CUDA
 
-For information how to work with NVIDIA CUDA, please read the [NVIDIA CUDA page](../anselm/software/nvidia-cuda/).
+For information on how to work with NVIDIA CUDA, read the [NVIDIA CUDA page](../anselm/software/nvidia-cuda/).
diff --git a/docs.it4i/software/debuggers/aislinn.md b/docs.it4i/software/debuggers/aislinn.md
index 2a945a04e..0a10684f0 100644
--- a/docs.it4i/software/debuggers/aislinn.md
+++ b/docs.it4i/software/debuggers/aislinn.md
@@ -6,7 +6,7 @@
 * Web page of the project: <http://verif.cs.vsb.cz/aislinn/>
 
 !!! note
-    Aislinn is software developed at IT4Innovations and some parts are still considered experimental. If you have any questions or experienced any problems, please contact the author: <mailto:stanislav.bohm@vsb.cz>.
+    Aislinn is software developed at IT4Innovations and some parts are still considered experimental. If you have any questions or experience any problems, contact the author: <mailto:stanislav.bohm@vsb.cz>.
 
 ## Usage
 
diff --git a/docs.it4i/software/debuggers/intel-vtune-amplifier.md b/docs.it4i/software/debuggers/intel-vtune-amplifier.md
index 8842e4f47..d3529ba98 100644
--- a/docs.it4i/software/debuggers/intel-vtune-amplifier.md
+++ b/docs.it4i/software/debuggers/intel-vtune-amplifier.md
@@ -72,7 +72,7 @@ This mode is useful for native Xeon Phi applications launched directly on the ca
-This mode is useful for applications that are launched from the host and use offload, OpenCL or mpirun. In *Analysis Target* window, select *Intel Xeon Phi coprocessor (native)*, choose path to the binaryand MIC card to run on.
+This mode is useful for applications that are launched from the host and use offload, OpenCL or mpirun. In the *Analysis Target* window, select *Intel Xeon Phi coprocessor (native)*, then choose the path to the binary and the MIC card to run on.
 
 !!! note
-    If the analysis is interrupted or aborted, further analysis on the card might be impossible and you will get errors like "ERROR connecting to MIC card". In this case please contact our support to reboot the MIC card.
+    If the analysis is interrupted or aborted, further analysis on the card might be impossible and you will get errors like "ERROR connecting to MIC card". In this case, contact our support to reboot the MIC card.
 
 You may also use remote analysis to collect data from the MIC and then analyze it in the GUI later :
 
diff --git a/docs.it4i/software/debuggers/papi.md b/docs.it4i/software/debuggers/papi.md
index c8c661947..a873d3693 100644
--- a/docs.it4i/software/debuggers/papi.md
+++ b/docs.it4i/software/debuggers/papi.md
@@ -16,7 +16,7 @@ To use PAPI, load [module](../../environment-and-modules/) PAPI:
 $ ml papi
 ```
 
-This will load the default version. Execute module avail pap for a list of installed versions.
+This will load the default version. Execute `ml av papi` for a list of installed versions.
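+
+For example, to list them:
+
+```console
+$ ml av papi
+```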
 
 ## Utilities
 
diff --git a/docs.it4i/software/debuggers/score-p.md b/docs.it4i/software/debuggers/score-p.md
index 186762617..45de7f9cb 100644
--- a/docs.it4i/software/debuggers/score-p.md
+++ b/docs.it4i/software/debuggers/score-p.md
@@ -114,4 +114,4 @@ subroutine foo(...)
 end subroutine foo
 ```
 
-The directives are ignored if the program is compiled without Score-P. Again, please refer to the [documentation](https://silc.zih.tu-dresden.de/scorep-current/pdf/scorep.pdf) for a more elaborate description.
+The directives are ignored if the program is compiled without Score-P. Again, refer to the [documentation](https://silc.zih.tu-dresden.de/scorep-current/pdf/scorep.pdf) for a more elaborate description.
diff --git a/docs.it4i/software/debuggers/valgrind.md b/docs.it4i/software/debuggers/valgrind.md
index d94d205a7..6acf83911 100644
--- a/docs.it4i/software/debuggers/valgrind.md
+++ b/docs.it4i/software/debuggers/valgrind.md
@@ -15,7 +15,7 @@ The main tools available in Valgrind are :
 * **Hellgrind** and **DRD** can detect race conditions in multi-threaded applications.
 * **Cachegrind**, a cache profiler.
 * **Callgrind**, a callgraph analyzer.
-* For a full list and detailed documentation, please refer to the [official Valgrind documentation](http://valgrind.org/docs/).
+* For a full list and detailed documentation, refer to the [official Valgrind documentation](http://valgrind.org/docs/).
 
 ## Installed Versions
 
@@ -28,7 +28,7 @@ There are two versions of Valgrind available on the Salomon.
 
 * Version 3.8.1, installed by operating system vendor in /usr/bin/valgrind. This version is available by default, without the need to load any module. This version however does not provide additional MPI support. Also, it does not support AVX2 instructions, debugging of an AVX2-enabled executable with this version will fail
 * Version 3.11.0 built by ICC with support for Intel MPI, available in module Valgrind/3.11.0-intel-2015b. After loading the module, this version replaces the default valgrind.
-* Version 3.11.0 built by GCC with support for Open MPI, module Valgrind/3.11.0-foss-2015b
+* Version 3.11.0 built by GCC with support for OpenMPI, module Valgrind/3.11.0-foss-2015b
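+
+To use one of the newer builds, load the corresponding module first (a sketch using the Intel build listed above):
+
+```console
+$ ml Valgrind/3.11.0-intel-2015b
+$ valgrind --version
+```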
 
 ## Usage
 
diff --git a/docs.it4i/software/intel/intel-xeon-phi-anselm.md b/docs.it4i/software/intel/intel-xeon-phi-anselm.md
index b1e86256d..009c57fd5 100644
--- a/docs.it4i/software/intel/intel-xeon-phi-anselm.md
+++ b/docs.it4i/software/intel/intel-xeon-phi-anselm.md
@@ -258,7 +258,7 @@ or by setting environment variable
 $ export MKL_MIC_ENABLE=1
 ```
 
-To get more information about automatic offload please refer to "[Using Intel® MKL Automatic Offload on Intel ® Xeon Phi™ Coprocessors](http://software.intel.com/sites/default/files/11MIC42_How_to_Use_MKL_Automatic_Offload_0.pdf)" white paper or [Intel MKL documentation](https://software.intel.com/en-us/articles/intel-math-kernel-library-documentation).
+To get more information about automatic offload, refer to the "[Using Intel® MKL Automatic Offload on Intel ® Xeon Phi™ Coprocessors](http://software.intel.com/sites/default/files/11MIC42_How_to_Use_MKL_Automatic_Offload_0.pdf)" white paper or the [Intel MKL documentation](https://software.intel.com/en-us/articles/intel-math-kernel-library-documentation).
 
 ### Automatic Offload Example
 
@@ -266,7 +266,7 @@ At first get an interactive PBS session on a node with MIC accelerator and load
 
 ```console
 $ qsub -I -q qmic -A OPEN-0-0 -l select=1:ncpus=16
-$ module load intel
+$ ml intel
 ```
 
 Following example show how to automatically offload an SGEMM (single precision - general matrix multiply) function to MIC coprocessor. The code can be copied to a file and compiled without any necessary modification.
@@ -440,7 +440,7 @@ OpenCL (Open Computing Language) is an open standard for general-purpose paralle
 On Anselm OpenCL is installed only on compute nodes with MIC accelerator, therefore OpenCL code can be compiled only on these nodes.
 
 ```console
-module load opencl-sdk opencl-rt
+ml opencl-sdk opencl-rt
 ```
 
 Always load "opencl-sdk" (providing devel files like headers) and "opencl-rt" (providing dynamic library libOpenCL.so) modules to compile and link OpenCL code. Load "opencl-rt" for running your compiled code.
@@ -555,7 +555,7 @@ $ qsub -I -q qmic -A NONE-0-0
 The only supported implementation of MPI standard for Intel Xeon Phi is Intel MPI. To setup a fully functional development environment a combination of Intel compiler and Intel MPI has to be used. On a host load following modules before compilation:
 
 ```console
-$ module load intel
+$ ml intel
 ```
 
 To compile an MPI code for host use:
@@ -715,7 +715,7 @@ The output should be again similar to:
 ```
 
 !!! note
-    `mpiexec.hydra` requires a file the MIC filesystem. If the file is missing please contact the system administrators.
+    `mpiexec.hydra` requires a file on the MIC filesystem. If the file is missing, contact the system administrators.
 
 A simple test to see if the file is present is to execute:
 
@@ -901,4 +901,4 @@ Each host or accelerator is listed only once per file. User has to specify how m
 
 ## Optimization
 
-For more details about optimization techniques please read Intel document [Optimization and Performance Tuning for Intel® Xeon Phi™ Coprocessors](http://software.intel.com/en-us/articles/optimization-and-performance-tuning-for-intel-xeon-phi-coprocessors-part-1-optimization "http&#x3A;//software.intel.com/en-us/articles/optimization-and-performance-tuning-for-intel-xeon-phi-coprocessors-part-1-optimization")
+For more details about optimization techniques, read the Intel document [Optimization and Performance Tuning for Intel® Xeon Phi™ Coprocessors](http://software.intel.com/en-us/articles/optimization-and-performance-tuning-for-intel-xeon-phi-coprocessors-part-1-optimization "http&#x3A;//software.intel.com/en-us/articles/optimization-and-performance-tuning-for-intel-xeon-phi-coprocessors-part-1-optimization").
diff --git a/docs.it4i/software/intel/intel-xeon-phi-salomon.md b/docs.it4i/software/intel/intel-xeon-phi-salomon.md
index 8d6c9c619..982f2d59c 100644
--- a/docs.it4i/software/intel/intel-xeon-phi-salomon.md
+++ b/docs.it4i/software/intel/intel-xeon-phi-salomon.md
@@ -304,7 +304,7 @@ or by setting environment variable
 $ export MKL_MIC_ENABLE=1
 ```
 
-To get more information about automatic offload please refer to "[Using Intel® MKL Automatic Offload on Intel ® Xeon Phi™ Coprocessors](http://software.intel.com/sites/default/files/11MIC42_How_to_Use_MKL_Automatic_Offload_0.pdf)" white paper or [Intel MKL documentation](https://software.intel.com/en-us/articles/intel-math-kernel-library-documentation).
+To get more information about automatic offload, refer to the "[Using Intel® MKL Automatic Offload on Intel ® Xeon Phi™ Coprocessors](http://software.intel.com/sites/default/files/11MIC42_How_to_Use_MKL_Automatic_Offload_0.pdf)" white paper or the [Intel MKL documentation](https://software.intel.com/en-us/articles/intel-math-kernel-library-documentation).
 
 ### Automatic Offload Example
 
@@ -532,7 +532,7 @@ OpenCL (Open Computing Language) is an open standard for general-purpose paralle
 On Salomon OpenCL is installed only on compute nodes with MIC accelerator, therefore OpenCL code can be compiled only on these nodes.
 
 ```console
-module load opencl-sdk opencl-rt
+ml opencl-sdk opencl-rt
 ```
 
 Always load "opencl-sdk" (providing devel files like headers) and "opencl-rt" (providing dynamic library libOpenCL.so) modules to compile and link OpenCL code. Load "opencl-rt" for running your compiled code.
@@ -640,7 +640,7 @@ $ qsub -I -q qprod -l select=1:ncpus=24:accelerator=True:naccelerators=2:acceler
 The only supported implementation of MPI standard for Intel Xeon Phi is Intel MPI. To setup a fully functional development environment a combination of Intel compiler and Intel MPI has to be used. On a host load following modules before compilation:
 
 ```console
-$ module load intel
+$ ml intel
 ```
 
 To compile an MPI code for host use:
@@ -815,7 +815,7 @@ Hello world from process 0 of 4 on host r38u31n1000-mic0
 ```
 
 !!! hint
-    **"mpiexec.hydra"** requires a file the MIC filesystem. If the file is missing please contact the system administrators.
+    **"mpiexec.hydra"** requires a file the MIC filesystem. If the file is missing contact the system administrators.
 
 A simple test to see if the file is present is to execute:
 
@@ -830,7 +830,7 @@ To get access to multiple nodes with MIC accelerator, user has to use PBS to all
 
 ```console
 $ qsub -I -q qprod -l select=2:ncpus=24:accelerator=True:naccelerators=2:accelerator_model=phi7120 -A NONE-0-0
-$ module load intel impi
+$ ml intel impi
 ```
 
 This command connects user through ssh to one of the nodes immediately. To see the other nodes that have been allocated use:
@@ -987,4 +987,4 @@ Each host or accelerator is listed only once per file. User has to specify how m
 
 ## Optimization
 
-For more details about optimization techniques please read Intel document [Optimization and Performance Tuning for Intel® Xeon Phi™ Coprocessors](http://software.intel.com/en-us/articles/optimization-and-performance-tuning-for-intel-xeon-phi-coprocessors-part-1-optimization "http&#x3A;//software.intel.com/en-us/articles/optimization-and-performance-tuning-for-intel-xeon-phi-coprocessors-part-1-optimization")
+For more details about optimization techniques, read the Intel document [Optimization and Performance Tuning for Intel® Xeon Phi™ Coprocessors](http://software.intel.com/en-us/articles/optimization-and-performance-tuning-for-intel-xeon-phi-coprocessors-part-1-optimization "http&#x3A;//software.intel.com/en-us/articles/optimization-and-performance-tuning-for-intel-xeon-phi-coprocessors-part-1-optimization").
diff --git a/docs.it4i/software/isv_licenses.md b/docs.it4i/software/isv_licenses.md
index 63cd44719..ad804ef99 100644
--- a/docs.it4i/software/isv_licenses.md
+++ b/docs.it4i/software/isv_licenses.md
@@ -93,7 +93,7 @@ Example of PBS Pro resource name, based on APP and FEATURE name:
 | matlab-edu  | MATLAB_Distrib_Comp_Engine | feature_matlab-edu_MATLAB_Distrib_Comp_Engine   |
 | matlab-edu  | Image_Acquisition_Toolbox  | feature_matlab-edu_Image_Acquisition_Toolbox\\  |
 
-!!! Warnig
+!!! warning
-Resource names in PBS Pro are case sensitive.
+    Resource names in PBS Pro are case sensitive.
 
 ### Example of qsub Statement
diff --git a/docs.it4i/software/lang/java.md b/docs.it4i/software/lang/java.md
index d8e10e70a..67e77ab26 100644
--- a/docs.it4i/software/lang/java.md
+++ b/docs.it4i/software/lang/java.md
@@ -22,13 +22,13 @@ $ javac -version
 $ which javac
 ```
 
-Java applications may use MPI for inter-process communication, in conjunction with Open MPI. Read more on <http://www.open-mpi.org/faq/?category=java>. This functionality is currently not supported on Anselm cluster. In case you require the java interface to MPI, please contact [cluster support](https://support.it4i.cz/rt/).
+Java applications may use MPI for inter-process communication, in conjunction with OpenMPI. Read more on <http://www.open-mpi.org/faq/?category=java>. This functionality is currently not supported on the Anselm cluster. If you require the Java interface to MPI, contact [cluster support](https://support.it4i.cz/rt/).
 
 ## Java With OpenMPI
 
-Because there is an increasing interest in using Java for HPC. Also, MPI can benefit from Java because its widespread use makes it likely to find new uses beyond traditional HPC applications.
+There is an increasing interest in using Java for HPC. MPI can also benefit from Java, because its widespread use makes it likely to find new uses beyond traditional HPC applications.
 
-The Java bindings are integrated into Open MPI starting from the v1.7 series. Beginning with the v2.0 series, the Java bindings include coverage of MPI-3.1.
+The Java bindings are integrated into OpenMPI starting from the v1.7 series. Beginning with the v2.0 series, the Java bindings include coverage of MPI-3.1.
 
 ### Example (Hello.java)
 
@@ -83,4 +83,4 @@ Hello world from rank 20 of 28
 Hello world from rank 5 of 28
 Hello world from rank 21 of 28
 Hello world from rank 22 of 28
-```
\ No newline at end of file
+```
diff --git a/docs.it4i/software/modules/lmod.md b/docs.it4i/software/modules/lmod.md
index 4aad91ce8..bb949145c 100644
--- a/docs.it4i/software/modules/lmod.md
+++ b/docs.it4i/software/modules/lmod.md
@@ -6,7 +6,7 @@ Detailed documentation on Lmod is available [here](http://lmod.readthedocs.io).
 
 ## Benefits
 
-* significantly more responsive module commands, in particular module avail (ml av)
+* significantly more responsive module commands, in particular `ml av`
 * easier to use interface
 * module files can be written in either Tcl or Lua syntax (and both types of modules can be mixed together)
 
@@ -17,10 +17,10 @@ Below you will find more details and examples.
 | command                  | equivalent/explanation                                           |
 | ------------------------ | ---------------------------------------------------------------- |
 | ml                       | module list                                                      |
-| ml GCC/6.2.0-2.27        | module load GCC/6.2.0-2.27                                       |
+| ml GCC/6.2.0-2.27        | module load GCC/6.2.0-2.27                                       |
 | ml -GCC/6.2.0-2.27       | module unload GCC/6.2.0-2.27                                     |
 | ml purge                 | module unload all modules                                        |
-| ml av                    | module avail                                                     |
+| ml av                    | module avail                                                     |
 | ml show GCC/6.2.0-2.27   | module show GCC                                                  |
 | ml spider                | gcc searches (case-insensitive) for gcc in all available modules |
 | ml spider GCC/6.2.0-2.27 | show all information about the module GCC/6.2.0-2.27             |
@@ -121,7 +121,7 @@ $ module spider GCC/6.2.0-2.27
     Description:
       The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Java, and Ada, as well as libraries for these languages (libstdc++, libgcj,...). - Homepage: http://gcc.gnu.org/
 
-    This module can be loaded directly: module load GCC/6.2.0-2.27
+    This module can be loaded directly: module load GCC/6.2.0-2.27
 
     Help:
       The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Java, and Ada,
diff --git a/docs.it4i/software/mpi/mpi.md b/docs.it4i/software/mpi/mpi.md
index ab7741589..c65c0d8d2 100644
--- a/docs.it4i/software/mpi/mpi.md
+++ b/docs.it4i/software/mpi/mpi.md
@@ -13,7 +13,7 @@ The Salomon cluster provides several implementations of the MPI library:
 
 MPI libraries are activated via the environment modules.
 
-Look up section modulefiles/mpi in module avail
+Look up the section modulefiles/mpi in the `ml av` output:
 
 ```console
 $ ml av
diff --git a/docs.it4i/software/mpi/mpi4py-mpi-for-python.md b/docs.it4i/software/mpi/mpi4py-mpi-for-python.md
index c2a2976b5..a3b3f69ed 100644
--- a/docs.it4i/software/mpi/mpi4py-mpi-for-python.md
+++ b/docs.it4i/software/mpi/mpi4py-mpi-for-python.md
@@ -28,7 +28,7 @@ OpenMPI/1.8.6-GNU-5.1.0-2.25     OpenMPI/1.8.8-GNU-5.1.0-2.25  OpenMPI/1.10.1-GN
     OpenMPI/1.8.8-iccifort-2015.3.187-GNU-4.9.3-2.25   OpenMPI/2.0.2-GCC-6.3.0-2.27
 ```
 
-!!! Warning "Flavours"
+!!! warning "Flavours"
 
     * modules Python/x.x.x-intel... - intel MPI
     * modules Python/x.x.x-foss...  - OpenMPI
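+
+    For example, to get the OpenMPI flavour (a sketch reusing the placeholder notation above; substitute a real version from `ml av Python`):
+
+    ```console
+    $ ml Python/x.x.x-foss-2015b
+    ```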
diff --git a/docs.it4i/software/mpi/ompi-examples.md b/docs.it4i/software/mpi/ompi-examples.md
index 912f7d348..b96406497 100644
--- a/docs.it4i/software/mpi/ompi-examples.md
+++ b/docs.it4i/software/mpi/ompi-examples.md
@@ -1,6 +1,6 @@
 # OpenMPI Sample Applications
 
-Sample MPI applications provided both as a trivial primer to MPI as well as simple tests to ensure that your Open MPI installation is working properly.
+Sample MPI applications are provided both as a trivial primer to MPI and as simple tests to ensure that your OpenMPI installation is working properly.
 
 ## Examples
 
@@ -38,11 +38,11 @@ Additionally, there's one further example application, but this one only uses th
 
 Download [examples](../../src/ompi/ompi.tar.gz).
 
-The Makefile in this directory will build the examples for the supported languages (e.g., if you do not have the Fortran "use mpi" bindings compiled as part of Open MPI, those examples will be skipped).
+The Makefile in this directory will build the examples for the supported languages (e.g., if you do not have the Fortran "use mpi" bindings compiled as part of OpenMPI, those examples will be skipped).
 
 The Makefile assumes that the wrapper compilers mpicc, mpic++, and mpifort are in your path.
 
-Although the Makefile is tailored for Open MPI (e.g., it checks the *mpi_info* command to see if you have support for C++, mpif.h, use mpi, and use mpi_f08 F90), all of the example programs are pure MPI, and therefore not specific to Open MPI.  Hence, you can use a different MPI implementation to compile and run these programs if you wish.
+Although the Makefile is tailored for OpenMPI (e.g., it checks the *mpi_info* command to see if you have support for C++, mpif.h, use mpi, and use mpi_f08 F90), all of the example programs are pure MPI, and therefore not specific to OpenMPI.  Hence, you can use a different MPI implementation to compile and run these programs if you wish.
 
 ```console
 [login@cn204.anselm ]$ tar xvf ompi.tar.gz
diff --git a/docs.it4i/software/numerical-languages/matlab.md b/docs.it4i/software/numerical-languages/matlab.md
index e3bccc1a9..89446eb39 100644
--- a/docs.it4i/software/numerical-languages/matlab.md
+++ b/docs.it4i/software/numerical-languages/matlab.md
@@ -21,9 +21,9 @@ $ ml av MATLAB
 
 If you need to use the Matlab GUI to prepare your Matlab programs, you can use Matlab directly on the login nodes. But for all computations use Matlab on the compute nodes via PBS Pro scheduler.
 
-If you require the Matlab GUI, please follow the general information about [running graphical applications](../../general/accessing-the-clusters/graphical-user-interface/x-window-system/).
+If you require the Matlab GUI, follow the general information about [running graphical applications](../../general/accessing-the-clusters/graphical-user-interface/x-window-system/).
 
-Matlab GUI is quite slow using the X forwarding built in the PBS (qsub -X), so using X11 display redirection either via SSH or directly by xauth (please see the "GUI Applications on Compute Nodes over VNC" part [here](../../general/accessing-the-clusters/graphical-user-interface/x-window-system/)) is recommended.
+The Matlab GUI is quite slow over the X forwarding built into PBS (qsub -X), so X11 display redirection, either via SSH or directly by xauth (see the "GUI Applications on Compute Nodes over VNC" section [here](../../general/accessing-the-clusters/graphical-user-interface/x-window-system/)), is recommended.
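+
+A minimal sketch of the SSH variant (assuming the Salomon login address):
+
+```console
+$ ssh -X username@salomon.it4i.cz
+```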
 
 To run Matlab with GUI, use
 
@@ -104,7 +104,7 @@ mkdir -p $SCR ; cd $SCR || exit
 cp $PBS_O_WORKDIR/matlabcode.m .
 
 # load modules
-module load MATLAB/2015a-EDU
+ml MATLAB/2015a-EDU
 
 # execute the calculation
 matlab -nodisplay -r matlabcode > output.out
@@ -249,15 +249,15 @@ delete(pool)
 
 ### Non-Interactive Session and Licenses
 
-If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least the `-l __feature__matlab__MATLAB=1` for EDU variant of Matlab. More information about how to check the license features states and how to request them with PBS Pro, please [look here](../isv_licenses/).
+If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least `-l __feature__matlab__MATLAB=1` for the EDU variant of Matlab. For more information about how to check license feature states and how to request them with PBS Pro, [look here](../isv_licenses/).
 
 The licensing feature of PBS is currently disabled.
 
-In case of non-interactive session please read the [following information](../isv_licenses/) on how to modify the qsub command to test for available licenses prior getting the resource allocation.
+For non-interactive sessions, read the [following information](../isv_licenses/) on how to modify the qsub command to test for available licenses prior to getting the resource allocation.
 
 ### Matlab Distributed Computing Engines Start Up Time
 
-Starting Matlab workers is an expensive process that requires certain amount of time. For your information please see the following table:
+Starting Matlab workers is an expensive process that requires a certain amount of time. For reference, see the following table:
 
 | compute nodes | number of workers | start-up time[s] |
 | ------------- | ----------------- | ---------------- |
diff --git a/docs.it4i/software/numerical-languages/matlab_1314.md b/docs.it4i/software/numerical-languages/matlab_1314.md
index 9760bf63c..69e666994 100644
--- a/docs.it4i/software/numerical-languages/matlab_1314.md
+++ b/docs.it4i/software/numerical-languages/matlab_1314.md
@@ -3,7 +3,7 @@
 ## Introduction
 
 !!! note
-    This document relates to the old versions R2013 and R2014. For MATLAB 2015, please use [this documentation instead](matlab/).
+    This document relates to the old versions R2013 and R2014. For MATLAB 2015, use [this documentation instead](matlab/).
 
 Matlab is available in the latest stable version. There are always two variants of the release:
 
@@ -24,9 +24,9 @@ $ ml matlab
 
 If you need to use the Matlab GUI to prepare your Matlab programs, you can use Matlab directly on the login nodes. But for all computations use Matlab on the compute nodes via PBS Pro scheduler.
 
-If you require the Matlab GUI, please follow the general information about running graphical applications
+If you require the Matlab GUI, follow the general information about running graphical applications.
 
-Matlab GUI is quite slow using the X forwarding built in the PBS (qsub -X), so using X11 display redirection either via SSH or directly by xauth (please see the "GUI Applications on Compute Nodes over VNC" part) is recommended.
+The Matlab GUI is quite slow over the X forwarding built into PBS (qsub -X), so X11 display redirection, either via SSH or directly by xauth (see the "GUI Applications on Compute Nodes over VNC" part), is recommended.
 
 To run Matlab with GUI, use
 
@@ -111,8 +111,8 @@ mkdir -p $SCR ; cd $SCR || exit
 cp $PBS_O_WORKDIR/matlabcode.m .
 
 # load modules
-module load matlab/R2013a-EDU
-module load impi/4.1.1.036
+ml matlab/R2013a-EDU
+ml impi/4.1.1.036
 
 # execute the calculation
 matlab -nodisplay -r matlabcode > output.out
@@ -190,13 +190,13 @@ You can copy and paste the example in a .m file and execute. Note that the matla
 
 ### Non-Interactive Session and Licenses
 
-If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least the ` -l __feature__matlab__MATLAB=1` for EDU variant of Matlab. More information about how to check the license features states and how to request them with PBS Pro, please [look here](../isv_licenses/).
+If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least `-l __feature__matlab__MATLAB=1` for the EDU variant of Matlab. For more information about how to check license feature states and how to request them with PBS Pro, [look here](../isv_licenses/).
 
-In case of non-interactive session please read the [following information](../isv_licenses/) on how to modify the qsub command to test for available licenses prior getting the resource allocation.
+For non-interactive sessions, read the [following information](../isv_licenses/) on how to modify the qsub command to test for available licenses prior to getting the resource allocation.
 
 ### Matlab Distributed Computing Engines Start Up Time
 
-Starting Matlab workers is an expensive process that requires certain amount of time. For your information please see the following table:
+Starting Matlab workers is an expensive process that requires a certain amount of time. For reference, see the following table:
 
 | compute nodes | number of workers | start-up time[s] |
 | ------------- | ----------------- | ---------------- |
diff --git a/docs.it4i/software/numerical-languages/octave.md b/docs.it4i/software/numerical-languages/octave.md
index 8a3eb55ce..b3ce19550 100644
--- a/docs.it4i/software/numerical-languages/octave.md
+++ b/docs.it4i/software/numerical-languages/octave.md
@@ -36,7 +36,7 @@ cd /lscratch/$PBS_JOBID || exit
 cp $PBS_O_WORKDIR/octcode.m .
 
 # load octave module
-module load octave
+ml octave
 
 # execute the calculation
 octave -q --eval octcode > output.out
@@ -56,7 +56,7 @@ The octave c compiler mkoctfile calls the GNU gcc 4.8.1 for compiling native c c
 $ mkoctfile -v
 ```
 
-Octave may use MPI for interprocess communication This functionality is currently not supported on Anselm cluster. In case you require the octave interface to MPI, please contact [Anselm support](https://support.it4i.cz/rt/).
+Octave may use MPI for inter-process communication. This functionality is currently not supported on the Anselm cluster. If you require the Octave interface to MPI, contact [Anselm support](https://support.it4i.cz/rt/).
 
 ## Xeon Phi Support
 
diff --git a/docs.it4i/software/numerical-languages/opencoarrays.md b/docs.it4i/software/numerical-languages/opencoarrays.md
index bfbbb7f65..d6788ef14 100644
--- a/docs.it4i/software/numerical-languages/opencoarrays.md
+++ b/docs.it4i/software/numerical-languages/opencoarrays.md
@@ -124,4 +124,4 @@ $ mpiexec -np 4 ./synchronization_test.x
 
 **-np 4** is number of images to run. The parameters of **cafrun** and **mpiexec** are the same.
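+
+For example, the equivalent launch with **cafrun** (a sketch):
+
+```console
+$ cafrun -np 4 ./synchronization_test.x
+```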
 
-For more information about running CAF program please follow [Running OpenMPI - Salomon](../mpi/Running_OpenMPI.md)
+For more information about running CAF programs, see [Running OpenMPI - Salomon](../mpi/Running_OpenMPI.md).
diff --git a/docs.it4i/software/numerical-languages/r.md b/docs.it4i/software/numerical-languages/r.md
index c7f112c63..e83388e60 100644
--- a/docs.it4i/software/numerical-languages/r.md
+++ b/docs.it4i/software/numerical-languages/r.md
@@ -14,12 +14,12 @@ Read more on <http://www.r-project.org/>, <http://cran.r-project.org/doc/manuals
 
 ## Modules
 
-The R version 3.1.1 is available on the cluster, along with GUI interface Rstudio
+The R version 3.1.1 is available on the cluster, along with the RStudio GUI:
 
 | Application | Version           | module              |
 | ----------- | ----------------- | ------------------- |
 | **R**       | R 3.1.1           | R/3.1.1-intel-2015b |
-| **Rstudio** | Rstudio 0.98.1103 | Rstudio             |
+| **RStudio** | RStudio 0.98.1103 | RStudio             |
 
 ```console
 $ ml R
@@ -31,10 +31,10 @@ The R on cluster is linked to highly optimized MKL mathematical library. This pr
 
 ### Interactive Execution
 
-To run R interactively, using Rstudio GUI, log in with ssh -X parameter for X11 forwarding. Run rstudio:
+To run R interactively using the RStudio GUI, log in with the ssh -X parameter for X11 forwarding, then run rstudio:
 
 ```console
-$ ml Rstudio
+$ ml RStudio
 $ rstudio
 ```
 
@@ -54,7 +54,7 @@ cd /lscratch/$PBS_JOBID || exit
 cp $PBS_O_WORKDIR/rscript.R .
 
 # load R module
-module load R
+ml R
 
 # execute the calculation
 R CMD BATCH rscript.R routput.out
@@ -377,8 +377,8 @@ cd $SCRDIR || exit
 cp $PBS_O_WORKDIR/rscript.R .
 
 # load R and openmpi module
-module load R
-module load OpenMPI
+ml R
+ml OpenMPI
 
 # execute the calculation
 mpiexec -bycore -bind-to-core R --slave --no-save --no-restore -f rscript.R
diff --git a/docs.it4i/software/numerical-libraries/hdf5.md b/docs.it4i/software/numerical-libraries/hdf5.md
index 7b61eeb5f..6ebeebe60 100644
--- a/docs.it4i/software/numerical-libraries/hdf5.md
+++ b/docs.it4i/software/numerical-libraries/hdf5.md
@@ -24,7 +24,7 @@ $ ml hdf5-parallel
 The module sets up environment variables, required for linking and running HDF5 enabled applications. Make sure that the choice of HDF5 module is consistent with your choice of MPI library. Mixing MPI of different implementations may have unpredictable results.
 
 !!! note
-    Be aware, that GCC version of **HDF5 1.8.11** has serious performance issues, since it's compiled with -O0 optimization flag. This version is provided only for testing of code compiled only by GCC and IS NOT recommended for production computations. For more information, please see: <http://www.hdfgroup.org/ftp/HDF5/prev-releases/ReleaseFiles/release5-1811>
+    Be aware that the GCC version of **HDF5 1.8.11** has serious performance issues, since it is compiled with the -O0 optimization flag. This version is provided only for testing code compiled by GCC and IS NOT recommended for production computations. For more information, see: <http://www.hdfgroup.org/ftp/HDF5/prev-releases/ReleaseFiles/release5-1811>
 
     All GCC versions of **HDF5 1.8.13** are not affected by the bug, are compiled with -O3 optimizations and are recommended for production computations.
 
@@ -86,4 +86,4 @@ $ mpicc hdf5test.c -o hdf5test.x -Wl,-rpath=$LIBRARY_PATH $HDF5_INC $HDF5_SHLIB
 
 Run the example as [Intel MPI program](../mpi/running-mpich2/).
 
-For further information, please see the website: <http://www.hdfgroup.org/HDF5/>
+For further information, see the website: <http://www.hdfgroup.org/HDF5/>
diff --git a/docs.it4i/software/orca.md b/docs.it4i/software/orca.md
index 8215a9ead..0684e29fa 100644
--- a/docs.it4i/software/orca.md
+++ b/docs.it4i/software/orca.md
@@ -7,7 +7,7 @@ ORCA is a flexible, efficient and easy-to-use general purpose tool for quantum c
 The following module command makes the latest version of orca available to your session
 
 ```bash
-$ module load ORCA/3_0_3-linux_x86-64
+$ ml ORCA/3_0_3-linux_x86-64
 ```
 
 ### Dependency
@@ -49,7 +49,7 @@ Create a Sun Grid Engine submission file called submit_serial.sh that looks like
 ```bash
-!/bin/bash
+#!/bin/bash
 
-module load ORCA/3_0_3-linux_x86-64
+ml ORCA/3_0_3-linux_x86-64
 orca orca_serial.inp
 ```
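+
+Submit it with qsub (a sketch; scheduler options depend on your site configuration):
+
+```console
+$ qsub submit_serial.sh
+```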
 
diff --git a/docs.it4i/software/tools/ansys/ansys-cfx.md b/docs.it4i/software/tools/ansys/ansys-cfx.md
index cb77fdf45..45acd6f3e 100644
--- a/docs.it4i/software/tools/ansys/ansys-cfx.md
+++ b/docs.it4i/software/tools/ansys/ansys-cfx.md
@@ -25,7 +25,7 @@ echo Directory is `pwd`
 echo This jobs runs on the following processors:
 echo `cat $PBS_NODEFILE`
 
-module load ansys
+ml ansys
 
 #### Set number of processors per host listing
 #### (set to 1 as $PBS_NODEFILE lists each node twice if :ppn=2)
diff --git a/docs.it4i/software/tools/ansys/ansys-fluent.md b/docs.it4i/software/tools/ansys/ansys-fluent.md
index ee8ac3a5e..e11b8597b 100644
--- a/docs.it4i/software/tools/ansys/ansys-fluent.md
+++ b/docs.it4i/software/tools/ansys/ansys-fluent.md
@@ -30,7 +30,7 @@ echo This jobs runs on the following processors:
 echo `cat $PBS_NODEFILE`
 
 #### Load ansys module so that we find the cfx5solve command
-module load ansys
+ml ansys
 
 # Use following line to specify MPI for message-passing instead
 NCORES=`wc -l $PBS_NODEFILE |awk '{print $1}'`
@@ -82,7 +82,7 @@ input is the name of the input file.
 
 case is the name of the .cas file that the input file will utilize.
 
-fluent_args are extra ANSYS FLUENT arguments. As shown in the previous example, you can specify the interconnect by using the  -p interconnect command. The available interconnects include ethernet (the default), myrinet, infiniband,  vendor, altix, and crayx. The MPI is selected automatically, based on the specified interconnect.
+fluent_args are extra ANSYS FLUENT arguments. As shown in the previous example, you can specify the interconnect by using the -p interconnect command. The available interconnects include ethernet (the default), myrinet, infiniband, vendor, altix, and crayx. The MPI is selected automatically, based on the specified interconnect.
 
 outfile is the name of the file to which the standard output will be sent.
 
diff --git a/docs.it4i/software/tools/ansys/ansys-ls-dyna.md b/docs.it4i/software/tools/ansys/ansys-ls-dyna.md
index 7bf643a9c..e3af0318c 100644
--- a/docs.it4i/software/tools/ansys/ansys-ls-dyna.md
+++ b/docs.it4i/software/tools/ansys/ansys-ls-dyna.md
@@ -30,7 +30,7 @@ NPROCS=`wc -l < $PBS_NODEFILE`
 
 echo This job has allocated $NPROCS nodes
 
-module load ansys
+ml ansys
 
 #### Set number of processors per host listing
 #### (set to 1 as $PBS_NODEFILE lists each node twice if :ppn=2)
diff --git a/docs.it4i/software/tools/ansys/ansys-mechanical-apdl.md b/docs.it4i/software/tools/ansys/ansys-mechanical-apdl.md
index 116252df5..3db398c40 100644
--- a/docs.it4i/software/tools/ansys/ansys-mechanical-apdl.md
+++ b/docs.it4i/software/tools/ansys/ansys-mechanical-apdl.md
@@ -26,7 +26,7 @@ echo Directory is `pwd`
 echo This jobs runs on the following processors:
 echo `cat $PBS_NODEFILE`
 
-module load ansys
+ml ansys
 
 #### Set number of processors per host listing
 #### (set to 1 as $PBS_NODEFILE lists each node twice if :ppn=2)
diff --git a/docs.it4i/software/tools/ansys/ansys.md b/docs.it4i/software/tools/ansys/ansys.md
index d1def39cd..dcd195252 100644
--- a/docs.it4i/software/tools/ansys/ansys.md
+++ b/docs.it4i/software/tools/ansys/ansys.md
@@ -1,6 +1,6 @@
 # Overview of ANSYS Products
 
-**[SVS FEM](http://www.svsfem.cz/)** as **[ANSYS Channel partner](http://www.ansys.com/)** for Czech Republic provided all ANSYS licenses for ANSELM cluster and supports of all ANSYS Products (Multiphysics, Mechanical, MAPDL, CFX, Fluent, Maxwell, LS-DYNA...) to IT staff and ANSYS users. If you are challenging to problem of ANSYS functionality contact please [hotline@svsfem.cz](mailto:hotline@svsfem.cz?subject=Ostrava%20-%20ANSELM)
+**[SVS FEM](http://www.svsfem.cz/)**, as the **[ANSYS Channel partner](http://www.ansys.com/)** for the Czech Republic, provided all ANSYS licenses for the Anselm cluster and provides support for all ANSYS products (Multiphysics, Mechanical, MAPDL, CFX, Fluent, Maxwell, LS-DYNA...) to IT staff and ANSYS users. If you encounter a problem with ANSYS functionality, contact [hotline@svsfem.cz](mailto:hotline@svsfem.cz?subject=Ostrava%20-%20ANSELM).
 
 Anselm provides commercial as well as academic variants. Academic variants are distinguished by "**Academic...**" word in the name of license or by two letter preposition "**aa\_**" in the license feature name. Change of license is realized on command line respectively directly in user's PBS file (see individual products). [More about licensing here](licensing/)
 
diff --git a/docs.it4i/software/tools/ansys/ls-dyna.md b/docs.it4i/software/tools/ansys/ls-dyna.md
index dac7dbe9f..8492ddbf7 100644
--- a/docs.it4i/software/tools/ansys/ls-dyna.md
+++ b/docs.it4i/software/tools/ansys/ls-dyna.md
@@ -25,7 +25,7 @@ echo Running on host `hostname`
 echo Time is `date`
 echo Directory is `pwd`
 
-module load lsdyna
+ml lsdyna
 
 /apps/engineering/lsdyna/lsdyna700s i=input.k
 ```
diff --git a/docs.it4i/software/tools/ansys/workbench.md b/docs.it4i/software/tools/ansys/workbench.md
index fc33e5082..cdbc3c799 100644
--- a/docs.it4i/software/tools/ansys/workbench.md
+++ b/docs.it4i/software/tools/ansys/workbench.md
@@ -36,7 +36,7 @@ Now, save the project and close Workbench. We will use this script to launch the
     echo This jobs runs on the following nodes:
     echo `cat $PBS_NODEFILE`
 
-    module load ANSYS
+    ml ANSYS
 
     #### Set number of processors per host listing
     procs_per_host=24
diff --git a/docs.it4i/software/tools/easybuild-images.md b/docs.it4i/software/tools/easybuild-images.md
index 14187c717..215e7e6bc 100644
--- a/docs.it4i/software/tools/easybuild-images.md
+++ b/docs.it4i/software/tools/easybuild-images.md
@@ -95,7 +95,7 @@ eval "$@"
 %environment
 source /etc/profile
 module use /app/modules/all
-module load Python/3.6.4-foss-2018a OpenMPI/2.1.2-GCC-6.4.0-2.28
+ml Python/3.6.4-foss-2018a OpenMPI/2.1.2-GCC-6.4.0-2.28
 
 %labels
 ```
diff --git a/docs.it4i/software/viz/openfoam.md b/docs.it4i/software/viz/openfoam.md
index df6585429..7765c5472 100644
--- a/docs.it4i/software/viz/openfoam.md
+++ b/docs.it4i/software/viz/openfoam.md
@@ -45,7 +45,7 @@ In /opt/modules/modulefiles/engineering you can see installed engineering softwa
     lsdyna/7.x.x               openfoam/2.2.1-gcc481-openmpi1.6.5-SP
 ```
 
-For information how to use modules please [look here](../../environment-and-modules/).
+For information on how to use modules, [look here](../../environment-and-modules/).
 
 ## Getting Started
 
@@ -93,7 +93,8 @@ Create a Bash script test.sh
 
 ```bash
 #!/bin/bash
-module load openfoam/2.2.1-icc-openmpi1.6.5-DP
+
+ml openfoam/2.2.1-icc-openmpi1.6.5-DP
 source $FOAM_BASHRC
 
 # source to run functions
@@ -111,7 +112,7 @@ Job submission (example for Anselm):
 $ qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=16,walltime=03:00:00 test.sh
 ```
 
-For information about job submission please [look here](../../anselm/job-submission-and-execution/).
+For information about job submission, [look here](../../anselm/job-submission-and-execution/).
 
 ## Running Applications in Parallel
 
@@ -124,7 +125,8 @@ First we must run serial application bockMesh and decomposePar for preparation o
 
 ```bash
 #!/bin/bash
-module load openfoam/2.2.1-icc-openmpi1.6.5-DP
+
+ml openfoam/2.2.1-icc-openmpi1.6.5-DP
 source $FOAM_BASHRC
 
 # source to run functions
@@ -155,7 +157,7 @@ This job create simple block mesh and domain decomposition. Check your decomposi
 #PBS -q qprod
 #PBS -A OPEN-0-0
 
-module load openfoam/2.2.1-icc-openmpi1.6.5-DP
+ml openfoam/2.2.1-icc-openmpi1.6.5-DP
 source $FOAM_BASHRC
 
 cd $FOAM_RUN/tutorials/incompressible/simpleFoam/motorBike
-- 
GitLab