diff --git a/.spelling b/.spelling
index 8bb1fc223af4955de6d57fa26186b23630386154..fd17bf7f6f059c63d5fbcbc09234cb061f61d213 100644
--- a/.spelling
+++ b/.spelling
@@ -251,3 +251,7 @@ r37u31n1008
 qsub
 it4ifree
 it4i.portal.clients
+API
+GNU
+CUDA
+NVIDIA
diff --git a/docs.it4i/anselm-cluster-documentation/software/index.md b/docs.it4i/anselm-cluster-documentation/software/index.md
deleted file mode 100644
index 760656e5b60f6d3036e5b5dfd122f0b39717ba7c..0000000000000000000000000000000000000000
--- a/docs.it4i/anselm-cluster-documentation/software/index.md
+++ /dev/null
@@ -1,86 +0,0 @@
-# Anselm Cluster Software
-
-## [Modules](../../modules-anselm)
-
--   List of available modules
-    ## [COMSOL](comsol-multiphysics)
--   A finite element analysis, solver and Simulation software
-    ## [ParaView](paraview)
--   An open-source, multi-platform data analysis and visualization application
-    ## [Compilers](compilers)
--   Available compilers, including GNU, INTEL and UPC compilers
-    ## [NVIDIA CUDA](nvidia-cuda)
--   A guide to NVIDIA CUDA programming and GPU usage
-    ## [GPI-2](gpi2)
--   A library that implements the GASPI specification
-    ## [OpenFOAM](openfoam)
--   A free, open source CFD software package
-    ## [ISV Licenses](isv_licenses)
--   A guide to managing Independent Software Vendor licenses
-    ## [Intel Xeon Phi](intel-xeon-phi)
--   A guide to Intel Xeon Phi usage
-    ## [Virtualization](kvirtualization)
-    ## [Java](java)
--   Java on ANSELM
-    ## [Operating System](operating-system)
--   The operating system, deployed on ANSELM
-    ## Intel Suite
--   The Intel Parallel Studio XE
-    ### [Introduction](intel-suite/introduction)
-    ### [Intel MKL](intel-suite/intel-mkl)
-    ### [Intel Compilers](intel-suite/intel-compilers)
-    ### [Intel IPP](intel-suite/intel-integrated-performance-primitives)
-    ### [Intel TBB](intel-suite/intel-tbb)
-    ### [Intel Debugger](intel-suite/intel-debugger)
-    ## MPI
--   Message Passing Interface libraries
-    ### [Introduction](mpi/mpi)
-    ### [MPI4Py (MPI for Python)](mpi/mpi4py-mpi-for-python)
-    ### [Running OpenMPI](mpi/Running_OpenMPI)
-    ### [Running MPICH2](mpi/running-mpich2)
-    ## Numerical Libraries
--   Libraries for numerical computations
-    ### [Intel numerical libraries](numerical-libraries/intel-numerical-libraries)
-    ### [PETSc](numerical-libraries/petsc)
-    ### [Trilinos](numerical-libraries/trilinos)
-    ### [FFTW](numerical-libraries/fftw)
-    ### [GSL](numerical-libraries/gsl)
-    ### [MAGMA for Intel Xeon Phi](numerical-libraries/magma-for-intel-xeon-phi)
-    ### [HDF5](numerical-libraries/hdf5)
-    ## Omics Master
-    ### [Diagnostic component (TEAM)](omics-master/diagnostic-component-team)
-    ### [Prioritization component (BiERapp)](omics-master/priorization-component-bierapp)
-    ### [Overview](omics-master/overview)
-    ## Debuggers
--   A collection of development tools
-    ### [Valgrind](debuggers/valgrind)
-    ### [PAPI](debuggers/papi)
-    ### [Allinea Forge (DDT,MAP)](debuggers/allinea-ddt)
-    ### [Total View](debuggers/total-view)
-    ### [CUBE](debuggers/cube)
-    ### [Intel VTune Amplifier](debuggers/intel-vtune-amplifier)
-    ### [VNC](debuggers/debuggers)
-    ### [Scalasca](debuggers/scalasca)
-    ### [Score-P](debuggers/score-p)
-    ### [Intel Performance Counter Monitor](debuggers/intel-performance-counter-monitor)
-    ### [Allinea Performance Reports](debuggers/allinea-performance-reports)
-    ### [Vampir](debuggers/vampir)
-    ## Numerical Languages
--   Interpreted languages for numerical computations
-    ### [Introduction](numerical-languages/introduction)
-    ### [R](numerical-languages/r)
-    ### [MATLAB 2013-2014](numerical-languages/matlab_1314)
-    ### [MATLAB](numerical-languages/matlab)
-    ### [Octave](numerical-languages/octave)
-    ## Chemistry
--   Tools for computational chemistry
-    ### [Molpro](chemistry/molpro)
-    ### [NWChem](chemistry/nwchem)
-    ## ANSYS
--   An engineering simulation software
-    ### [Introduction](ansys/ansys)
-    ### [ANSYS CFX](ansys/ansys-cfx)
-    ### [ANSYS LS-DYNA](ansys/ansys-ls-dyna)
-    ### [ANSYS MAPDL](ansys/ansys-mechanical-apdl)
-    ### [LS-DYNA](ansys/ls-dyna)
-    ### [ANSYS Fluent](ansys/ansys-fluent)
diff --git a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-mkl.md b/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-mkl.md
index f06365e8fa6c86614a4da6f3c2e533041caaccac..f59b0417fb92c137266d25a227f2e5a7a83c49c8 100644
--- a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-mkl.md
+++ b/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-mkl.md
@@ -6,7 +6,7 @@ Intel Math Kernel Library (Intel MKL) is a library of math kernel subroutines, e
 
 -   BLAS (level 1, 2, and 3) and LAPACK linear algebra routines, offering vector, vector-matrix, and matrix-matrix operations.
 -   The PARDISO direct sparse solver, an iterative sparse solver, and supporting sparse BLAS (level 1, 2, and 3) routines for solving sparse systems of equations.
--   ScaLAPACK distributed processing linear algebra routines for Linux_ and Windows_ operating systems, as well as the Basic Linear Algebra Communications Subprograms (BLACS) and the Parallel Basic Linear Algebra Subprograms (PBLAS).
+-   ScaLAPACK distributed processing linear algebra routines for Linux and Windows operating systems, as well as the Basic Linear Algebra Communications Subprograms (BLACS) and the Parallel Basic Linear Algebra Subprograms (PBLAS).
 -   Fast Fourier transform (FFT) functions in one, two, or three dimensions with support for mixed radices (not limited to sizes that are powers of 2), as well as distributed versions of these functions.
 -   Vector Math Library (VML) routines for optimized mathematical operations on vectors.
 -   Vector Statistical Library (VSL) routines, which offer high-performance vectorized random number generators (RNG) for    several probability distributions, convolution and correlation routines, and summary statistics functions.
diff --git a/docs.it4i/anselm-cluster-documentation/software/intel-suite/introduction.md b/docs.it4i/anselm-cluster-documentation/software/intel-suite/introduction.md
index fda98aa7c9a03a57e483bb446c1fcb42ba8224d3..4cc1f5383e6214f064b182543252fc9b97723430 100644
--- a/docs.it4i/anselm-cluster-documentation/software/intel-suite/introduction.md
+++ b/docs.it4i/anselm-cluster-documentation/software/intel-suite/introduction.md
@@ -2,13 +2,11 @@
 
 The Anselm cluster provides following elements of the Intel Parallel Studio XE
 
-|Intel Parallel Studio XE|
-\|-------------------------------------------------\|
-|Intel Compilers|
-|Intel Debugger|
-|Intel MKL Library|
-|Intel Integrated Performance Primitives Library|
-|Intel Threading Building Blocks Library|
+* Intel Compilers
+* Intel Debugger
+* Intel MKL Library
+* Intel Integrated Performance Primitives Library
+* Intel Threading Building Blocks Library
 
 ## Intel compilers
 
diff --git a/docs.it4i/anselm-cluster-documentation/software/intel-xeon-phi.md b/docs.it4i/anselm-cluster-documentation/software/intel-xeon-phi.md
index c9f5c39c01dd2fdb91eebcadaf7dc5e357462843..fa9e5273d39ab8e27488d560cf5fa959efb2f783 100644
--- a/docs.it4i/anselm-cluster-documentation/software/intel-xeon-phi.md
+++ b/docs.it4i/anselm-cluster-documentation/software/intel-xeon-phi.md
@@ -1,6 +1,6 @@
 # Intel Xeon Phi
 
-\##A guide to Intel Xeon Phi usage
+## A guide to Intel Xeon Phi usage
 
 Intel Xeon Phi can be programmed in several modes. The default mode on Anselm is offload mode, but all modes described in this document are supported.
 
@@ -9,19 +9,19 @@ Intel Xeon Phi can be programmed in several modes. The default mode on Anselm is
 To get access to a compute node with Intel Xeon Phi accelerator, use the PBS interactive session
 
 ```bash
-    $ qsub -I -q qmic -A NONE-0-0
+$ qsub -I -q qmic -A NONE-0-0
 ```
 
 To set up the environment module "Intel" has to be loaded
 
 ```bash
-    $ module load intel/13.5.192
+$ module load intel/13.5.192
 ```
 
 Information about the hardware can be obtained by running the micinfo program on the host.
 
 ```bash
-    $ /usr/bin/micinfo
+$ /usr/bin/micinfo
 ```
 
 The output of the "micinfo" utility executed on one of the Anselm node is as follows. (note: to get PCIe related details the command has to be run with root privileges)
@@ -93,14 +93,14 @@ The output of the "micinfo" utility executed on one of the Anselm node is as fol
 To compile a code for Intel Xeon Phi a MPSS stack has to be installed on the machine where compilation is executed. Currently the MPSS stack is only installed on compute nodes equipped with accelerators.
 
 ```bash
-    $ qsub -I -q qmic -A NONE-0-0
-    $ module load intel/13.5.192
+$ qsub -I -q qmic -A NONE-0-0
+$ module load intel/13.5.192
 ```
 
 For debugging purposes it is also recommended to set environment variable "OFFLOAD_REPORT". Value can be set from 0 to 3, where higher number means more debugging information.
 
 ```bash
-    export OFFLOAD_REPORT=3
+export OFFLOAD_REPORT=3
 ```
 
 A very basic example of code that employs offload programming technique is shown in the next listing. Please note that this code is sequential and utilizes only single core of the accelerator.
@@ -607,7 +607,7 @@ Intel MPI for the Xeon Phi coprocessors offers different MPI programming models:
 
     **Symmetric model** - the MPI ranks reside on both the host and the coprocessor. Most general MPI case.
 
-\###Host-only model
+### Host-only model
 
 In this case all environment variables are set by modules, so to execute the compiled MPI program on a single node, use:
 
@@ -809,7 +809,7 @@ The same way MPI program can be executed on multiple hosts:
     : -host cn205 -n 6 ~/mpi-test
 ```
 
-\###Symmetric model
+### Symmetric model
 
 In a symmetric mode MPI programs are executed on both host computer(s) and MIC accelerator(s). Since MIC has a different
 architecture and requires different binary file produced by the Intel compiler two different files has to be compiled before MPI program is executed.
diff --git a/docs.it4i/anselm-cluster-documentation/software/isv_licenses.md b/docs.it4i/anselm-cluster-documentation/software/isv_licenses.md
index 61d77e17a7610a6ecfc03aa6f1ed97d36831b204..8ef95f5491139c3641bed38709143557dc786839 100644
--- a/docs.it4i/anselm-cluster-documentation/software/isv_licenses.md
+++ b/docs.it4i/anselm-cluster-documentation/software/isv_licenses.md
@@ -1,6 +1,6 @@
 # ISV Licenses
 
-\##A guide to managing Independent Software Vendor licenses
+## A guide to managing Independent Software Vendor licenses
 
 On Anselm cluster there are also installed commercial software applications, also known as ISV (Independent Software Vendor), which are subjects to licensing. The licenses are limited and their usage may be restricted only to some users or user groups.
 
@@ -57,9 +57,7 @@ Example of the Commercial Matlab license state:
 
 Each feature of each license is accounted and checked by the scheduler of PBS Pro. If you ask for certain licenses, the scheduler won't start the job until the asked licenses are free (available). This prevents to crash batch jobs, just because of unavailability of the needed licenses.
 
-The general format of the name is:
-
-**feature**APP**FEATURE**
+The general format of the name is `feature__APP__FEATURE`.
 
 Names of applications (APP):
 
diff --git a/docs.it4i/anselm-cluster-documentation/software/java.md b/docs.it4i/anselm-cluster-documentation/software/java.md
index a9aa360e91b97b7dbbdc441d225b4b14b40f2f00..ddf032eb4eef469e8c68de98f16965696b153c72 100644
--- a/docs.it4i/anselm-cluster-documentation/software/java.md
+++ b/docs.it4i/anselm-cluster-documentation/software/java.md
@@ -1,6 +1,6 @@
 # Java
 
-\##Java on ANSELM
+## Java on ANSELM
 
 Java is available on Anselm cluster. Activate java by loading the java module
 
diff --git a/docs.it4i/anselm-cluster-documentation/software/kvirtualization.md b/docs.it4i/anselm-cluster-documentation/software/kvirtualization.md
index 3c37ed9b153a8f206d89352e454561befd6691c3..83d14296986946aaedb8b61e83ba6aa52ec362be 100644
--- a/docs.it4i/anselm-cluster-documentation/software/kvirtualization.md
+++ b/docs.it4i/anselm-cluster-documentation/software/kvirtualization.md
@@ -1,6 +1,6 @@
 # Virtualization
 
-\##Running virtual machines on compute nodes
+## Running virtual machines on compute nodes
 
 ## Introduction
 
@@ -26,12 +26,13 @@ Virtualization has also some drawbacks, it is not so easy to setup efficient sol
 
 Solution described in chapter [HOWTO](virtualization/#howto)  is suitable for single node tasks, does not introduce virtual machine clustering.
 
-!!! Note "Note"
+!!! Note
 	Please consider virtualization as last resort solution for your needs.
 
+!!! Warning
     Please consult use of virtualization with IT4Innovation's support.
 
-    For running Windows application (when source code and Linux native application are not available) consider use of Wine, Windows compatibility layer. Many Windows applications can be run using Wine with less effort and better performance than when using virtualization.
+For running Windows application (when source code and Linux native application are not available) consider use of Wine, Windows compatibility layer. Many Windows applications can be run using Wine with less effort and better performance than when using virtualization.
 
 ## Licensing
 
diff --git a/docs.it4i/anselm-cluster-documentation/software/mpi/Running_OpenMPI.md b/docs.it4i/anselm-cluster-documentation/software/mpi/Running_OpenMPI.md
index e2376523a96ba461422fe63335aa3bf913679bd4..2560280b66329edb7966882797e90982f915d14f 100644
--- a/docs.it4i/anselm-cluster-documentation/software/mpi/Running_OpenMPI.md
+++ b/docs.it4i/anselm-cluster-documentation/software/mpi/Running_OpenMPI.md
@@ -6,7 +6,7 @@ The OpenMPI programs may be executed only via the PBS Workload manager, by enter
 
 ### Basic usage
 
-!!! Note "Note"
+!!! Note
 	Use the mpiexec to run the OpenMPI code.
 
 Example:
@@ -27,7 +27,7 @@ Example:
     Hello world! from rank 3 of 4 on host cn110
 ```
 
-!!! Note "Note"
+!!! Note
 	Please be aware, that in this example, the directive **-pernode** is used to run only **one task per node**, which is normally an unwanted behaviour (unless you want to run hybrid code with just one MPI and 16 OpenMP tasks per node). In normal MPI programs **omit the -pernode directive** to run up to 16 MPI tasks per each node.
 
 In this example, we allocate 4 nodes via the express queue interactively. We set up the openmpi environment and interactively run the helloworld_mpi.x program. Note that the executable helloworld_mpi.x must be available within the
@@ -48,7 +48,7 @@ You need to preload the executable, if running on the local scratch /lscratch fi
 
 In this example, we assume the executable helloworld_mpi.x is present on compute node cn17 on local scratch. We call the mpiexec whith the **--preload-binary** argument (valid for openmpi). The mpiexec will copy the executable from cn17 to the /lscratch/15210.srv11 directory on cn108, cn109 and cn110 and execute the program.
 
-!!! Note "Note"
+!!! Note
 	MPI process mapping may be controlled by PBS parameters.
 
 The mpiprocs and ompthreads parameters allow for selection of number of running MPI processes per node as well as number of OpenMP threads per MPI process.
@@ -97,7 +97,7 @@ In this example, we demonstrate recommended way to run an MPI application, using
 
 ### OpenMP thread affinity
 
-!!! Note "Note"
+!!! Note
 	Important!  Bind every OpenMP thread to a core!
 
 In the previous two examples with one or two MPI processes per node, the operating system might still migrate OpenMP threads between cores. You might want to avoid this by setting these environment variable for GCC OpenMP:
@@ -108,16 +108,16 @@ In the previous two examples with one or two MPI processes per node, the operati
 
 or this one for Intel OpenMP:
 
-````bash
-        $ export KMP_AFFINITY=granularity=fine,compact,1,0
-    ``
+```bash
+$ export KMP_AFFINITY=granularity=fine,compact,1,0
+```
 
-    As of OpenMP 4.0 (supported by GCC 4.9 and later and Intel 14.0 and later) the following variables may be used for Intel or GCC:
+As of OpenMP 4.0 (supported by GCC 4.9 and later and Intel 14.0 and later) the following variables may be used for Intel or GCC:
 
-    ```bash
-        $ export OMP_PROC_BIND=true
-        $ export OMP_PLACES=cores
-````
+```bash
+$ export OMP_PROC_BIND=true
+$ export OMP_PLACES=cores
+```
 
 ## OpenMPI Process Mapping and Binding
 
@@ -152,7 +152,7 @@ In this example, we see that ranks have been mapped on nodes according to the or
 
 Exact control of MPI process placement and resource binding is provided by specifying a rankfile
 
-!!! Note "Note"
+!!! Note
 	Appropriate binding may boost performance of your application.
 
 Example rankfile
diff --git a/docs.it4i/anselm-cluster-documentation/software/mpi/mpi4py-mpi-for-python.md b/docs.it4i/anselm-cluster-documentation/software/mpi/mpi4py-mpi-for-python.md
index 3738f2ec2d02bb0afd9e221be02c91fdbf24ac5b..c9237a8346d90b4e98f59ea4d9a07d473a250e3e 100644
--- a/docs.it4i/anselm-cluster-documentation/software/mpi/mpi4py-mpi-for-python.md
+++ b/docs.it4i/anselm-cluster-documentation/software/mpi/mpi4py-mpi-for-python.md
@@ -51,7 +51,7 @@ For example
     comm.Barrier()   # wait for everybody to synchronize
 ```
 
-\###Collective Communication with NumPy arrays
+### Collective Communication with NumPy arrays
 
 ```cpp
     from mpi4py import MPI
diff --git a/docs.it4i/salomon/software/index.md b/docs.it4i/salomon/software/index.md
deleted file mode 100644
index cebebebfeac0e20cc3679a03176f77e03c72af11..0000000000000000000000000000000000000000
--- a/docs.it4i/salomon/software/index.md
+++ /dev/null
@@ -1,64 +0,0 @@
-# Salomon Cluster Software
-
-## [Modules](../../modules-salomon)
-
--   List of Available Modules
-    ## [Compilers](compilers)
--   Available compilers, including GNU, INTEL and UPC compilers
-    ## [Intel Xeon Phi](intel-xeon-phi)
--   A guide to Intel Xeon Phi usage
-    ## [Java](java)
--   Java on the cluster
-    ## [Operating System](operating-system)
--   The operating system, deployed on Salomon cluster
-    ## Intel Suite
--   The Intel Parallel Studio XE
-    ### [Introduction](intel-suite/intel-parallel-studio-introduction)
-    ### [Intel MKL](intel-suite/intel-mkl)
-    ### [Intel Compilers](intel-suite/intel-compilers)
-    ### [Intel IPP](intel-suite/intel-integrated-performance-primitives)
-    ### [Intel TBB](intel-suite/intel-tbb)
-    ### [Intel Debugger](intel-suite/intel-debugger)
-    ### [Intel Inspector](intel-suite/intel-inspector)
-    ### [Intel Trace Analyzer and Collector](intel-suite/intel-trace-analyzer-and-collector)
-    ### [Intel Advisor](intel-suite/intel-advisor)
-    ## MPI
--   Message Passing Interface libraries
-    ### [Introduction](mpi/mpi)
-    ### [MPI4Py (MPI for Python)](mpi/mpi4py-mpi-for-python)
-    ### [Running Open MPI](mpi/Running_OpenMPI)
-    ## Debuggers
--   A collection of development tools
-    ### [Introduction](debuggers/Introduction)
-    ### [Valgrind](debuggers/valgrind)
-    ### [Allinea Forge (DDT,MAP)](debuggers/allinea-ddt)
-    ### [Total View](debuggers/total-view)
-    ### [Intel VTune Amplifier XE](debuggers/intel-vtune-amplifier)
-    ### [Aislinn](debuggers/aislinn)
-    ### [Allinea Performance Reports](debuggers/allinea-performance-reports)
-    ### [Vampir](debuggers/vampir)
-    ## Numerical Languages
--   Interpreted languages for numerical computations
-    ### [Introduction](numerical-languages/introduction)
-    ### [R](numerical-languages/r)
-    ### [MATLAB](numerical-languages/matlab)
-    ### [Octave](numerical-languages/octave)
-    ## Chemistry
--   Tools for computational chemistry
-    ### [Molpro](chemistry/molpro)
-    ### [Phono3py](chemistry/phono3py)
-    ### [NWChem](chemistry/nwchem)
-    ## COMSOL
--   A finite element analysis, solver and Simulation software
-    ### [COMSOL](comsol/comsol-multiphysics)
-    ### [Licensing and Available Versions](comsol/licensing-and-available-versions)
-    ## ANSYS
--   An engineering simulation software
-    ### [Introduction](ansys/ansys)
-    ### [Workbench](ansys/workbench)
-    ### [ANSYS CFX](ansys/ansys-cfx)
-    ### [ANSYS LS-DYNA](ansys/ansys-ls-dyna)
-    ### [ANSYS MAPDL](ansys/ansys-mechanical-apdl)
-    ### [ANSYS Fluent](ansys/ansys-fluent)
-    ### [Setting license preferences](ansys/licensing)
-    ### [Licensing and Available Versions](ansys/setting-license-preferences)
diff --git a/scripts/titlemd.py b/scripts/titlemd.py
index 84fd14b2570ca3f0864936b5f9043e4036cc040e..2fc5cb89c9cd25b4076cd7800b91fbf8b8e899c7 100755
--- a/scripts/titlemd.py
+++ b/scripts/titlemd.py
@@ -35,7 +35,7 @@ def main(location):
                       disabled = 0
               if line.startswith('#') and disabled == 0:
                   line = titlecase(line[:(line.find("]"))], callback=abbreviations)+line[(line.find("]")):] 
-              if line.startswith('---') or line.startswith('==='):
+              if (line.startswith('---') or line.startswith('===')) and disabled == 0:
                   prev_line = titlecase(prev_line[:(prev_line.find("]"))], callback=abbreviations)+prev_line[(prev_line.find("]")):]
               f.write(prev_line)
               prev_line = line
diff --git a/scripts/titlemd_test.py b/scripts/titlemd_test.py
index 0c66d4413108d53fccb07e3c8bdf766cd1badfe7..9f2d34bffe84c021e7c1883c0becbf9d970a4af9 100755
--- a/scripts/titlemd_test.py
+++ b/scripts/titlemd_test.py
@@ -41,7 +41,7 @@ def main(location):
               print("-"+line,end="") 
               print("+"+titlecase(line[:(line.find("]"))], callback=abbreviations)+line[(line.find("]")):],end="") 
               return_value = 1
-          if line.startswith('---') or line.startswith('==='):
+          if (line.startswith('---') or line.startswith('===')) and disabled == 0:
             if prev_line != titlecase(prev_line[:(prev_line.find("]"))], callback=abbreviations)+prev_line[(prev_line.find("]")):]:
               print()
               print("-"+prev_line,end="")