From 231dd723a8b2c744986ac9dc7760d9614b24bdce Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Krup=C4=8D=C3=ADk?= <lukas.krupcik@vsb.cz>
Date: Tue, 30 Oct 2018 09:07:53 +0100
Subject: [PATCH] fix links

---
 docs.it4i/anselm/capacity-computing.md        | 14 ++++++------
 docs.it4i/anselm/compute-nodes.md             |  2 +-
 docs.it4i/anselm/hardware-overview.md         | 14 ++++++------
 docs.it4i/anselm/introduction.md              |  8 +++----
 .../anselm/job-submission-and-execution.md    | 10 ++++-----
 .../anselm/resources-allocation-policy.md     |  4 ++--
 docs.it4i/anselm/shell-and-data-access.md     |  6 ++---
 docs.it4i/anselm/storage.md                   |  2 +-
 docs.it4i/environment-and-modules.md          |  4 ++--
 .../graphical-user-interface/vnc.md           |  4 ++--
 .../shell-access-and-data-transfer/putty.md   |  8 +++----
 .../accessing-the-clusters/vpn-access.md      |  2 +-
 docs.it4i/general/applying-for-resources.md   |  2 +-
 .../obtaining-login-credentials.md            |  8 +++----
 .../resource_allocation_and_job_execution.md  | 10 ++++-----
 docs.it4i/job-features.md                     |  2 +-
 docs.it4i/prace.md                            | 22 +++++++++----------
 docs.it4i/salomon/capacity-computing.md       |  2 +-
 docs.it4i/salomon/compute-nodes.md            |  2 +-
 docs.it4i/salomon/hardware-overview.md        |  6 ++---
 docs.it4i/salomon/ib-single-plane-topology.md |  4 ++--
 docs.it4i/salomon/introduction.md             |  2 +-
 docs.it4i/salomon/job-priority.md             |  2 +-
 .../salomon/job-submission-and-execution.md   |  8 +++----
 docs.it4i/salomon/network.md                  |  6 ++---
 .../salomon/resources-allocation-policy.md    |  4 ++--
 docs.it4i/salomon/shell-and-data-access.md    | 10 ++++-----
 docs.it4i/salomon/storage.md                  |  2 +-
 docs.it4i/salomon/visualization.md            |  6 ++---
 .../software/bio/omics-master/overview.md     |  8 +++----
 .../cae/comsol/comsol-multiphysics.md         |  6 ++---
 docs.it4i/software/chemistry/molpro.md        |  2 +-
 docs.it4i/software/chemistry/nwchem.md        |  2 +-
 docs.it4i/software/compilers.md               |  6 ++---
 docs.it4i/software/debuggers/Introduction.md  | 10 ++++-----
 docs.it4i/software/debuggers/allinea-ddt.md   |  2 +-
 .../debuggers/allinea-performance-reports.md  |  4 ++--
 .../intel-performance-counter-monitor.md      |  2 +-
 docs.it4i/software/debuggers/papi.md          |  2 +-
 docs.it4i/software/debuggers/scalasca.md      | 12 +++++-----
 docs.it4i/software/debuggers/score-p.md       |  6 ++---
 docs.it4i/software/debuggers/vampir.md        |  2 +-
 .../intel/intel-suite/intel-debugger.md       |  6 ++---
 .../software/intel/intel-suite/intel-mkl.md   |  2 +-
 .../intel-parallel-studio-introduction.md     | 10 ++++-----
 .../software/intel/intel-suite/intel-tbb.md   |  2 +-
 .../intel-trace-analyzer-and-collector.md     |  2 +-
 docs.it4i/software/isv_licenses.md            |  2 +-
 .../software/machine-learning/introduction.md |  2 +-
 docs.it4i/software/mic/mic_environment.md     |  6 ++---
 docs.it4i/software/mpi/mpi.md                 |  4 ++--
 .../software/mpi/mpi4py-mpi-for-python.md     |  2 +-
 docs.it4i/software/mpi/running-mpich2.md      |  2 +-
 .../numerical-languages/introduction.md       |  6 ++---
 .../software/numerical-languages/matlab.md    | 10 ++++-----
 .../numerical-languages/matlab_1314.md        |  6 ++---
 .../software/numerical-languages/octave.md    |  6 ++---
 docs.it4i/software/numerical-languages/r.md   |  8 +++----
 .../software/numerical-libraries/fftw.md      |  2 +-
 .../software/numerical-libraries/hdf5.md      |  2 +-
 .../intel-numerical-libraries.md              |  6 ++---
 docs.it4i/software/tools/ansys/ansys-cfx.md   |  2 +-
 .../software/tools/ansys/ansys-fluent.md      |  2 +-
 .../software/tools/ansys/ansys-ls-dyna.md     |  2 +-
 .../tools/ansys/ansys-mechanical-apdl.md      |  2 +-
 docs.it4i/software/tools/ansys/ls-dyna.md     |  2 +-
 docs.it4i/software/tools/virtualization.md    |  4 ++--
 docs.it4i/software/viz/openfoam.md            |  2 +-
 docs.it4i/software/viz/paraview.md            |  2 +-
 69 files changed, 172 insertions(+), 172 deletions(-)

diff --git a/docs.it4i/anselm/capacity-computing.md b/docs.it4i/anselm/capacity-computing.md
index 5057b88ac..71b31ab1f 100644
--- a/docs.it4i/anselm/capacity-computing.md
+++ b/docs.it4i/anselm/capacity-computing.md
@@ -9,13 +9,13 @@ However, executing a huge number of jobs via the PBS queue may strain the system
 !!! note
     Please follow one of the procedures below, in case you wish to schedule more than 100 jobs at a time.
 
-* Use [Job arrays](/anselm/capacity-computing/#job-arrays) when running a huge number of [multithread](anselm/capacity-computing/#shared-jobscript-on-one-node) (bound to one node only) or multinode (multithread across several nodes) jobs
-* Use [GNU parallel](/anselm/capacity-computing/#gnu-parallel) when running single core jobs
-* Combine [GNU parallel with Job arrays](/anselm/capacity-computing/#job-arrays-and-gnu-parallel) when running huge number of single core jobs
+* Use [Job arrays](anselm/capacity-computing/#job-arrays) when running a huge number of [multithreaded](anselm/capacity-computing/#shared-jobscript-on-one-node) (bound to one node only) or multinode (multithreaded across several nodes) jobs
+* Use [GNU parallel](anselm/capacity-computing/#gnu-parallel) when running single-core jobs
+* Combine [GNU parallel with Job arrays](anselm/capacity-computing/#job-arrays-and-gnu-parallel) when running a huge number of single-core jobs
 
 ## Policy
 
-1. A user is allowed to submit at most 100 jobs. Each job may be [a job array](/anselm/capacity-computing/#job-arrays).
+1. A user is allowed to submit at most 100 jobs. Each job may be [a job array](anselm/capacity-computing/#job-arrays).
 1. The array size is at most 1000 subjobs.
 
 ## Job Arrays
@@ -76,7 +76,7 @@ If running a huge number of parallel multicore (in means of multinode multithrea
 
 ### Submit the Job Array
 
-To submit the job array, use the qsub -J command. The 900 jobs of the [example above](/anselm/capacity-computing/#array_example) may be submitted like this:
+To submit the job array, use the qsub -J command. The 900 jobs of the [example above](anselm/capacity-computing/#array_example) may be submitted like this:
 
 ```console
 $ qsub -N JOBNAME -J 1-900 jobscript
@@ -207,7 +207,7 @@ In this example, tasks from the tasklist are executed via the GNU parallel. The
 
 ### Submit the Job
 
-To submit the job, use the qsub command. The 101 task job of the [example above](/anselm/capacity-computing/#gp_example) may be submitted as follows:
+To submit the job, use the qsub command. The 101-task job of the [example above](anselm/capacity-computing/#gp_example) may be submitted as follows:
 
 ```console
 $ qsub -N JOBNAME jobscript
@@ -292,7 +292,7 @@ When deciding this values, keep in mind the following guiding rules:
 
 ### Submit the Job Array (-J)
 
-To submit the job array, use the qsub -J command. The 992 task job of the [example above](/anselm/capacity-computing/#combined_example) may be submitted like this:
+To submit the job array, use the qsub -J command. The 992-task job of the [example above](anselm/capacity-computing/#combined_example) may be submitted like this:
 
 ```console
 $ qsub -N JOBNAME -J 1-992:32 jobscript
diff --git a/docs.it4i/anselm/compute-nodes.md b/docs.it4i/anselm/compute-nodes.md
index d27f79303..a52810e60 100644
--- a/docs.it4i/anselm/compute-nodes.md
+++ b/docs.it4i/anselm/compute-nodes.md
@@ -52,7 +52,7 @@ Anselm is cluster of x86-64 Intel based nodes built with Bull Extreme Computing
 
 ### Compute Node Summary
 
-| Node type                    | Count | Range       | Memory | Cores       | [Access](/general/resources-allocation-policy/)    |
+| Node type                    | Count | Range       | Memory | Cores       | [Access](general/resources-allocation-policy/)    |
 | ---------------------------- | ----- | ----------- | ------ | ----------- | --------------------------------------    |
 | Nodes without an accelerator | 180   | cn[1-180]   | 64GB   | 16 @ 2.4GHz | qexp, qprod, qlong, qfree, qprace, qatlas |
 | Nodes with a GPU accelerator | 23    | cn[181-203] | 96GB   | 16 @ 2.3GHz | qnvidia, qexp                             |
diff --git a/docs.it4i/anselm/hardware-overview.md b/docs.it4i/anselm/hardware-overview.md
index 7dd981ead..efcff8068 100644
--- a/docs.it4i/anselm/hardware-overview.md
+++ b/docs.it4i/anselm/hardware-overview.md
@@ -17,16 +17,16 @@ There are four types of compute nodes:
 * 4 compute nodes with a MIC accelerator - an Intel Xeon Phi 5110P
 * 2 fat nodes - equipped with 512 GB of RAM and two 100 GB SSD drives
 
-[More about Compute nodes](/anselm/compute-nodes/).
+[More about Compute nodes](anselm/compute-nodes/).
 
-GPU and accelerated nodes are available upon request, see the [Resources Allocation Policy](/anselm/resources-allocation-policy/).
+GPU and accelerated nodes are available upon request, see the [Resources Allocation Policy](anselm/resources-allocation-policy/).
 
-All of these nodes are interconnected through fast InfiniBand and Ethernet networks.  [More about the Network](/anselm/network/).
+All of these nodes are interconnected through fast InfiniBand and Ethernet networks.  [More about the Network](anselm/network/).
 Every chassis provides an InfiniBand switch, marked **isw**, connecting all nodes in the chassis, as well as connecting the chassis to the upper level switches.
 
-All of the nodes share a 360 TB /home disk for storage of user files. The 146 TB shared /scratch storage is available for scratch data. These file systems are provided by the Lustre parallel file system. There is also local disk storage available on all compute nodes in /lscratch.  [More about Storage](/anselm/storage/).
+All of the nodes share a 360 TB /home disk for storage of user files. The 146 TB shared /scratch storage is available for scratch data. These file systems are provided by the Lustre parallel file system. There is also local disk storage available on all compute nodes in /lscratch.  [More about Storage](anselm/storage/).
 
-User access to the Anselm cluster is provided by two login nodes login1, login2, and data mover node dm1. [More about accessing the cluster.](/anselm/shell-and-data-access/)
+User access to the Anselm cluster is provided by two login nodes, login1 and login2, and the data mover node dm1. [More about accessing the cluster.](anselm/shell-and-data-access/)
 
 The parameters are summarized in the following tables:
 
@@ -35,7 +35,7 @@ The parameters are summarized in the following tables:
 | Primary purpose                             | High Performance Computing                   |
 | Architecture of compute nodes               | x86-64                                       |
 | Operating system                            | Linux (CentOS)                               |
-| [**Compute nodes**](/anselm/compute-nodes/)  |                                              |
+| [**Compute nodes**](anselm/compute-nodes/)  |                                              |
 | Total                                       | 209                                          |
 | Processor cores                             | 16 (2 x 8 cores)                             |
 | RAM                                         | min. 64 GB, min. 4 GB per core               |
@@ -57,4 +57,4 @@ The parameters are summarized in the following tables:
 | MIC accelerated  | 2 x Intel Sandy Bridge E5-2470, 2.3 GHz | 96 GB  | Intel Xeon Phi 5110P |
 | Fat compute node | 2 x Intel Sandy Bridge E5-2665, 2.4 GHz | 512 GB | -                    |
 
-For more details refer to [Compute nodes](/anselm/compute-nodes/), [Storage](anselm/storage/), and [Network](anselm/network/).
+For more details refer to [Compute nodes](anselm/compute-nodes/), [Storage](anselm/storage/), and [Network](anselm/network/).
diff --git a/docs.it4i/anselm/introduction.md b/docs.it4i/anselm/introduction.md
index 80cf1a4c3..7963784c6 100644
--- a/docs.it4i/anselm/introduction.md
+++ b/docs.it4i/anselm/introduction.md
@@ -1,11 +1,11 @@
 # Introduction
 
-Welcome to Anselm supercomputer cluster. The Anselm cluster consists of 209 compute nodes, totalling 3344 compute cores with 15 TB RAM, giving over 94 TFLOP/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 16 cores, at least 64 GB of RAM, and a 500 GB hard disk drive. Nodes are interconnected through a fully non-blocking fat-tree InfiniBand network, and are equipped with Intel Sandy Bridge processors. A few nodes are also equipped with NVIDIA Kepler GPU or Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](/anselm/hardware-overview/).
+Welcome to the Anselm supercomputer cluster. The Anselm cluster consists of 209 compute nodes, totalling 3344 compute cores with 15 TB RAM, giving over 94 TFLOP/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 16 cores, at least 64 GB of RAM, and a 500 GB hard disk drive. Nodes are interconnected through a fully non-blocking fat-tree InfiniBand network, and are equipped with Intel Sandy Bridge processors. A few nodes are also equipped with NVIDIA Kepler GPU or Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](anselm/hardware-overview/).
 
-The cluster runs with an [operating system](/software/operating-system/) which is compatible with the RedHat [Linux family.](http://upload.wikimedia.org/wikipedia/commons/1/1b/Linux_Distribution_Timeline.svg) We have installed a wide range of software packages targeted at different scientific domains. These packages are accessible via the [modules environment](environment-and-modules/).
+The cluster runs an [operating system](software/operating-system/) which is compatible with the RedHat [Linux family](http://upload.wikimedia.org/wikipedia/commons/1/1b/Linux_Distribution_Timeline.svg). We have installed a wide range of software packages targeted at different scientific domains. These packages are accessible via the [modules environment](environment-and-modules/).
 
 The user data shared file-system (HOME, 320 TB) and job data shared file-system (SCRATCH, 146 TB) are available to users.
 
-The PBS Professional workload manager provides [computing resources allocations and job execution](/anselm/resources-allocation-policy/).
+The PBS Professional workload manager provides [computing resources allocations and job execution](anselm/resources-allocation-policy/).
 
-Read more on how to [apply for resources](/general/applying-for-resources/), [obtain login credentials](general/obtaining-login-credentials/obtaining-login-credentials/) and [access the cluster](/anselm/shell-and-data-access/).
+Read more on how to [apply for resources](general/applying-for-resources/), [obtain login credentials](general/obtaining-login-credentials/obtaining-login-credentials/) and [access the cluster](anselm/shell-and-data-access/).
diff --git a/docs.it4i/anselm/job-submission-and-execution.md b/docs.it4i/anselm/job-submission-and-execution.md
index 9b288b9f4..008a6c07f 100644
--- a/docs.it4i/anselm/job-submission-and-execution.md
+++ b/docs.it4i/anselm/job-submission-and-execution.md
@@ -92,9 +92,9 @@ In this example, we allocate 4 nodes, 16 cores per node, selecting only the node
 
 ### Placement by IB Switch
 
-Groups of computational nodes are connected to chassis integrated Infiniband switches. These switches form the leaf switch layer of the [Infiniband network](/anselm/network/) fat tree topology. Nodes sharing the leaf switch can communicate most efficiently. Sharing the same switch prevents hops in the network and facilitates unbiased, highly efficient network communication.
+Groups of computational nodes are connected to chassis-integrated InfiniBand switches. These switches form the leaf switch layer of the [InfiniBand network](anselm/network/) fat-tree topology. Nodes sharing the leaf switch can communicate most efficiently. Sharing the same switch prevents hops in the network and facilitates unbiased, highly efficient network communication.
 
-Nodes sharing the same switch may be selected via the PBS resource attribute ibswitch. Values of this attribute are iswXX, where XX is the switch number. The node-switch mapping can be seen in the [Hardware Overview](/anselm/hardware-overview/) section.
+Nodes sharing the same switch may be selected via the PBS resource attribute ibswitch. Values of this attribute are iswXX, where XX is the switch number. The node-switch mapping can be seen in the [Hardware Overview](anselm/hardware-overview/) section.
 
 We recommend allocating compute nodes to a single switch when best possible computational network performance is required to run the job efficiently:
 
@@ -373,7 +373,7 @@ exit
 
 In this example, input and executable files are assumed to be preloaded manually in the /scratch/$USER/myjob directory. Note the **mpiprocs** and **ompthreads** qsub options controlling the behavior of the MPI execution. mympiprog.x is executed as one process per node, on all 100 allocated nodes. If mympiprog.x implements OpenMP threads, it will run 16 threads per node.
 
-More information can be found in the [Running OpenMPI](/software/mpi/Running_OpenMPI/) and [Running MPICH2](software/mpi/running-mpich2/)
+More information can be found in the [Running OpenMPI](software/mpi/Running_OpenMPI/) and [Running MPICH2](software/mpi/running-mpich2/)
 sections.
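 
 For illustration, the qsub command producing this hybrid layout might look as follows (a hedged sketch; `PROJECT_ID` is a placeholder):
 
 ```console
 $ qsub -A PROJECT_ID -q qprod -l select=100:ncpus=16:mpiprocs=1:ompthreads=16 ./myjob
 ```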
 
 ### Example Jobscript for Single Node Calculation
@@ -381,7 +381,7 @@ sections.
 !!! note
     The local scratch directory is often useful for single node jobs. Local scratch memory will be deleted immediately after the job ends.
 
-Example jobscript for single node calculation, using [local scratch](/anselm/storage/) memory on the node:
+Example jobscript for single node calculation, using [local scratch](anselm/storage/) memory on the node:
 
 ```bash
 #!/bin/bash
@@ -407,4 +407,4 @@ In this example, a directory in /home holds the input file input and executable
 
 ### Other Jobscript Examples
 
-Further jobscript examples may be found in the software section and the [Capacity computing](/anselm/capacity-computing/) section.
+Further jobscript examples may be found in the software section and the [Capacity computing](anselm/capacity-computing/) section.
diff --git a/docs.it4i/anselm/resources-allocation-policy.md b/docs.it4i/anselm/resources-allocation-policy.md
index a60ad4c5e..ec94c08e2 100644
--- a/docs.it4i/anselm/resources-allocation-policy.md
+++ b/docs.it4i/anselm/resources-allocation-policy.md
@@ -2,7 +2,7 @@
 
 ## Job Queue Policies
 
-The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and the resources available to the Project. The Fair-share system of Anselm ensures that individual users may consume approximately equal amounts of resources per week. Detailed information can be found in the [Job scheduling](/anselm/job-priority/) section. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. The following table provides the queue partitioning overview:
+The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and the resources available to the Project. The Fair-share system of Anselm ensures that individual users may consume approximately equal amounts of resources per week. Detailed information can be found in the [Job scheduling](anselm/job-priority/) section. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. The following table provides the queue partitioning overview:
 
 !!! note
     Check the queue status at <https://extranet.it4i.cz/anselm/>
@@ -29,7 +29,7 @@ The resources are allocated to the job in a fair-share fashion, subject to const
 
 ## Queue Notes
 
-The job wall clock time defaults to **half the maximum time**, see the table above. Longer wall time limits can be  [set manually, see examples](/anselm/job-submission-and-execution/).
+The job wall clock time defaults to **half the maximum time**, see the table above. Longer wall time limits can be [set manually, see examples](anselm/job-submission-and-execution/).
 
 Jobs that exceed the reserved wall clock time (Req'd Time) get killed automatically. The wall clock time limit can be changed for queuing jobs (state Q) using the qalter command; however, it cannot be changed for a running job (state R).
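 
 For example, to extend a queued job's limit to 48 hours (a sketch; `JOB_ID` stands for the real job identifier):
 
 ```console
 $ qalter -l walltime=48:00:00 JOB_ID
 ```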
 
diff --git a/docs.it4i/anselm/shell-and-data-access.md b/docs.it4i/anselm/shell-and-data-access.md
index ab8d2ba3c..6fc1c8f32 100644
--- a/docs.it4i/anselm/shell-and-data-access.md
+++ b/docs.it4i/anselm/shell-and-data-access.md
@@ -204,9 +204,9 @@ Now, configure the applications proxy settings to **localhost:6000**. Use port f
 
 ## Graphical User Interface
 
-* The [X Window system](/general/accessing-the-clusters/graphical-user-interface/x-window-system/) is the principal way to get GUI access to the clusters.
-* [Virtual Network Computing](/general/accessing-the-clusters/graphical-user-interface/vnc/) is a graphical [desktop sharing](http://en.wikipedia.org/wiki/Desktop_sharing) system that uses the [Remote Frame Buffer protocol](http://en.wikipedia.org/wiki/RFB_protocol) to remotely control another [computer](http://en.wikipedia.org/wiki/Computer).
+* The [X Window system](general/accessing-the-clusters/graphical-user-interface/x-window-system/) is the principal way to get GUI access to the clusters.
+* [Virtual Network Computing](general/accessing-the-clusters/graphical-user-interface/vnc/) is a graphical [desktop sharing](http://en.wikipedia.org/wiki/Desktop_sharing) system that uses the [Remote Frame Buffer protocol](http://en.wikipedia.org/wiki/RFB_protocol) to remotely control another [computer](http://en.wikipedia.org/wiki/Computer).
 
 ## VPN Access
 
-* Access IT4Innovations internal resources via [VPN](/general/accessing-the-clusters/vpn-access/).
+* Access IT4Innovations internal resources via [VPN](general/accessing-the-clusters/vpn-access/).
diff --git a/docs.it4i/anselm/storage.md b/docs.it4i/anselm/storage.md
index 03869fdc5..b5b3fb87b 100644
--- a/docs.it4i/anselm/storage.md
+++ b/docs.it4i/anselm/storage.md
@@ -105,7 +105,7 @@ The HOME filesystem is mounted in directory /home. Users home directories /home/
 
 The HOME filesystem should not be used to archive data of past Projects or other unrelated data.
 
-The files on HOME filesystem will not be deleted until end of the [users lifecycle](/general/obtaining-login-credentials/obtaining-login-credentials/).
+The files on the HOME filesystem will not be deleted until the end of the [user's lifecycle](general/obtaining-login-credentials/obtaining-login-credentials/).
 
 The filesystem is backed up, such that it can be restored in case of a catastrophic failure resulting in significant data loss. This backup, however, is not intended to restore old versions of user data or to restore (accidentally) deleted files.
 
diff --git a/docs.it4i/environment-and-modules.md b/docs.it4i/environment-and-modules.md
index 77db454c0..632a7574f 100644
--- a/docs.it4i/environment-and-modules.md
+++ b/docs.it4i/environment-and-modules.md
@@ -30,7 +30,7 @@ fi
 
 In order to configure your shell for running a particular application on the clusters, we use the Module package interface.
 
-Application modules on clusters are built using [EasyBuild](/software/tools/easybuild/). The modules are divided into the following structure:
+Application modules on clusters are built using [EasyBuild](software/tools/easybuild/). The modules are divided into the following structure:
 
 ```
  base: Default module class
@@ -61,4 +61,4 @@ Application modules on clusters are built using [EasyBuild](/software/tools/easy
 !!! note
     The modules set up the application paths, library paths, and environment variables for running a particular application.
 
-The modules may be loaded, unloaded and switched, according to momentary needs. For details see [here](/software/modules/lmod/).
+The modules may be loaded, unloaded, and switched as needed. For details see [here](software/modules/lmod/).
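 
 For instance, with the Lmod `ml` shorthand (a sketch; module names and versions differ between clusters):
 
 ```console
 $ ml av       # list available modules
 $ ml intel    # load a module
 $ ml          # show currently loaded modules
 ```
 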
diff --git a/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md
index 1e116b355..864160517 100644
--- a/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md
+++ b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md
@@ -2,7 +2,7 @@
 
 The **Virtual Network Computing** (**VNC**) is a graphical [desktop sharing](http://en.wikipedia.org/wiki/Desktop_sharing "Desktop sharing") system that uses the [Remote Frame Buffer protocol (RFB)](http://en.wikipedia.org/wiki/RFB_protocol "RFB protocol") to remotely control another [computer](http://en.wikipedia.org/wiki/Computer "Computer"). It transmits the [keyboard](http://en.wikipedia.org/wiki/Computer_keyboard "Computer keyboard") and [mouse](http://en.wikipedia.org/wiki/Computer_mouse "Computer mouse") events from one computer to another, relaying the graphical [screen](http://en.wikipedia.org/wiki/Computer_screen "Computer screen") updates back in the other direction, over a [network](http://en.wikipedia.org/wiki/Computer_network "Computer network").
 
-Vnc-based connections are usually faster (require less network bandwidth) then [X11](/general/accessing-the-clusters/graphical-user-interface/x-window-system) applications forwarded directly through ssh.
+VNC-based connections are usually faster (require less network bandwidth) than [X11](general/accessing-the-clusters/graphical-user-interface/x-window-system) applications forwarded directly through SSH.
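 
 For comparison, direct X11 forwarding through SSH - the alternative referred to above - is typically enabled like this (a sketch; the username is illustrative):
 
 ```console
 local $ ssh -X username@anselm.it4i.cz
 ```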
 
 The recommended clients are [TightVNC](http://www.tightvnc.com) or [TigerVNC](http://sourceforge.net/apps/mediawiki/tigervnc/index.php?title=Main_Page) (free, open source, available for almost any platform).
 
@@ -230,7 +230,7 @@ Allow incoming X11 graphics from the compute nodes at the login node:
 $ xhost +
 ```
 
-Get an interactive session on a compute node (for more detailed info [look here](/anselm/job-submission-and-execution/)). Use the **-v DISPLAY** option to propagate the DISPLAY on the compute node. In this example, we want a complete node (16 cores in this example) from the production queue:
+Get an interactive session on a compute node (for more detailed info [look here](anselm/job-submission-and-execution/)). Use the **-v DISPLAY** option to propagate the DISPLAY on the compute node. In this example, we want a complete node (16 cores in this example) from the production queue:
 
 ```console
 $ qsub -I -v DISPLAY=$(uname -n):$(echo $DISPLAY | cut -d ':' -f 2) -A PROJECT_ID -q qprod -l select=1:ncpus=16
diff --git a/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/putty.md b/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/putty.md
index 856dd6619..f3c725f47 100644
--- a/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/putty.md
+++ b/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/putty.md
@@ -23,7 +23,7 @@ We recommned you to download "**A Windows installer for everything except PuTTYt
 * Category - Connection -  SSH - Auth:
       Select Attempt authentication using Pageant.
       Select Allow agent forwarding.
-      Browse and select your [private key](ssh-keys/) file.
+      Browse and select your [private key](general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) file.
 
 ![](../../../img/PuTTY_keyV.png)
 
@@ -36,7 +36,7 @@ We recommned you to download "**A Windows installer for everything except PuTTYt
 ![](../../../img/PuTTY_open_Salomon.png)
 
 * Enter your username if the _Host Name_ input is not in the format "username@salomon.it4i.cz".
-* Enter passphrase for selected [private key](/general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) file if Pageant **SSH authentication agent is not used.**
+* Enter the passphrase for the selected [private key](general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) file if the Pageant **SSH authentication agent** is not used.
 
 ## Other PuTTY Settings
 
@@ -63,7 +63,7 @@ PuTTYgen is the PuTTY key generator. You can load in an existing private key and
 
 You can change the password of your SSH key with "PuTTY Key Generator". Make sure to backup the key.
 
-* Load your [private key](/general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) file with _Load_ button.
+* Load your [private key](general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) file with the _Load_ button.
 * Enter your current passphrase.
 * Change key passphrase.
 * Confirm key passphrase.
@@ -104,4 +104,4 @@ You can generate an additional public/private key pair and insert public key int
 ![](../../../img/PuttyKeygenerator_006V.png)
 
 * Now you can insert an additional public key into the authorized_keys file for authentication with your own private key.
-      You must log in using ssh key received after registration. Then proceed to [How to add your own key](/general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/).
+      You must log in using the SSH key received after registration. Then proceed to [How to add your own key](general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/).
diff --git a/docs.it4i/general/accessing-the-clusters/vpn-access.md b/docs.it4i/general/accessing-the-clusters/vpn-access.md
index 228435341..3275411fc 100644
--- a/docs.it4i/general/accessing-the-clusters/vpn-access.md
+++ b/docs.it4i/general/accessing-the-clusters/vpn-access.md
@@ -15,7 +15,7 @@ It is impossible to connect to VPN from other operating systems.
 
 ## VPN Client Installation
 
-You can install VPN client from web interface after successful login with [IT4I credentials](/general/obtaining-login-credentials/obtaining-login-credentials/#login-credentials) on address [https://vpn.it4i.cz/user](https://vpn.it4i.cz/user)
+You can install the VPN client from the web interface after a successful login with [IT4I credentials](general/obtaining-login-credentials/obtaining-login-credentials/#login-credentials) at [https://vpn.it4i.cz/user](https://vpn.it4i.cz/user).
 
 ![](../../img/vpn_web_login.png)
 
diff --git a/docs.it4i/general/applying-for-resources.md b/docs.it4i/general/applying-for-resources.md
index a662320a9..40307695f 100644
--- a/docs.it4i/general/applying-for-resources.md
+++ b/docs.it4i/general/applying-for-resources.md
@@ -8,4 +8,4 @@ Anyone is welcomed to apply via the [Directors Discretion.](http://www.it4i.cz/o
 
 Foreign (mostly European) users can obtain computational resources via the [PRACE (DECI) program](http://www.prace-ri.eu/DECI-Projects).
 
-In all cases, IT4Innovations’ access mechanisms are aimed at distributing computational resources while taking into account the development and application of supercomputing methods and their benefits and usefulness for society. The applicants are expected to submit a proposal. In the proposal, the applicants **apply for a particular amount of core-hours** of computational resources. The requested core-hours should be substantiated by scientific excellence of the proposal, its computational maturity and expected impacts. Proposals do undergo a scientific, technical and economic evaluation. The allocation decisions are based on this evaluation. More information at [Computing resources allocation](http://www.it4i.cz/computing-resources-allocation/?lang=en) and [Obtaining Login Credentials](/general/obtaining-login-credentials/obtaining-login-credentials/) page.
+In all cases, IT4Innovations’ access mechanisms are aimed at distributing computational resources while taking into account the development and application of supercomputing methods and their benefits and usefulness for society. The applicants are expected to submit a proposal. In the proposal, the applicants **apply for a particular amount of core-hours** of computational resources. The requested core-hours should be substantiated by the scientific excellence of the proposal, its computational maturity, and expected impacts. Proposals undergo a scientific, technical, and economic evaluation. The allocation decisions are based on this evaluation. More information is available on the [Computing resources allocation](http://www.it4i.cz/computing-resources-allocation/?lang=en) and [Obtaining Login Credentials](general/obtaining-login-credentials/obtaining-login-credentials/) pages.
diff --git a/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md b/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md
index e5fc6d913..0ab2fa3a7 100644
--- a/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md
+++ b/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md
@@ -7,7 +7,7 @@ The computational resources of IT4I are allocated by the Allocation Committee to
 ![](../../img/Authorization_chain.png)
 
 !!! note
-    You need to either [become the PI](/general/applying-for-resources) or [be named as a collaborator](#authorization-by-web) by a PI in order to access and use the clusters.
+    You need to either [become the PI](general/applying-for-resources) or [be named as a collaborator](#authorization-by-web) by a PI in order to access and use the clusters.
 
 The Head of Supercomputing Services acts as the PI of project DD-13-5. By joining this project, you may **access and explore the clusters** and use software, development environments, and computers via the qexp and qfree queues. You may use these resources for your own education/research; no paperwork is required. All IT4I employees may contact the Head of Supercomputing Services in order to obtain **free access to the clusters**.
 
@@ -141,7 +141,7 @@ You will receive your personal login credentials by protected e-mail. The login
 1. ssh private key and private key passphrase
 1. system password
 
-The clusters are accessed by the [private key](/general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) and username. Username and password is used for login to the [information systems](http://support.it4i.cz/).
+The clusters are accessed by the [private key](general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) and username. The username and password are used for login to the [information systems](http://support.it4i.cz/).
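 
 A typical key-based login then looks like this (a sketch; the username and key path are illustrative):
 
 ```console
 local $ ssh -i ~/.ssh/id_rsa username@salomon.it4i.cz
 ```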
 
 ## Authorization by Web
 
@@ -192,7 +192,7 @@ On Linux, use
 local $ ssh-keygen -f id_rsa -p
 ```
 
-On Windows, use [PuTTY Key Generator](/general/accessing-the-clusters/shell-access-and-data-transfer/putty/#putty-key-generator).
+On Windows, use [PuTTY Key Generator](general/accessing-the-clusters/shell-access-and-data-transfer/putty/#putty-key-generator).
 
 ## Certificates for Digital Signatures
 
@@ -207,7 +207,7 @@ Certificate generation process for academic purposes, utilizing the CESNET certi
 
 If you are not able to obtain a certificate from any of the respected certification authorities, follow the Alternative Way below.
 
-A FAQ about certificates can be found here: [Certificates FAQ](/general/obtaining-login-credentials/certificates-faq/).
+A FAQ about certificates can be found here: [Certificates FAQ](general/obtaining-login-credentials/certificates-faq/).
 
 ## Alternative Way to Personal Certificate
 
diff --git a/docs.it4i/general/resource_allocation_and_job_execution.md b/docs.it4i/general/resource_allocation_and_job_execution.md
index 7789368a7..81f5c9561 100644
--- a/docs.it4i/general/resource_allocation_and_job_execution.md
+++ b/docs.it4i/general/resource_allocation_and_job_execution.md
@@ -1,10 +1,10 @@
 # Resource Allocation and Job Execution
 
-To run a [job](/#terminology-frequently-used-on-these-pages), [computational resources](/salomon/resources-allocation-policy#resource-accounting-policy) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which distributes workloads across the supercomputer. Extensive information about PBS Pro can be found in the [PBS Pro User's Guide](/pbspro).
+To run a [job](/#terminology-frequently-used-on-these-pages), [computational resources](salomon/resources-allocation-policy#resource-accounting-policy) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which distributes workloads across the supercomputer. Extensive information about PBS Pro can be found in the [PBS Pro User's Guide](/pbspro).
 
 ## Resources Allocation Policy
 
-The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and resources available to the Project. [The Fair-share](/salomon/job-priority#fair-share-priority)  ensures that individual users may consume approximately equal amount of resources per week. The resources are accessible via queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. Following queues are are the most important:
+The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and resources available to the Project. [The Fair-share](salomon/job-priority#fair-share-priority) ensures that individual users may consume approximately equal amounts of resources per week. The resources are accessible via queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. The following queues are the most important:
 
 * **qexp**, the Express queue
 * **qprod**, the Production queue
@@ -16,7 +16,7 @@ The resources are allocated to the job in a fair-share fashion, subject to const
 !!! note
     Check the queue status at [https://extranet.it4i.cz/](https://extranet.it4i.cz/)
 
-Read more on the [Resource AllocationPolicy](/salomon/resources-allocation-policy) page.
+Read more on the [Resource Allocation Policy](salomon/resources-allocation-policy) page.
 
 ## Job Submission and Execution
 
@@ -25,7 +25,7 @@ Read more on the [Resource AllocationPolicy](/salomon/resources-allocation-polic
 
 The qsub command submits the job into the queue and creates a request to the PBS Job manager for allocation of the specified resources. The **smallest allocation unit is an entire node, 16 cores**, with the exception of the qexp queue. The resources will be allocated when available, subject to allocation policies and constraints. **After the resources are allocated, the jobscript or interactive shell is executed on the first of the allocated nodes.**
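 
 A minimal example of such a request - two full nodes in the production queue (a sketch; `PROJECT_ID` is a placeholder):
 
 ```console
 $ qsub -A PROJECT_ID -q qprod -l select=2:ncpus=16 ./myjob
 ```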
 
-Read more on the [Job submission and execution](/salomon/job-submission-and-execution) page.
+Read more on the [Job submission and execution](salomon/job-submission-and-execution) page.
 
 ## Capacity Computing
 
@@ -36,4 +36,4 @@ Use GNU Parallel and/or Job arrays when running (many) single core jobs.
 
 In many cases, it is useful to submit a huge number (100+) of computational jobs into the PBS queue system. A huge number of (small) jobs is one of the most effective ways to execute embarrassingly parallel calculations, achieving the best runtime, throughput, and computer utilization. In this chapter, we discuss the recommended way to run a huge number of jobs, including **ways to run a huge number of single-core jobs**.
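 
 As a minimal illustration of the single-core case, GNU Parallel can keep all 16 cores of one node busy with many short tasks (a sketch; `mytask.sh` is a hypothetical script taking a task number as its argument):
 
 ```console
 $ parallel -j 16 ./mytask.sh {} ::: {1..100}
 ```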
 
-Read more on [Capacity computing](/salomon/capacity-computing) page.
+Read more on the [Capacity computing](salomon/capacity-computing) page.
diff --git a/docs.it4i/job-features.md b/docs.it4i/job-features.md
index 94200f94c..fc7362fd8 100644
--- a/docs.it4i/job-features.md
+++ b/docs.it4i/job-features.md
@@ -44,7 +44,7 @@ Configure network for virtualization, create interconnect for fast communication
 $ qsub ... -l virt_network=true
 ```
 
-[See Tap Interconnect](/software/tools/virtualization/#tap-interconnect)
+[See Tap Interconnect](software/tools/virtualization/#tap-interconnect)
 
 ## x86 Adapt Support
 
diff --git a/docs.it4i/prace.md b/docs.it4i/prace.md
index 3b9b0893e..ceb5b53d8 100644
--- a/docs.it4i/prace.md
+++ b/docs.it4i/prace.md
@@ -2,7 +2,7 @@
 
 ## Introduction
 
-PRACE users coming to the TIER-1 systems offered through the DECI calls are in general treated as standard users and so most of the general documentation applies to them as well. This section shows the main differences for quicker orientation, but often uses references to the original documentation. PRACE users who don't undergo the full procedure (including signing the IT4I AuP on top of the PRACE AuP) will not have a password and thus access to some services intended for regular users. This can lower their comfort, but otherwise they should be able to use the TIER-1 system as intended. Please see the [Obtaining Login Credentials section](/general/obtaining-login-credentials/obtaining-login-credentials/), if the same level of access is required.
+PRACE users coming to the TIER-1 systems offered through the DECI calls are in general treated as standard users and so most of the general documentation applies to them as well. This section shows the main differences for quicker orientation, but often uses references to the original documentation. PRACE users who don't undergo the full procedure (including signing the IT4I AuP on top of the PRACE AuP) will not have a password and thus access to some services intended for regular users. This can lower their comfort, but otherwise they should be able to use the TIER-1 system as intended. Please see the [Obtaining Login Credentials section](general/obtaining-login-credentials/obtaining-login-credentials/), if the same level of access is required.
 
 All general [PRACE User Documentation](http://www.prace-ri.eu/user-documentation/) should be read before continuing reading the local documentation here.
 
@@ -10,13 +10,13 @@ All general [PRACE User Documentation](http://www.prace-ri.eu/user-documentation
 
 If you have any troubles, need information, request support or want to install additional software, use [PRACE Helpdesk](http://www.prace-ri.eu/helpdesk-guide264/).
 
-Information about the local services are provided in the [introduction of general user documentation Salomon](/salomon/introduction/) and [introduction of general user documentation Anselm](/anselm/introduction/). Please keep in mind, that standard PRACE accounts don't have a password to access the web interface of the local (IT4Innovations) request tracker and thus a new ticket should be created by sending an e-mail to support[at]it4i.cz.
+Information about the local services is provided in the [introduction to the general user documentation for Salomon](salomon/introduction/) and the [introduction to the general user documentation for Anselm](anselm/introduction/). Please keep in mind that standard PRACE accounts don't have a password to access the web interface of the local (IT4Innovations) request tracker, and thus a new ticket should be created by sending an e-mail to support[at]it4i.cz.
 
 ## Obtaining Login Credentials
 
 In general, PRACE users already have a PRACE account set up through their HOMESITE (the institution from their country) as a result of an awarded PRACE project proposal. This includes a signed PRACE AuP, generated and registered certificates, etc.
 
-If there's a special need a PRACE user can get a standard (local) account at IT4Innovations. To get an account on a cluster, the user needs to obtain the login credentials. The procedure is the same as for general users of the cluster, so see the corresponding [section of the general documentation here](/general/obtaining-login-credentials/obtaining-login-credentials/).
+If there's a special need, a PRACE user can get a standard (local) account at IT4Innovations. To get an account on a cluster, the user needs to obtain the login credentials. The procedure is the same as for general users of the cluster, so see the corresponding [section of the general documentation here](general/obtaining-login-credentials/obtaining-login-credentials/).
 
 ## Accessing the Cluster
 
@@ -147,9 +147,9 @@ $ gsiscp -P 2222 anselm-prace.it4i.cz:_ANSELM_PATH_TO_YOUR_FILE_ _LOCAL_PATH_TO_
 
 ### Access to X11 Applications (VNC)
 
-If the user needs to run X11 based graphical application and does not have a X11 server, the applications can be run using VNC service. If the user is using regular SSH based access, see the [section in general documentation](/general/accessing-the-clusters/graphical-user-interface/x-window-system/).
+If the user needs to run an X11-based graphical application and does not have an X11 server, the applications can be run using the VNC service. If the user is using regular SSH-based access, see the [section in general documentation](general/accessing-the-clusters/graphical-user-interface/x-window-system/).
 
-If the user uses GSI SSH based access, then the procedure is similar to the SSH based access ([look here](/general/accessing-the-clusters/graphical-user-interface/x-window-system/)), only the port forwarding must be done using GSI SSH:
+If the user uses GSI SSH-based access, then the procedure is similar to the SSH-based access ([look here](general/accessing-the-clusters/graphical-user-interface/x-window-system/)), only the port forwarding must be done using GSI SSH:
 
 ```console
 $ gsissh -p 2222 salomon.it4i.cz -L 5961:localhost:5961
@@ -157,11 +157,11 @@ $ gsissh -p 2222 salomon.it4i.cz -L 5961:localhost:5961
 
 ### Access With SSH
 
-After successful obtainment of login credentials for the local IT4Innovations account, the PRACE users can access the cluster as regular users using SSH. For more information see [the section in general documentation for Salomon](/salomon/shell-and-data-access/) and [the section in general documentation for Anselm](/anselm/shell-and-data-access/).
+After successfully obtaining login credentials for the local IT4Innovations account, PRACE users can access the cluster as regular users using SSH. For more information see [the section in the general documentation for Salomon](salomon/shell-and-data-access/) and [the section in the general documentation for Anselm](anselm/shell-and-data-access/).
 
 ## File Transfers
 
-PRACE users can use the same transfer mechanisms as regular users (if they've undergone the full registration procedure). For information about this, see [the section in the general documentation for Salomon](/salomon/shell-and-data-access/) and [the section in general documentation for Anselm](/anselm/shell-and-data-access/).
+PRACE users can use the same transfer mechanisms as regular users (if they've undergone the full registration procedure). For information about this, see [the section in the general documentation for Salomon](salomon/shell-and-data-access/) and [the section in the general documentation for Anselm](anselm/shell-and-data-access/).
 
 Apart from the standard mechanisms, a GridFTP server running the Globus Toolkit GridFTP service is available for PRACE users to transfer data to/from the Salomon cluster. The service is available from the public Internet as well as from the internal PRACE network (accessible only from other PRACE partners).
 
@@ -302,7 +302,7 @@ Generally both shared file systems are available through GridFTP:
 | /home                   | Lustre     | Default HOME directories of users in format /home/prace/login/ |
 | /scratch                | Lustre     | Shared SCRATCH mounted on the whole cluster                    |
 
-More information about the shared file systems is available [for Salomon here](/salomon/storage/) and [for anselm here](/anselm/storage).
+More information about the shared file systems is available [for Salomon here](salomon/storage/) and [for Anselm here](anselm/storage).
 
 !!! hint
     `prace` directory is used for PRACE users on the SCRATCH file system.
@@ -318,7 +318,7 @@ Only Salomon cluster /scratch:
 
 There are some limitations for PRACE users when using the cluster. By default, PRACE users aren't allowed to access special queues in PBS Pro that grant high priority or exclusive access to special equipment such as accelerated nodes and high-memory (fat) nodes. There may also be restrictions on obtaining a working license for the commercial software installed on the cluster, mostly because of the license agreement or because of an insufficient number of licenses.
 
-For production runs always use scratch file systems. The available file systems are described [for Salomon here](/salomon/storage/) and [for Anselm here](/anselm/storage).
+For production runs always use scratch file systems. The available file systems are described [for Salomon here](salomon/storage/) and [for Anselm here](anselm/storage).
 
 ### Software, Modules and PRACE Common Production Environment
 
@@ -332,7 +332,7 @@ $ ml prace
 
 ### Resource Allocation and Job Execution
 
-General information about the resource allocation, job queuing and job execution is in this [section of general documentation for Salomon](/salomon/resources-allocation-policy/) and [section of general documentation for Anselm](/anselm/resources-allocation-policy/).
+General information about resource allocation, job queuing, and job execution can be found in the [section of the general documentation for Salomon](salomon/resources-allocation-policy/) and the [section of the general documentation for Anselm](anselm/resources-allocation-policy/).
 
 For PRACE users, the default production run queue is "qprace". PRACE users can also use two other queues "qexp" and "qfree".
 
@@ -356,7 +356,7 @@ For Anselm:
 
 ### Accounting & Quota
 
-The resources that are currently subject to accounting are the core hours. The core hours are accounted on the wall clock basis. The accounting runs whenever the computational cores are allocated or blocked via the PBS Pro workload manager (the qsub command), regardless of whether the cores are actually used for any calculation. See [example in the general documentation for Salomon](/salomon/resources-allocation-policy/) and [example in the general documentation for Anselm](/anselm/resources-allocation-policy/).
+The resources that are currently subject to accounting are the core hours. The core hours are accounted on the wall clock basis. The accounting runs whenever the computational cores are allocated or blocked via the PBS Pro workload manager (the qsub command), regardless of whether the cores are actually used for any calculation. See [example in the general documentation for Salomon](salomon/resources-allocation-policy/) and [example in the general documentation for Anselm](anselm/resources-allocation-policy/).
 
 PRACE users should check their project accounting using the [PRACE Accounting Tool (DART)](http://www.prace-ri.eu/accounting-report-tool/).
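 
 Users who have also obtained local IT4I credentials can check their core-hour balance directly on the cluster (assuming the standard `it4ifree` utility is available to them, as it is for regular users):
 
 ```console
 $ it4ifree
 ```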
 
diff --git a/docs.it4i/salomon/capacity-computing.md b/docs.it4i/salomon/capacity-computing.md
index 01ba62296..f7be485d1 100644
--- a/docs.it4i/salomon/capacity-computing.md
+++ b/docs.it4i/salomon/capacity-computing.md
@@ -147,7 +147,7 @@ Display status information for all user's subjobs.
 $ qstat -u $USER -tJ
 ```
 
-Read more on job arrays in the [PBSPro Users guide](/software/pbspro/).
+Read more on job arrays in the [PBSPro Users guide](software/pbspro/).
 
 ## GNU Parallel
 
diff --git a/docs.it4i/salomon/compute-nodes.md b/docs.it4i/salomon/compute-nodes.md
index 629a85257..8eae726d9 100644
--- a/docs.it4i/salomon/compute-nodes.md
+++ b/docs.it4i/salomon/compute-nodes.md
@@ -5,7 +5,7 @@
 Salomon is a cluster of x86-64 Intel-based nodes. The cluster contains two types of compute nodes of the same processor type and memory size.
 Compute nodes with a MIC accelerator **contain two Intel Xeon Phi 7120P accelerators.**
 
-[More about schematic representation of the Salomon cluster compute nodes IB topology](/salomon/ib-single-plane-topology/).
+[More about schematic representation of the Salomon cluster compute nodes IB topology](salomon/ib-single-plane-topology/).
 
 ### Compute Nodes Without Accelerator
 
diff --git a/docs.it4i/salomon/hardware-overview.md b/docs.it4i/salomon/hardware-overview.md
index 4a8e68e10..59c2c42ff 100644
--- a/docs.it4i/salomon/hardware-overview.md
+++ b/docs.it4i/salomon/hardware-overview.md
@@ -4,7 +4,7 @@
 
 The Salomon cluster consists of 1008 computational nodes, of which 576 are regular compute nodes and 432 are accelerated nodes. Each node is a powerful x86-64 computer, equipped with 24 cores (two twelve-core Intel Xeon processors) and 128 GB RAM. The nodes are interlinked by high-speed InfiniBand and Ethernet networks. All nodes share 0.5 PB /home NFS disk storage to store user files. Users may use a DDN Lustre shared storage with a capacity of 1.69 PB, which is available for scratch project data. User access to the Salomon cluster is provided by four login nodes.
 
-[More about schematic representation of the Salomon cluster compute nodes IB topology](/salomon/ib-single-plane-topology/).
+[More about schematic representation of the Salomon cluster compute nodes IB topology](salomon/ib-single-plane-topology/).
 
 ![Salomon](../img/salomon-2.jpg)
 
@@ -17,7 +17,7 @@ The parameters are summarized in the following tables:
 | Primary purpose                             | High Performance Computing                  |
 | Architecture of compute nodes               | x86-64                                      |
 | Operating system                            | CentOS 6.x Linux                            |
-| [**Compute nodes**](/salomon/compute-nodes/) |                                             |
+| [**Compute nodes**](salomon/compute-nodes/) |                                             |
 | Total                                       | 1008                                        |
 | Processor                                   | 2 x Intel Xeon E5-2680v3, 2.5 GHz, 12 cores |
 | RAM                                         | 128GB, 5.3 GB per core, DDR4@2133 MHz       |
@@ -36,7 +36,7 @@ The parameters are summarized in the following tables:
 | w/o accelerator | 576   | 2 x Intel Xeon E5-2680v3, 2.5 GHz | 24    | 128 GB | -                                             |
 | MIC accelerated | 432   | 2 x Intel Xeon E5-2680v3, 2.5 GHz | 24    | 128 GB | 2 x Intel Xeon Phi 7120P, 61 cores, 16 GB RAM |
 
-For more details refer to the [Compute nodes](/salomon/compute-nodes/).
+For more details refer to the [Compute nodes](salomon/compute-nodes/).
 
 ## Remote Visualization Nodes
 
diff --git a/docs.it4i/salomon/ib-single-plane-topology.md b/docs.it4i/salomon/ib-single-plane-topology.md
index 840303416..ab945605f 100644
--- a/docs.it4i/salomon/ib-single-plane-topology.md
+++ b/docs.it4i/salomon/ib-single-plane-topology.md
@@ -18,9 +18,9 @@ Each color in each physical IRU represents one dual-switch ASIC switch.
 
 ## IB Single-Plane Topology - Accelerated Nodes
 
-Each of the 3 inter-connected D racks are equivalent to one half of M-Cell rack. 18 x D rack with MIC accelerated nodes [r21-r38] are equivalent to 3 M-Cell racks as shown in a diagram [7D Enhanced Hypercube](/salomon/7d-enhanced-hypercube/).
+Each of the 3 interconnected D racks is equivalent to one half of an M-Cell rack. The 18 D racks with MIC-accelerated nodes [r21-r38] are equivalent to 3 M-Cell racks, as shown in the diagram [7D Enhanced Hypercube](salomon/7d-enhanced-hypercube/).
 
-As shown in a diagram [IB Topology](/salomon/7d-enhanced-hypercube/#ib-topology)
+As shown in the diagram [IB Topology](salomon/7d-enhanced-hypercube/#ib-topology):
 
 * Racks 21, 22, 23, 24, 25, 26 are equivalent to one M-Cell rack.
 * Racks 27, 28, 29, 30, 31, 32 are equivalent to one M-Cell rack.
diff --git a/docs.it4i/salomon/introduction.md b/docs.it4i/salomon/introduction.md
index e8e941d12..90625624a 100644
--- a/docs.it4i/salomon/introduction.md
+++ b/docs.it4i/salomon/introduction.md
@@ -1,6 +1,6 @@
 # Introduction
 
-Welcome to Salomon supercomputer cluster. The Salomon cluster consists of 1008 compute nodes, totalling 24192 compute cores with 129 TB RAM and giving over 2 Pflop/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 24 cores, and at least 128 GB RAM. Nodes are interconnected through a 7D Enhanced hypercube InfiniBand network and are equipped with Intel Xeon E5-2680v3 processors. The Salomon cluster consists of 576 nodes without accelerators, and 432 nodes equipped with Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](/salomon/hardware-overview/).
+Welcome to the Salomon supercomputer cluster. The Salomon cluster consists of 1008 compute nodes, totalling 24192 compute cores with 129 TB RAM and giving over 2 Pflop/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 24 cores and at least 128 GB RAM. Nodes are interconnected through a 7D Enhanced hypercube InfiniBand network and are equipped with Intel Xeon E5-2680v3 processors. The Salomon cluster consists of 576 nodes without accelerators and 432 nodes equipped with Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](salomon/hardware-overview/).
 
 The cluster runs with a [CentOS Linux](http://www.bull.com/bullx-logiciels/systeme-exploitation.html) operating system, which is compatible with the RedHat [Linux family](http://upload.wikimedia.org/wikipedia/commons/1/1b/Linux_Distribution_Timeline.svg).
 
diff --git a/docs.it4i/salomon/job-priority.md b/docs.it4i/salomon/job-priority.md
index 906d6753a..e4515f3d9 100644
--- a/docs.it4i/salomon/job-priority.md
+++ b/docs.it4i/salomon/job-priority.md
@@ -72,6 +72,6 @@ Specifying more accurate walltime enables better scheduling, better execution ti
 
 ### Job Placement
 
-Job [placement can be controlled by flags during submission](/salomon/job-submission-and-execution/#job_placement).
+Job [placement can be controlled by flags during submission](salomon/job-submission-and-execution/#job_placement).
 
 ---8<--- "mathjax.md"
diff --git a/docs.it4i/salomon/job-submission-and-execution.md b/docs.it4i/salomon/job-submission-and-execution.md
index 7d4a86a0d..ee87ddcf2 100644
--- a/docs.it4i/salomon/job-submission-and-execution.md
+++ b/docs.it4i/salomon/job-submission-and-execution.md
@@ -102,7 +102,7 @@ exec_vnode = (r21u05n581-mic0:naccelerators=1:ncpus=0)
     Per NUMA node allocation.
     Jobs are isolated by cpusets.
 
-The UV2000 (node uv1) offers 3TB of RAM and 104 cores, distributed in 13 NUMA nodes. A NUMA node packs 8 cores and approx. 247GB RAM (with exception, node 11 has only 123GB RAM). In the PBS the UV2000 provides 13 chunks, a chunk per NUMA node (see [Resource allocation policy](/salomon/resources-allocation-policy/)). The jobs on UV2000 are isolated from each other by cpusets, so that a job by one user may not utilize CPU or memory allocated to a job by other user. Always, full chunks are allocated, a job may only use resources of the NUMA nodes allocated to itself.
+The UV2000 (node uv1) offers 3 TB of RAM and 104 cores, distributed across 13 NUMA nodes. Each NUMA node packs 8 cores and approx. 247 GB RAM (except node 11, which has only 123 GB RAM). In PBS, the UV2000 provides 13 chunks, one chunk per NUMA node (see [Resource allocation policy](salomon/resources-allocation-policy/)). The jobs on UV2000 are isolated from each other by cpusets, so that a job of one user may not utilize CPU or memory allocated to a job of another user. Full chunks are always allocated; a job may only use the resources of the NUMA nodes allocated to it.
 
 ```console
  $ qsub -A OPEN-0-0 -q qfat -l select=13 ./myjob
@@ -165,7 +165,7 @@ In this example, we allocate nodes r24u35n680 and r24u36n681, all 24 cores per n
 
 ### Placement by Network Location
 
-Network location of allocated nodes in the [InifiBand network](/salomon/network/) influences efficiency of network communication between nodes of job. Nodes on the same InifiBand switch communicate faster with lower latency than distant nodes. To improve communication efficiency of jobs, PBS scheduler on Salomon is configured to allocate nodes - from currently available resources - which are as close as possible in the network topology.
+The network location of allocated nodes in the [InfiniBand network](salomon/network/) influences the efficiency of network communication between the nodes of a job. Nodes on the same InfiniBand switch communicate faster, with lower latency, than distant nodes. To improve the communication efficiency of jobs, the PBS scheduler on Salomon is configured to allocate nodes - from currently available resources - which are as close as possible in the network topology.
 
 For communication intensive jobs it is possible to set a stricter requirement - to require nodes directly connected to the same InfiniBand switch or to require nodes located in the same dimension group of the InfiniBand network.
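+
+For example, a job requiring nodes on a single switch might be requested as follows (a sketch; the `switch` grouping resource is an assumption here, not taken from this hunk):
+
+```console
+$ # request 4 chunks placed on the same InfiniBand switch
+$ qsub -A OPEN-0-0 -q qprod -l select=4:ncpus=24 -l place=group=switch -I
+```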
 
@@ -238,7 +238,7 @@ Nodes located in the same dimension group may be allocated using node grouping o
 | 6D                  | ehc_6d         | 432,576          |
 | 7D                  | ehc_7d         | all              |
 
-In this example, we allocate 16 nodes in the same [hypercube dimension](/salomon/7d-enhanced-hypercube/) 1 group.
+In this example, we allocate 16 nodes within the same [hypercube dimension](salomon/7d-enhanced-hypercube/) 1 group (ehc_1d).
 
 ```console
 $ qsub -A OPEN-0-0 -q qprod -l select=16:ncpus=24 -l place=group=ehc_1d -I
@@ -516,7 +516,7 @@ HTML commented section #2 (examples need to be reworked)
 !!! note
     The local scratch directory is often useful for single node jobs. The local scratch will be deleted immediately after the job ends. Be very careful: use of the RAM disk filesystem is at the expense of operational memory.
 
-Example jobscript for single node calculation, using [local scratch](/salomon/storage/) on the node:
+An example jobscript for a single node calculation, using the [local scratch](salomon/storage/) on the node:
 
 ```bash
 #!/bin/bash
diff --git a/docs.it4i/salomon/network.md b/docs.it4i/salomon/network.md
index 13d5db94c..252fe034a 100644
--- a/docs.it4i/salomon/network.md
+++ b/docs.it4i/salomon/network.md
@@ -5,10 +5,10 @@ network. Only [InfiniBand](http://en.wikipedia.org/wiki/InfiniBand) network may
 
 ## InfiniBand Network
 
-All compute and login nodes of Salomon are interconnected by 7D Enhanced hypercube [Infiniband](http://en.wikipedia.org/wiki/InfiniBand) network (56 Gbps). The network topology is a [7D Enhanced hypercube](/salomon/7d-enhanced-hypercube/).
+All compute and login nodes of Salomon are interconnected by the 7D Enhanced hypercube [InfiniBand](http://en.wikipedia.org/wiki/InfiniBand) network (56 Gbps). The network topology is a [7D Enhanced hypercube](salomon/7d-enhanced-hypercube/).
 
-Read more about schematic representation of the Salomon cluster [IB single-plain topology](/salomon/ib-single-plane-topology/)
-([hypercube dimension](/salomon/7d-enhanced-hypercube/)).
+Read more about the schematic representation of the Salomon cluster in [IB single-plane topology](salomon/ib-single-plane-topology/)
+([hypercube dimension](salomon/7d-enhanced-hypercube/)).
 
 The compute nodes may be accessed via the InfiniBand network using the ib0 network interface, in the address range 10.17.0.0 (mask 255.255.224.0). MPI may be used to establish native InfiniBand connections among the nodes.
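+
+For instance, a node's InfiniBand address can be checked from within a job (an illustrative command, not part of the original text):
+
+```console
+$ # show the IPoIB address assigned to the ib0 interface
+$ ip addr show ib0
+```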
 
diff --git a/docs.it4i/salomon/resources-allocation-policy.md b/docs.it4i/salomon/resources-allocation-policy.md
index 01b070b25..54a231a94 100644
--- a/docs.it4i/salomon/resources-allocation-policy.md
+++ b/docs.it4i/salomon/resources-allocation-policy.md
@@ -2,7 +2,7 @@
 
 ## Job Queue Policies
 
-The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and resources available to the Project. The fair-share at Anselm ensures that individual users may consume approximately equal amount of resources per week. Detailed information in the [Job scheduling](/salomon/job-priority/) section. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. Following table provides the queue partitioning overview:
+The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and the resources available to the Project. The fair-share at Salomon ensures that individual users may consume an approximately equal amount of resources per week. Detailed information can be found in the [Job scheduling](salomon/job-priority/) section. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. The following table provides the queue partitioning overview:
 
 !!! note
     Check the queue status at <https://extranet.it4i.cz/rsweb/salomon/>
@@ -35,7 +35,7 @@ The resources are allocated to the job in a fair-share fashion, subject to const
 
 ## Queue Notes
 
-The job wall-clock time defaults to **half the maximum time**, see table above. Longer wall time limits can be  [set manually, see examples](/salomon/job-submission-and-execution/).
+The job wall-clock time defaults to **half the maximum time**, see the table above. Longer wall time limits can be [set manually, see examples](salomon/job-submission-and-execution/).
 
 Jobs that exceed the reserved wall-clock time (Req'd Time) get killed automatically. The wall-clock time limit can be changed for queuing jobs (state Q) using the qalter command; however, it cannot be changed for a running job (state R).
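+
+For example, extending the walltime of a queued job might look like this (a sketch; substitute your own job ID):
+
+```console
+$ # raise the walltime limit of a queued (state Q) job to 12 hours
+$ qalter -l walltime=12:00:00 <job_id>
+```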
 
diff --git a/docs.it4i/salomon/shell-and-data-access.md b/docs.it4i/salomon/shell-and-data-access.md
index 0183662f0..e7fd6b57f 100644
--- a/docs.it4i/salomon/shell-and-data-access.md
+++ b/docs.it4i/salomon/shell-and-data-access.md
@@ -65,7 +65,7 @@ Last login: Tue Jul 9 15:57:38 2018 from your-host.example.com
 ```
 
 !!! note
-    The environment is **not** shared between login nodes, except for [shared filesystems](/salomon/storage/).
+    The environment is **not** shared between login nodes, except for [shared filesystems](salomon/storage/).
 
 ## Data Transfer
 
@@ -79,7 +79,7 @@ Data in and out of the system may be transferred by the [scp](http://en.wikipedi
 | login3.salomon.it4i.cz | 22   | scp, sftp |
 | login4.salomon.it4i.cz | 22   | scp, sftp |
 
-The authentication is by the [private key](/general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/)
+The authentication is via the [private key](general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/).
 
 On Linux or Mac, use an scp or sftp client to transfer the data to Salomon:
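+
+A minimal sketch (substitute your own username and key path):
+
+```console
+$ # copy a local file to your home directory on Salomon
+$ scp -i /path/to/id_rsa my-local-file username@salomon.it4i.cz:
+```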
 
@@ -115,7 +115,7 @@ $ man sshfs
 
 On Windows, use [WinSCP client](http://winscp.net/eng/download.php) to transfer the data. The [win-sshfs client](http://code.google.com/p/win-sshfs/) provides a way to mount the Salomon filesystems directly as an external disc.
 
-More information about the shared file systems is available [here](/salomon/storage/).
+More information about the shared file systems is available [here](salomon/storage/).
 
 ## Connection Restrictions
 
@@ -199,9 +199,9 @@ Now, configure the applications proxy settings to **localhost:6000**. Use port f
 
 ## Graphical User Interface
 
-* The [X Window system](/general/accessing-the-clusters/graphical-user-interface/x-window-system/) is a principal way to get GUI access to the clusters.
+* The [X Window system](general/accessing-the-clusters/graphical-user-interface/x-window-system/) is a principal way to get GUI access to the clusters.
 * The [Virtual Network Computing](../general/accessing-the-clusters/graphical-user-interface/vnc/) is a graphical [desktop sharing](http://en.wikipedia.org/wiki/Desktop_sharing) system that uses the [Remote Frame Buffer protocol](http://en.wikipedia.org/wiki/RFB_protocol) to remotely control another [computer](http://en.wikipedia.org/wiki/Computer).
 
 ## VPN Access
 
-* Access to IT4Innovations internal resources via [VPN](/general/accessing-the-clusters/vpn-access/).
+* Access to IT4Innovations internal resources via [VPN](general/accessing-the-clusters/vpn-access/).
diff --git a/docs.it4i/salomon/storage.md b/docs.it4i/salomon/storage.md
index 84506c38e..53336d648 100644
--- a/docs.it4i/salomon/storage.md
+++ b/docs.it4i/salomon/storage.md
@@ -235,7 +235,7 @@ Users home directories /home/username reside on HOME file system. Accessible cap
 
 The HOME should not be used to archive data of past Projects or other unrelated data.
 
-The files on HOME will not be deleted until end of the [users lifecycle](/general/obtaining-login-credentials/obtaining-login-credentials/).
+The files on HOME will not be deleted until the end of the [user's lifecycle](general/obtaining-login-credentials/obtaining-login-credentials/).
 
 The workspace is backed up, such that it can be restored in case of a catastrophic failure resulting in significant data loss. This backup, however, is not intended to restore old versions of user data or to restore (accidentally) deleted files.
 
diff --git a/docs.it4i/salomon/visualization.md b/docs.it4i/salomon/visualization.md
index 88a1d5e0e..769f7a024 100644
--- a/docs.it4i/salomon/visualization.md
+++ b/docs.it4i/salomon/visualization.md
@@ -14,8 +14,8 @@ Remote visualization with NICE DCV software is availabe on two nodes.
 
 ## References
 
-* [Graphical User Interface](/salomon/shell-and-data-access/#graphical-user-interface)
-* [VPN Access](/salomon/shell-and-data-access/#vpn-access)
+* [Graphical User Interface](salomon/shell-and-data-access/#graphical-user-interface)
+* [VPN Access](salomon/shell-and-data-access/#vpn-access)
 
 ## Install and Run
 
@@ -25,7 +25,7 @@ Remote visualization with NICE DCV software is availabe on two nodes.
 * [Linux download](http://www.nice-software.com/storage/nice-dcv/2016.0/endstation/linux/nice-dcv-endstation-2016.0-17066.run)
 * [Windows download](http://www.nice-software.com/storage/nice-dcv/2016.0/endstation/win/nice-dcv-endstation-2016.0-17066-Release.msi)
 
-**Install VPN client** [VPN Access](/general/accessing-the-clusters/vpn-access/) (user-computer)
+**Install VPN client** [VPN Access](general/accessing-the-clusters/vpn-access/) (user-computer)
 
 !!! note
     The visualisation server is a compute node. You are not able to SSH to it with your private key. There are two solutions available to solve the login issue.
diff --git a/docs.it4i/software/bio/omics-master/overview.md b/docs.it4i/software/bio/omics-master/overview.md
index 0831fc0af..c80680308 100644
--- a/docs.it4i/software/bio/omics-master/overview.md
+++ b/docs.it4i/software/bio/omics-master/overview.md
@@ -165,9 +165,9 @@ Systems biology
 We also import systems biology information like interactome information from IntAct (24). Reactome (25) stores pathway and interaction information in BioPAX (26) format. The BioPAX data exchange format enables the integration of diverse pathway
 resources. We successfully solved the problem of storing data released in BioPAX format in a SQL relational schema, which allowed us to import Reactome into CellBase.
 
-### [Diagnostic Component (TEAM)](/software/bio/omics-master/diagnostic-component-team/)
+### [Diagnostic Component (TEAM)](software/bio/omics-master/diagnostic-component-team/)
 
-### [Priorization Component (BiERApp)](/software/bio/omics-master/priorization-component-bierapp/)
+### [Priorization Component (BiERApp)](software/bio/omics-master/priorization-component-bierapp/)
 
 ## Usage
 
@@ -262,7 +262,7 @@ The ped file ( file.ped) contains the following info:
     FAM sample_B 0 0 2 2
 ```
 
-Now, lets load the NGSPipeline module and copy the sample data to a [scratch directory](/salomon/storage/):
+Now, let's load the NGSPipeline module and copy the sample data to a [scratch directory](salomon/storage/):
 
 ```console
 $ ml ngsPipeline
@@ -276,7 +276,7 @@ Now, we can launch the pipeline (replace OPEN-0-0 with your Project ID):
 $ ngsPipeline -i /scratch/$USER/omics/sample_data/data -o /scratch/$USER/omics/results -p /scratch/$USER/omics/sample_data/data/file.ped --project OPEN-0-0 --queue qprod
 ```
 
-This command submits the processing [jobs to the queue](/salomon/job-submission-and-execution/).
+This command submits the processing [jobs to the queue](salomon/job-submission-and-execution/).
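+
+You can then monitor the submitted jobs with qstat (an illustrative command):
+
+```console
+$ # list your jobs and their states
+$ qstat -u $USER
+```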
 
 If we want to re-launch the pipeline from stage 4 until stage 20, we should use the following command:
 
diff --git a/docs.it4i/software/cae/comsol/comsol-multiphysics.md b/docs.it4i/software/cae/comsol/comsol-multiphysics.md
index efda687f1..f60a1be85 100644
--- a/docs.it4i/software/cae/comsol/comsol-multiphysics.md
+++ b/docs.it4i/software/cae/comsol/comsol-multiphysics.md
@@ -18,7 +18,7 @@ On the clusters COMSOL is available in the latest stable version. There are two
 
 * **Non commercial** or so called **EDU variant**, which can be used for research and educational purposes.
 
-* **Commercial** or so called **COM variant**, which can used also for commercial activities. **COM variant** has only subset of features compared to the **EDU variant** available. More about licensing [here](/software/cae/comsol/licensing-and-available-versions/).
+* **Commercial** or so called **COM variant**, which can also be used for commercial activities. The **COM variant** has only a subset of the features available in the **EDU variant**. More about licensing [here](software/cae/comsol/licensing-and-available-versions/).
 
 To load the default version of COMSOL, load the module:
 
@@ -32,7 +32,7 @@ By default the **EDU variant** will be loaded. If user needs other version or va
 $ ml av COMSOL
 ```
 
-If user needs to prepare COMSOL jobs in the interactive mode it is recommend to use COMSOL on the compute nodes via PBS Pro scheduler. In order run the COMSOL Desktop GUI on Windows is recommended to use the [Virtual Network Computing (VNC)](/general/accessing-the-clusters/graphical-user-interface/x-window-system/).
+If the user needs to prepare COMSOL jobs in interactive mode, it is recommended to use COMSOL on the compute nodes via the PBS Pro scheduler. To run the COMSOL Desktop GUI on Windows, it is recommended to use [Virtual Network Computing (VNC)](general/accessing-the-clusters/graphical-user-interface/x-window-system/).
 
 Example for Salomon:
 
@@ -76,7 +76,7 @@ Working directory has to be created before sending the (comsol.pbs) job script i
 
 COMSOL is a software package for the numerical solution of partial differential equations. LiveLink for MATLAB allows connection to the COMSOL API (Application Programming Interface) with the benefits of the MATLAB programming language and computing environment.
 
-LiveLink for MATLAB is available in both **EDU** and **COM** **variant** of the COMSOL release. On the clusters 1 commercial (**COM**) license and the 5 educational (**EDU**) licenses of LiveLink for MATLAB (see the [ISV Licenses](/software/isv_licenses/)) are available. Following example shows how to start COMSOL model from MATLAB via LiveLink in the interactive mode (on Anselm use 16 threads).
+LiveLink for MATLAB is available in both the **EDU** and **COM** **variants** of the COMSOL release. On the clusters, 1 commercial (**COM**) license and 5 educational (**EDU**) licenses of LiveLink for MATLAB are available (see the [ISV Licenses](software/isv_licenses/)). The following example shows how to start a COMSOL model from MATLAB via LiveLink in interactive mode (on Anselm, use 16 threads).
 
 ```console
 $ xhost +
diff --git a/docs.it4i/software/chemistry/molpro.md b/docs.it4i/software/chemistry/molpro.md
index fe3863d67..b6d157164 100644
--- a/docs.it4i/software/chemistry/molpro.md
+++ b/docs.it4i/software/chemistry/molpro.md
@@ -35,7 +35,7 @@ Molpro is compiled for parallel execution using MPI and OpenMP. By default, Molp
 !!! note
     The OpenMP parallelization in Molpro is limited and has been observed to produce limited scaling. We therefore recommend using MPI parallelization only. This can be achieved by passing the option mpiprocs=16:ompthreads=1 to PBS.
 
-You are advised to use the -d option to point to a directory in [SCRATCH file system - Salomon](/salomon/storage/). Molpro can produce a large amount of temporary data during its run, and it is important that these are placed in the fast scratch file system.
+You are advised to use the -d option to point to a directory in the [SCRATCH file system - Salomon](salomon/storage/). Molpro can produce a large amount of temporary data during its run, and it is important that these files are placed in the fast scratch file system.
 
 ### Example jobscript
 
diff --git a/docs.it4i/software/chemistry/nwchem.md b/docs.it4i/software/chemistry/nwchem.md
index 1a3e1d944..3c7a1ca5f 100644
--- a/docs.it4i/software/chemistry/nwchem.md
+++ b/docs.it4i/software/chemistry/nwchem.md
@@ -33,4 +33,4 @@ mpirun nwchem h2o.nw
 Please refer to [the documentation](http://www.nwchem-sw.org/index.php/Release62:Top-level) and set the following directives in the input file:
 
 * MEMORY: controls the amount of memory NWChem will use
-* SCRATCH_DIR : set this to a directory in [SCRATCH filesystem - Salomon](/salomon/storage/) (or run the calculation completely in a scratch directory). For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, eg. "scf direct"
+* SCRATCH_DIR: set this to a directory in the [SCRATCH filesystem - Salomon](salomon/storage/) (or run the calculation completely in a scratch directory). For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, e.g. "scf direct"; see the sketch below.
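+
+A minimal input-deck sketch combining these directives (the memory size and scratch path are illustrative assumptions, not values from this documentation):
+
+```
+# MEMORY directive: limit the total memory NWChem will use
+memory total 2000 mb
+# SCRATCH_DIR directive: point to a directory in the SCRATCH filesystem
+scratch_dir /scratch/username/nwchem
+# force "direct" mode for the SCF module to reduce I/O
+scf
+  direct
+end
+```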
diff --git a/docs.it4i/software/compilers.md b/docs.it4i/software/compilers.md
index baf950b5a..0aa3a56c9 100644
--- a/docs.it4i/software/compilers.md
+++ b/docs.it4i/software/compilers.md
@@ -24,7 +24,7 @@ Commercial licenses:
 
 ## Intel Compilers
 
-For information about the usage of Intel Compilers and other Intel products, read the [Intel Parallel studio](/software/intel-suite/intel-compilers/) page.
+For information about the usage of Intel Compilers and other Intel products, read the [Intel Parallel studio](software/intel-suite/intel-compilers/) page.
 
 ## PGI Compilers (Only on Salomon)
 
@@ -187,8 +187,8 @@ For more information see the man pages.
 
 ## Java
 
-For information how to use Java (runtime and/or compiler), read the [Java page](/software/java/).
+For information how to use Java (runtime and/or compiler), read the [Java page](software/java/).
 
 ## NVIDIA CUDA
 
-For information how to work with NVIDIA CUDA, read the [NVIDIA CUDA page](/anselm/software/nvidia-cuda/).
+For information how to work with NVIDIA CUDA, read the [NVIDIA CUDA page](anselm/software/nvidia-cuda/).
diff --git a/docs.it4i/software/debuggers/Introduction.md b/docs.it4i/software/debuggers/Introduction.md
index 2eaa90335..947da202b 100644
--- a/docs.it4i/software/debuggers/Introduction.md
+++ b/docs.it4i/software/debuggers/Introduction.md
@@ -15,7 +15,7 @@ $ ml intel
 $ idb
 ```
 
-Read more at the [Intel Debugger](/software/intel/intel-suite/intel-debugger/) page.
+Read more at the [Intel Debugger](software/intel/intel-suite/intel-debugger/) page.
 
 ## Allinea Forge (DDT/MAP)
 
@@ -26,7 +26,7 @@ $ ml Forge
 $ forge
 ```
 
-Read more at the [Allinea DDT](/software/debuggers/allinea-ddt/) page.
+Read more at the [Allinea DDT](software/debuggers/allinea-ddt/) page.
 
 ## Allinea Performance Reports
 
@@ -37,7 +37,7 @@ $ ml PerformanceReports/6.0
 $ perf-report mpirun -n 64 ./my_application argument01 argument02
 ```
 
-Read more at the [Allinea Performance Reports](/software/debuggers/allinea-performance-reports/) page.
+Read more at the [Allinea Performance Reports](software/debuggers/allinea-performance-reports/) page.
 
 ## RougeWave Totalview
 
@@ -48,7 +48,7 @@ $ ml TotalView/8.15.4-6-linux-x86-64
 $ totalview
 ```
 
-Read more at the [Totalview](/software/debuggers/total-view/) page.
+Read more at the [Totalview](software/debuggers/total-view/) page.
 
 ## Vampir Trace Analyzer
 
@@ -59,4 +59,4 @@ Vampir is a GUI trace analyzer for traces in OTF format.
     $ vampir
 ```
 
-Read more at the [Vampir](/software/debuggers/vampir/) page.
+Read more at the [Vampir](software/debuggers/vampir/) page.
diff --git a/docs.it4i/software/debuggers/allinea-ddt.md b/docs.it4i/software/debuggers/allinea-ddt.md
index 67bfdff18..984091fad 100644
--- a/docs.it4i/software/debuggers/allinea-ddt.md
+++ b/docs.it4i/software/debuggers/allinea-ddt.md
@@ -59,7 +59,7 @@ Be sure to log in with an X window forwarding enabled. This could mean using the
 $ ssh -X username@anselm.it4i.cz
 ```
 
-Other options is to access login node using VNC. Please see the detailed information on how to [use graphic user interface on Anselm](/general/accessing-the-clusters/graphical-user-interface/x-window-system/)
+Another option is to access the login node using VNC. Please see the detailed information on how to [use the graphic user interface on Anselm](general/accessing-the-clusters/graphical-user-interface/x-window-system/).
 
 From the login node an interactive session **with X windows forwarding** (-X option) can be started by the following command:
 
diff --git a/docs.it4i/software/debuggers/allinea-performance-reports.md b/docs.it4i/software/debuggers/allinea-performance-reports.md
index 6484c1b9d..cc684a10d 100644
--- a/docs.it4i/software/debuggers/allinea-performance-reports.md
+++ b/docs.it4i/software/debuggers/allinea-performance-reports.md
@@ -22,13 +22,13 @@ The module sets up environment variables, required for using the Allinea Perform
 
 Use the perf-report wrapper on your (MPI) program.
 
-Instead of [running your MPI program the usual way](/software/mpi/mpi/), use the the perf report wrapper:
+Instead of [running your MPI program the usual way](software/mpi/mpi/), use the perf-report wrapper:
 
 ```console
 $ perf-report mpirun ./mympiprog.x
 ```
 
-The MPI program will run as usual. The perf-report creates two additional files, in \*.txt and \*.html format, containing the performance report. Note that demanding MPI codes should be run within [the queue system](/salomon/job-submission-and-execution/).
+The MPI program will run as usual. The perf-report creates two additional files, in \*.txt and \*.html format, containing the performance report. Note that demanding MPI codes should be run within [the queue system](salomon/job-submission-and-execution/).
 
 ## Example
 
diff --git a/docs.it4i/software/debuggers/intel-performance-counter-monitor.md b/docs.it4i/software/debuggers/intel-performance-counter-monitor.md
index bbc08292b..b5da7bf60 100644
--- a/docs.it4i/software/debuggers/intel-performance-counter-monitor.md
+++ b/docs.it4i/software/debuggers/intel-performance-counter-monitor.md
@@ -2,7 +2,7 @@
 
 ## Introduction
 
-Intel PCM (Performance Counter Monitor) is a tool to monitor performance hardware counters on Intel>® processors, similar to [PAPI](/software/debuggers/papi/). The difference between PCM and PAPI is that PCM supports only Intel hardware, but PCM can monitor also uncore metrics, like memory controllers and QuickPath Interconnect links.
+Intel PCM (Performance Counter Monitor) is a tool to monitor performance hardware counters on Intel® processors, similar to [PAPI](software/debuggers/papi/). The difference between PCM and PAPI is that PCM supports only Intel hardware, but PCM can also monitor uncore metrics, like memory controllers and QuickPath Interconnect links.
 
 ## Installed Version
 
diff --git a/docs.it4i/software/debuggers/papi.md b/docs.it4i/software/debuggers/papi.md
index 4fcc35a65..8361776db 100644
--- a/docs.it4i/software/debuggers/papi.md
+++ b/docs.it4i/software/debuggers/papi.md
@@ -193,7 +193,7 @@ $ ./matrix
 !!! note
     PAPI currently supports only a subset of counters on the Intel Xeon Phi processor compared to Intel Xeon, for example the floating point operations counter is missing.
 
-To use PAPI in [Intel Xeon Phi](/software/intel/intel-xeon-phi-salomon/) native applications, you need to load module with " -mic" suffix, for example " papi/5.3.2-mic" :
+To use PAPI in [Intel Xeon Phi](software/intel/intel-xeon-phi-salomon/) native applications, you need to load the module with the "-mic" suffix, for example "papi/5.3.2-mic":
 
 ```console
 $ ml papi/5.3.2-mic
diff --git a/docs.it4i/software/debuggers/scalasca.md b/docs.it4i/software/debuggers/scalasca.md
index 76286508d..066076b88 100644
--- a/docs.it4i/software/debuggers/scalasca.md
+++ b/docs.it4i/software/debuggers/scalasca.md
@@ -10,8 +10,8 @@ Scalasca supports profiling of MPI, OpenMP and hybrid MPI+OpenMP applications.
 
 There are currently two Scalasca 2.0 [modules](modules-matrix/) installed on Anselm:
 
-* scalasca2/2.0-gcc-openmpi, for usage with [GNU Compiler](/software/compilers/) and [OpenMPI](software/mpi/Running_OpenMPI/),
-* scalasca2/2.0-icc-impi, for usage with [Intel Compiler](/software/compilers/) and [Intel MPI](software/mpi/running-mpich2/).
+* scalasca2/2.0-gcc-openmpi, for usage with [GNU Compiler](software/compilers/) and [OpenMPI](software/mpi/Running_OpenMPI/),
+* scalasca2/2.0-icc-impi, for usage with [Intel Compiler](software/compilers/) and [Intel MPI](software/mpi/running-mpich2/).
 
 ## Usage
 
@@ -23,7 +23,7 @@ Profiling a parallel application with Scalasca consists of three steps:
 
 ### Instrumentation
 
-Instrumentation via `scalasca -instrument` is discouraged. Use [Score-P instrumentation](/software/debuggers/score-p/).
+Instrumentation via `scalasca -instrument` is discouraged. Use [Score-P instrumentation](software/debuggers/score-p/).
 
 ### Runtime Measurement
 
@@ -43,11 +43,11 @@ Some notable Scalasca options are:
 * **-e &lt;directory>**: specify a directory to save the collected data to. By default, Scalasca saves the data to a directory with the prefix scorep\_, followed by the name of the executable and the launch configuration.
 
 !!! note
-    Scalasca can generate a huge amount of data, especially if tracing is enabled. Please consider saving the data to a [scratch directory](/salomon/storage/).
+    Scalasca can generate a huge amount of data, especially if tracing is enabled. Please consider saving the data to a [scratch directory](salomon/storage/).
 
 ### Analysis of Reports
 
-For the analysis, you must have [Score-P](/software/debuggers/score-p/) and [CUBE](software/debuggers/cube/) modules loaded. The analysis is done in two steps, first, the data is preprocessed and then CUBE GUI tool is launched.
+For the analysis, you must have the [Score-P](software/debuggers/score-p/) and [CUBE](software/debuggers/cube/) modules loaded. The analysis is done in two steps: first, the data is preprocessed, and then the CUBE GUI tool is launched.
 
 To launch the analysis, run:
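+
+For example (a sketch; the experiment directory is the one produced by the measurement step):
+
+```console
+$ scalasca -examine <experiment_directory>
+```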
 
@@ -63,7 +63,7 @@ scalasca -examine -s <experiment_directory>
 
 Alternatively, you can open CUBE and load the data directly from it. Keep in mind that in that case the preprocessing is not done and not all metrics will be shown in the viewer.
 
-Refer to [CUBE documentation](/software/debuggers/cube/) on usage of the GUI viewer.
+Refer to [CUBE documentation](software/debuggers/cube/) on usage of the GUI viewer.
 
 ## References
 
diff --git a/docs.it4i/software/debuggers/score-p.md b/docs.it4i/software/debuggers/score-p.md
index 4fce492a0..afb55bc3b 100644
--- a/docs.it4i/software/debuggers/score-p.md
+++ b/docs.it4i/software/debuggers/score-p.md
@@ -4,14 +4,14 @@
 
 The [Score-P measurement infrastructure](http://www.vi-hps.org/projects/score-p/) is a highly scalable and easy-to-use tool suite for profiling, event tracing, and online analysis of HPC applications.
 
-Score-P can be used as an instrumentation tool for [Scalasca](/software/debuggers/scalasca/).
+Score-P can be used as an instrumentation tool for [Scalasca](software/debuggers/scalasca/).
 
 ## Installed Versions
 
 There are currently two Score-P version 1.2.6 [modules](modules-matrix/) installed on Anselm:
 
-* scorep/1.2.3-gcc-openmpi, for usage     with [GNU Compiler](/software/compilers/) and [OpenMPI](software/mpi/Running_OpenMPI/)
-* scorep/1.2.3-icc-impi, for usage with [Intel Compiler](/software/compilers/)> and [Intel MPI](software/mpi/running-mpich2/)>.
+* scorep/1.2.3-gcc-openmpi, for usage with [GNU Compiler](software/compilers/) and [OpenMPI](software/mpi/Running_OpenMPI/)
+* scorep/1.2.3-icc-impi, for usage with [Intel Compiler](software/compilers/) and [Intel MPI](software/mpi/running-mpich2/).
 
 ## Instrumentation
 
diff --git a/docs.it4i/software/debuggers/vampir.md b/docs.it4i/software/debuggers/vampir.md
index e1c880f4d..3a1e9c2ee 100644
--- a/docs.it4i/software/debuggers/vampir.md
+++ b/docs.it4i/software/debuggers/vampir.md
@@ -1,6 +1,6 @@
 # Vampir
 
-Vampir is a commercial trace analysis and visualization tool. It can work with traces in OTF and OTF2 formats. It does not have the functionality to collect traces, you need to use a trace collection tool (such as [Score-P](/software/debuggers/score-p/)) first to collect the traces.
+Vampir is a commercial trace analysis and visualization tool. It can work with traces in OTF and OTF2 formats. It does not have the functionality to collect traces; you need to use a trace collection tool (such as [Score-P](software/debuggers/score-p/)) first to collect the traces.
 
 ![](../../img/Snmekobrazovky20160708v12.33.35.png)
 
diff --git a/docs.it4i/software/intel/intel-suite/intel-debugger.md b/docs.it4i/software/intel/intel-suite/intel-debugger.md
index bbb68a615..db2367535 100644
--- a/docs.it4i/software/intel/intel-suite/intel-debugger.md
+++ b/docs.it4i/software/intel/intel-suite/intel-debugger.md
@@ -4,7 +4,7 @@ IDB is no longer available since Intel Parallel Studio 2015
 
 ## Debugging Serial Applications
 
-The intel debugger version is available, via module intel/13.5.192. The debugger works for applications compiled with C and C++ compiler and the ifort fortran 77/90/95 compiler. The debugger provides java GUI environment. Use [X display](/general/accessing-the-clusters/graphical-user-interface/x-window-system/) for running the GUI.
+The Intel debugger is available via the module intel/13.5.192. The debugger works for applications compiled with the C and C++ compilers and the ifort Fortran 77/90/95 compiler. The debugger provides a Java GUI environment. Use [X display](general/accessing-the-clusters/graphical-user-interface/x-window-system/) for running the GUI.
 
 ```console
 $ ml intel/13.5.192
@@ -18,7 +18,7 @@ The debugger may run in text mode. To debug in text mode, use
 $ idbc
 ```
 
-To debug on the compute nodes, module intel must be loaded. The GUI on compute nodes may be accessed using the same way as in [the GUI section](/general/accessing-the-clusters/graphical-user-interface/x-window-system/)
+To debug on the compute nodes, the module intel must be loaded. The GUI on compute nodes may be accessed in the same way as described in [the GUI section](general/accessing-the-clusters/graphical-user-interface/x-window-system/).
 
 Example:
 
@@ -40,7 +40,7 @@ In this example, we allocate 1 full compute node, compile program myprog.c with
 
 ### Small Number of MPI Ranks
 
-For debugging small number of MPI ranks, you may execute and debug each rank in separate xterm terminal (do not forget the [X display](/general/accessing-the-clusters/graphical-user-interface/x-window-system/)). Using Intel MPI, this may be done in following way:
+For debugging a small number of MPI ranks, you may execute and debug each rank in a separate xterm terminal (do not forget the [X display](general/accessing-the-clusters/graphical-user-interface/x-window-system/)). Using Intel MPI, this may be done in the following way:
 
 ```console
 $ qsub -q qexp -l select=2:ncpus=24 -X -I
diff --git a/docs.it4i/software/intel/intel-suite/intel-mkl.md b/docs.it4i/software/intel/intel-suite/intel-mkl.md
index ceff86201..cc2a80c55 100644
--- a/docs.it4i/software/intel/intel-suite/intel-mkl.md
+++ b/docs.it4i/software/intel/intel-suite/intel-mkl.md
@@ -109,7 +109,7 @@ In this example, we compile, link and run the cblas_dgemm example, using LP64 in
 
 ## MKL and MIC Accelerators
 
-The Intel MKL is capable to automatically offload the computations o the MIC accelerator. See section [Intel Xeon Phi](/software/intel/intel-xeon-phi-salomon/) for details.
+The Intel MKL can automatically offload the computations to the MIC accelerator. See the section [Intel Xeon Phi](software/intel/intel-xeon-phi-salomon/) for details.
 
 ## LAPACKE C Interface
 
diff --git a/docs.it4i/software/intel/intel-suite/intel-parallel-studio-introduction.md b/docs.it4i/software/intel/intel-suite/intel-parallel-studio-introduction.md
index 3306cf687..264b15e9d 100644
--- a/docs.it4i/software/intel/intel-suite/intel-parallel-studio-introduction.md
+++ b/docs.it4i/software/intel/intel-suite/intel-parallel-studio-introduction.md
@@ -23,7 +23,7 @@ $ icc -v
 $ ifort -v
 ```
 
-Read more at the [Intel Compilers](/software/intel/intel-suite/intel-compilers/) page.
+Read more at the [Intel Compilers](software/intel/intel-suite/intel-compilers/) page.
 
 ## Intel Debugger
 
@@ -36,7 +36,7 @@ $ ml intel
 $ idb
 ```
 
-Read more at the [Intel Debugger](/software/intel/intel-suite/intel-debugger/) page.
+Read more at the [Intel Debugger](software/intel/intel-suite/intel-debugger/) page.
 
 ## Intel Math Kernel Library
 
@@ -46,7 +46,7 @@ Intel Math Kernel Library (Intel MKL) is a library of math kernel subroutines, e
 $ ml imkl
 ```
 
-Read more at the [Intel MKL](/software/intel/intel-suite/intel-mkl/) page.
+Read more at the [Intel MKL](software/intel/intel-suite/intel-mkl/) page.
 
 ## Intel Integrated Performance Primitives
 
@@ -56,7 +56,7 @@ Intel Integrated Performance Primitives, version 7.1.1, compiled for AVX is avai
 $ ml ipp
 ```
 
-Read more at the [Intel IPP](/software/intel/intel-suite/intel-integrated-performance-primitives/) page.
+Read more at the [Intel IPP](software/intel/intel-suite/intel-integrated-performance-primitives/) page.
 
 ## Intel Threading Building Blocks
 
@@ -66,4 +66,4 @@ Intel Threading Building Blocks (Intel TBB) is a library that supports scalable
 $ ml tbb
 ```
 
-Read more at the [Intel TBB](/software/intel/intel-suite/intel-tbb/) page.
+Read more at the [Intel TBB](software/intel/intel-suite/intel-tbb/) page.
diff --git a/docs.it4i/software/intel/intel-suite/intel-tbb.md b/docs.it4i/software/intel/intel-suite/intel-tbb.md
index d04d44841..d28a92d24 100644
--- a/docs.it4i/software/intel/intel-suite/intel-tbb.md
+++ b/docs.it4i/software/intel/intel-suite/intel-tbb.md
@@ -2,7 +2,7 @@
 
 ## Intel Threading Building Blocks
 
-Intel Threading Building Blocks (Intel TBB) is a library that supports scalable parallel programming using standard ISO C++ code. It does not require special languages or compilers.  To use the library, you specify tasks, not threads, and let the library map tasks onto threads in an efficient manner. The tasks are executed by a runtime scheduler and may be offloaded to [MIC accelerator](/software/intel//intel-xeon-phi-salomon/).
+Intel Threading Building Blocks (Intel TBB) is a library that supports scalable parallel programming using standard ISO C++ code. It does not require special languages or compilers. To use the library, you specify tasks, not threads, and let the library map tasks onto threads in an efficient manner. The tasks are executed by a runtime scheduler and may be offloaded to the [MIC accelerator](software/intel/intel-xeon-phi-salomon/).
 
 Intel TBB is available on the cluster.
 
diff --git a/docs.it4i/software/intel/intel-suite/intel-trace-analyzer-and-collector.md b/docs.it4i/software/intel/intel-suite/intel-trace-analyzer-and-collector.md
index cecfbe096..7a86f2d0a 100644
--- a/docs.it4i/software/intel/intel-suite/intel-trace-analyzer-and-collector.md
+++ b/docs.it4i/software/intel/intel-suite/intel-trace-analyzer-and-collector.md
@@ -21,7 +21,7 @@ The trace will be saved in file myapp.stf in the current directory.
 
 ## Viewing Traces
 
-To view and analyze the trace, open ITAC GUI in a [graphical environment](/general/accessing-the-clusters/graphical-user-interface/x-window-system/):
+To view and analyze the trace, open ITAC GUI in a [graphical environment](general/accessing-the-clusters/graphical-user-interface/x-window-system/):
 
 ```console
 $ ml itac/9.1.2.024
diff --git a/docs.it4i/software/isv_licenses.md b/docs.it4i/software/isv_licenses.md
index ef4e1177b..d7cbb7cc9 100644
--- a/docs.it4i/software/isv_licenses.md
+++ b/docs.it4i/software/isv_licenses.md
@@ -68,7 +68,7 @@ Names of applications (APP):
     matlab-edu
 ```
 
-To get the FEATUREs of a license take a look into the corresponding state file ([see above](/software/isv_licenses/#Licence)), or use:
+To get the FEATUREs of a license, take a look into the corresponding state file ([see above](software/isv_licenses/#Licence)), or use:
 
 ### Application and List of Provided Features
 
diff --git a/docs.it4i/software/machine-learning/introduction.md b/docs.it4i/software/machine-learning/introduction.md
index 4058388d6..5c7288676 100644
--- a/docs.it4i/software/machine-learning/introduction.md
+++ b/docs.it4i/software/machine-learning/introduction.md
@@ -16,7 +16,7 @@ Test module:
 $ ml Tensorflow
 ```
 
-Read more about available versions at the [TensorFlow page](/software/machine-learning/tensorflow/).
+Read more about available versions at the [TensorFlow page](software/machine-learning/tensorflow/).
 
 ## Theano
 
diff --git a/docs.it4i/software/mic/mic_environment.md b/docs.it4i/software/mic/mic_environment.md
index 92a439a2e..7f36f256a 100644
--- a/docs.it4i/software/mic/mic_environment.md
+++ b/docs.it4i/software/mic/mic_environment.md
@@ -1,12 +1,12 @@
 # Intel Xeon Phi Environment
 
-Intel Xeon Phi (so-called MIC) accelerator can be used in several modes ([Offload](/software/intel/intel-xeon-phi-salomon/#offload-mode) and [Native](#native-mode)). The default mode on the cluster is offload mode, but all modes described in this document are supported.
+The Intel Xeon Phi (so-called MIC) accelerator can be used in several modes ([Offload](software/intel/intel-xeon-phi-salomon/#offload-mode) and [Native](#native-mode)). The default mode on the cluster is the offload mode, but all modes described in this document are supported.
 
 See sections below for more details.
 
 ## Intel Utilities for Xeon Phi
 
-Continue [here](/software/intel/intel-xeon-phi-salomon/)
+Continue [here](software/intel/intel-xeon-phi-salomon/).
 
 ## GCC With [KNC](https://en.wikipedia.org/wiki/Xeon_Phi) Support
 
@@ -434,4 +434,4 @@ Configure step (for `configure`,`make` and `make install` software)
 
 Modulefile and Lmod
 
-* Read [Lmod](/software/modules/lmod/)
+* Read [Lmod](software/modules/lmod/)
diff --git a/docs.it4i/software/mpi/mpi.md b/docs.it4i/software/mpi/mpi.md
index 8356521fa..b8a61b25e 100644
--- a/docs.it4i/software/mpi/mpi.md
+++ b/docs.it4i/software/mpi/mpi.md
@@ -136,6 +136,6 @@ In the previous two cases with one or two MPI processes per node, the operating
 
 ### Running OpenMPI
 
-The [**OpenMPI 1.8.6**](http://www.open-mpi.org/) is based on OpenMPI. Read more on [how to run OpenMPI](/software/mpi/Running_OpenMPI/) based MPI.
+The [**OpenMPI 1.8.6**](http://www.open-mpi.org/) implementation is available. Read more on [how to run OpenMPI](software/mpi/Running_OpenMPI/) based MPI.
 
-The Intel MPI may run on the [Intel Xeon Ph](/software/intel/intel-xeon-phi-salomon/) accelerators as well. Read more on [how to run Intel MPI on accelerators](software/intel/intel-xeon-phi-salomon/).
+The Intel MPI may run on the [Intel Xeon Phi](software/intel/intel-xeon-phi-salomon/) accelerators as well. Read more on [how to run Intel MPI on accelerators](software/intel/intel-xeon-phi-salomon/).
diff --git a/docs.it4i/software/mpi/mpi4py-mpi-for-python.md b/docs.it4i/software/mpi/mpi4py-mpi-for-python.md
index b6056fd45..ea39d0048 100644
--- a/docs.it4i/software/mpi/mpi4py-mpi-for-python.md
+++ b/docs.it4i/software/mpi/mpi4py-mpi-for-python.md
@@ -42,7 +42,7 @@ You need to import MPI to your python program. Include the following line to the
 from mpi4py import MPI
 ```
 
-The MPI4Py enabled python programs [execute as any other OpenMPI](/salomon/mpi/Running_OpenMPI/) code.The simpliest way is to run
+The MPI4Py-enabled Python programs [execute as any other OpenMPI](software/mpi/Running_OpenMPI/) code. The simplest way is to run
 
 ```console
 $ mpiexec python <script>.py
diff --git a/docs.it4i/software/mpi/running-mpich2.md b/docs.it4i/software/mpi/running-mpich2.md
index b35d7370c..35452ed70 100644
--- a/docs.it4i/software/mpi/running-mpich2.md
+++ b/docs.it4i/software/mpi/running-mpich2.md
@@ -152,4 +152,4 @@ $ mpirun  -bindto numa echo $OMP_NUM_THREADS
 
 ## Intel MPI on Xeon Phi
 
-The [MPI section of Intel Xeon Phi chapter](/software/intel/intel-xeon-phi-salomon/) provides details on how to run Intel MPI code on Xeon Phi architecture.
+The [MPI section of Intel Xeon Phi chapter](software/intel/intel-xeon-phi-salomon/) provides details on how to run Intel MPI code on Xeon Phi architecture.
diff --git a/docs.it4i/software/numerical-languages/introduction.md b/docs.it4i/software/numerical-languages/introduction.md
index fa9cf2943..50ef10460 100644
--- a/docs.it4i/software/numerical-languages/introduction.md
+++ b/docs.it4i/software/numerical-languages/introduction.md
@@ -15,7 +15,7 @@ $ ml MATLAB
 $ matlab
 ```
 
-Read more at the [Matlab page](/software/numerical-languages/matlab/).
+Read more at the [Matlab page](software/numerical-languages/matlab/).
 
 ## Octave
 
@@ -26,7 +26,7 @@ $ ml Octave
 $ octave
 ```
 
-Read more at the [Octave page](/software/numerical-languages/octave/).
+Read more at the [Octave page](software/numerical-languages/octave/).
 
 ## R
 
@@ -37,4 +37,4 @@ $ ml R
 $ R
 ```
 
-Read more at the [R page](/software/numerical-languages/r/).
+Read more at the [R page](software/numerical-languages/r/).
diff --git a/docs.it4i/software/numerical-languages/matlab.md b/docs.it4i/software/numerical-languages/matlab.md
index 321250b3d..73a283ae9 100644
--- a/docs.it4i/software/numerical-languages/matlab.md
+++ b/docs.it4i/software/numerical-languages/matlab.md
@@ -21,9 +21,9 @@ $ ml av MATLAB
 
 If you need to use the Matlab GUI to prepare your Matlab programs, you can use Matlab directly on the login nodes. But for all computations, use Matlab on the compute nodes via the PBS Pro scheduler.
 
-If you require the Matlab GUI, follow the general information about [running graphical applications](/general/accessing-the-clusters/graphical-user-interface/x-window-system/).
+If you require the Matlab GUI, follow the general information about [running graphical applications](general/accessing-the-clusters/graphical-user-interface/x-window-system/).
 
-Matlab GUI is quite slow using the X forwarding built in the PBS (qsub -X), so using X11 display redirection either via SSH or directly by xauth (see the "GUI Applications on Compute Nodes over VNC" part [here](/general/accessing-the-clusters/graphical-user-interface/x-window-system/)) is recommended.
+The Matlab GUI is quite slow when using the X forwarding built into PBS (qsub -X), so using X11 display redirection, either via SSH or directly by xauth (see the "GUI Applications on Compute Nodes over VNC" part [here](general/accessing-the-clusters/graphical-user-interface/x-window-system/)), is recommended.
 
 To run Matlab with the GUI, use, for example (assuming the default MATLAB module):
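+
+```console
+$ ml MATLAB
+$ matlab
+```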
 
@@ -68,7 +68,7 @@ With the new mode, MATLAB itself launches the workers via PBS, so you can either
 
 ### Parallel Matlab Interactive Session
 
-Following example shows how to start interactive session with support for Matlab GUI. For more information about GUI based applications on Anselm see [this page](/general/accessing-the-clusters/graphical-user-interface/x-window-system/).
+The following example shows how to start an interactive session with support for the Matlab GUI. For more information about GUI-based applications on Anselm, see [this page](general/accessing-the-clusters/graphical-user-interface/x-window-system/).
 
 ```console
 $ xhost +
@@ -249,11 +249,11 @@ delete(pool)
 
 ### Non-Interactive Session and Licenses
 
-If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least the `-l __feature__matlab__MATLAB=1` for EDU variant of Matlab. More information about how to check the license features states and how to request them with PBS Pro, [look here](/software/isv_licenses/).
+If you want to run batch jobs with Matlab, be sure to request the appropriate license features with the PBS Pro scheduler, at least `-l __feature__matlab__MATLAB=1` for the EDU variant of Matlab. For more information about how to check the license feature states and how to request them with PBS Pro, [look here](software/isv_licenses/).
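+
+For illustration, such a resource request might look like this (a sketch; OPEN-0-0 stands for your Project ID and ./jobscript for your own script):
+
+```console
+$ # request one node plus one MATLAB EDU license feature
+$ qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=24 -l __feature__matlab__MATLAB=1 ./jobscript
+```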
 
 The licensing feature of PBS is currently disabled.
 
-In case of non-interactive session read the [following information](/software/isv_licenses/) on how to modify the qsub command to test for available licenses prior getting the resource allocation.
+In case of a non-interactive session, read the [following information](software/isv_licenses/) on how to modify the qsub command to test for available licenses prior to getting the resource allocation.
 
 ### Matlab Distributed Computing Engines Start Up Time
 
diff --git a/docs.it4i/software/numerical-languages/matlab_1314.md b/docs.it4i/software/numerical-languages/matlab_1314.md
index 751e56a5e..8197edccb 100644
--- a/docs.it4i/software/numerical-languages/matlab_1314.md
+++ b/docs.it4i/software/numerical-languages/matlab_1314.md
@@ -3,7 +3,7 @@
 ## Introduction
 
 !!! note
-    This document relates to the old versions R2013 and R2014. For MATLAB 2015 use [this documentation instead](/software/numerical-languages/matlab/).
+    This document relates to the old versions R2013 and R2014. For MATLAB 2015 use [this documentation instead](software/numerical-languages/matlab/).
 
 Matlab is available in the latest stable version. There are always two variants of the release:
 
@@ -190,9 +190,9 @@ You can copy and paste the example in a .m file and execute. Note that the matla
 
 ### Non-Interactive Session and Licenses
 
-If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least the ` -l __feature__matlab__MATLAB=1` for EDU variant of Matlab. More information about how to check the license features states and how to request them with PBS Pro, [look here](/software/isv_licenses/).
+If you want to run batch jobs with Matlab, be sure to request the appropriate license features with the PBS Pro scheduler, at least `-l __feature__matlab__MATLAB=1` for the EDU variant of Matlab. For more information about how to check the license feature states and how to request them with PBS Pro, [look here](software/isv_licenses/).
 
-In case of non-interactive session read the [following information](/software/isv_licenses/) on how to modify the qsub command to test for available licenses prior getting the resource allocation.
+In case of a non-interactive session, read the [following information](software/isv_licenses/) on how to modify the qsub command to test for available licenses prior to getting the resource allocation.
 
 ### Matlab Distributed Computing Engines Start Up Time
 
diff --git a/docs.it4i/software/numerical-languages/octave.md b/docs.it4i/software/numerical-languages/octave.md
index 385724804..524611502 100644
--- a/docs.it4i/software/numerical-languages/octave.md
+++ b/docs.it4i/software/numerical-languages/octave.md
@@ -60,11 +60,11 @@ Octave may use MPI for interprocess communication This functionality is currentl
 
 ## Xeon Phi Support
 
-Octave may take advantage of the Xeon Phi accelerators. This will only work on the  [Intel Xeon Phi](/software/intel/intel-xeon-phi-salomon/)  [accelerated nodes](/salomon/compute-nodes/).
+Octave may take advantage of the Xeon Phi accelerators. This will only work on the [Intel Xeon Phi](software/intel/intel-xeon-phi-salomon/) [accelerated nodes](salomon/compute-nodes/).
 
 ### Automatic Offload Support
 
-Octave can accelerate BLAS type operations (in particular the Matrix Matrix multiplications] on the Xeon Phi accelerator, via [Automatic Offload using the MKL library](/software/intel/intel-xeon-phi-salomon/)
+Octave can accelerate BLAS-type operations (in particular, matrix-matrix multiplications) on the Xeon Phi accelerator via [Automatic Offload using the MKL library](software/intel/intel-xeon-phi-salomon/).
 
 Example
 
@@ -88,7 +88,7 @@ In this example, the calculation was automatically divided among the CPU cores a
 
 ### Native Support
 
-A version of [native](/software/intel/intel-xeon-phi-salomon/) Octave is compiled for Xeon Phi accelerators. Some limitations apply for this version:
+A version of [native](software/intel/intel-xeon-phi-salomon/) Octave is compiled for the Xeon Phi accelerators. Some limitations apply to this version:
 
 * Only command line support. GUI, graph plotting, etc. are not supported.
 * Command history in interactive mode is not supported.
diff --git a/docs.it4i/software/numerical-languages/r.md b/docs.it4i/software/numerical-languages/r.md
index 9081feb84..81ba42081 100644
--- a/docs.it4i/software/numerical-languages/r.md
+++ b/docs.it4i/software/numerical-languages/r.md
@@ -66,7 +66,7 @@ cp routput.out $PBS_O_WORKDIR/.
 exit
 ```
 
-This script may be submitted directly to the PBS workload manager via the qsub command.  The inputs are in rscript.R file, outputs in routput.out file. See the single node jobscript example in the [Job execution section - Anselm](/anselm/job-submission-and-execution/).
+This script may be submitted directly to the PBS workload manager via the qsub command. The inputs are in the rscript.R file, the outputs in the routput.out file. See the single node jobscript example in the [Job execution section - Anselm](anselm/job-submission-and-execution/).
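+
+For example, the submission might look like this (a sketch; rjob.pbs is a hypothetical file name for the jobscript above):
+
+```console
+$ # submit the R jobscript to one Anselm node (16 cores)
+$ qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=16 rjob.pbs
+```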
 
 ## Parallel R
 
@@ -144,7 +144,7 @@ Every evaluation of the integrad function runs in parallel on different process.
 
 The package Rmpi provides an interface (wrapper) to MPI APIs.
 
-It also provides interactive R slave environment. On the cluster, Rmpi provides interface to the [OpenMPI](/software/mpi/Running_OpenMPI/).
+It also provides an interactive R slave environment. On the cluster, Rmpi provides an interface to [OpenMPI](software/mpi/Running_OpenMPI/).
 
 Read more on Rmpi at <http://cran.r-project.org/web/packages/Rmpi/>; the reference manual is available [here](http://cran.r-project.org/web/packages/Rmpi/Rmpi.pdf).
 
@@ -390,7 +390,7 @@ cp routput.out $PBS_O_WORKDIR/.
 exit
 ```
 
-For more information about jobscripts and MPI execution refer to the [Job submission](/anselm/job-submission-and-execution/) and general [MPI](/software/mpi/mpi/) sections.
+For more information about jobscripts and MPI execution refer to the [Job submission](anselm/job-submission-and-execution/) and general [MPI](software/mpi/mpi/) sections.
 
 ## Xeon Phi Offload
 
@@ -400,4 +400,4 @@ By leveraging MKL, R can accelerate certain computations, most notably linear al
 $ export MKL_MIC_ENABLE=1
 ```
 
-[Read more about automatic offload](/software/intel/intel-xeon-phi-salomon/)
+[Read more about automatic offload](software/intel/intel-xeon-phi-salomon/)
diff --git a/docs.it4i/software/numerical-libraries/fftw.md b/docs.it4i/software/numerical-libraries/fftw.md
index 9ec5c5360..0807bd4d8 100644
--- a/docs.it4i/software/numerical-libraries/fftw.md
+++ b/docs.it4i/software/numerical-libraries/fftw.md
@@ -68,6 +68,6 @@ $ ml fftw3-mpi
 $ mpicc testfftw3mpi.c -o testfftw3mpi.x -Wl,-rpath=$LIBRARY_PATH -lfftw3_mpi
 ```
 
-Run the example as [Intel MPI program](/software/mpi/running-mpich2/).
+Run the example as [Intel MPI program](software/mpi/running-mpich2/).
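+
+For instance (a minimal sketch; see the linked page for the full Intel MPI workflow):
+
+```console
+$ # run the compiled example on 4 MPI processes
+$ mpirun -n 4 ./testfftw3mpi.x
+```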
 
 Read more on FFTW usage on the [FFTW website.](http://www.fftw.org/fftw3_doc/)
diff --git a/docs.it4i/software/numerical-libraries/hdf5.md b/docs.it4i/software/numerical-libraries/hdf5.md
index e0c6d7880..11cd26da0 100644
--- a/docs.it4i/software/numerical-libraries/hdf5.md
+++ b/docs.it4i/software/numerical-libraries/hdf5.md
@@ -84,6 +84,6 @@ $ ml hdf5-parallel
 $ mpicc hdf5test.c -o hdf5test.x -Wl,-rpath=$LIBRARY_PATH $HDF5_INC $HDF5_SHLIB
 ```
 
-Run the example as [Intel MPI program](/software/mpi/running-mpich2/).
+Run the example as [Intel MPI program](software/mpi/running-mpich2/).
 
 For further information, see the website: [http://www.hdfgroup.org/HDF5/](http://www.hdfgroup.org/HDF5/)
diff --git a/docs.it4i/software/numerical-libraries/intel-numerical-libraries.md b/docs.it4i/software/numerical-libraries/intel-numerical-libraries.md
index dcbe88894..f25d6edd5 100644
--- a/docs.it4i/software/numerical-libraries/intel-numerical-libraries.md
+++ b/docs.it4i/software/numerical-libraries/intel-numerical-libraries.md
@@ -10,7 +10,7 @@ Intel Math Kernel Library (Intel MKL) is a library of math kernel subroutines, e
 $ ml mkl  # or: ml imkl
 ```
 
-Read more at the [Intel MKL](/software/intel/intel-suite/intel-mkl/) page.
+Read more at the [Intel MKL](software/intel/intel-suite/intel-mkl/) page.
 
 ## Intel Integrated Performance Primitives
 
@@ -20,7 +20,7 @@ Intel Integrated Performance Primitives, version 7.1.1, compiled for AVX is avai
 $ ml ipp
 ```
 
-Read more at the [Intel IPP](/software/intel/intel-suite/intel-integrated-performance-primitives/) page.
+Read more at the [Intel IPP](software/intel/intel-suite/intel-integrated-performance-primitives/) page.
 
 ## Intel Threading Building Blocks
 
@@ -30,4 +30,4 @@ Intel Threading Building Blocks (Intel TBB) is a library that supports scalable
 $ ml tbb
 ```
 
-Read more at the [Intel TBB](/software/intel/intel-suite/intel-tbb/) page.
+Read more at the [Intel TBB](software/intel/intel-suite/intel-tbb/) page.
diff --git a/docs.it4i/software/tools/ansys/ansys-cfx.md b/docs.it4i/software/tools/ansys/ansys-cfx.md
index 49a47327f..48dac488a 100644
--- a/docs.it4i/software/tools/ansys/ansys-cfx.md
+++ b/docs.it4i/software/tools/ansys/ansys-cfx.md
@@ -47,7 +47,7 @@ echo Machines: $hl
 /ansys_inc/v145/CFX/bin/cfx5solve -def input.def -size 4 -size-ni 4x -part-large -start-method "Platform MPI Distributed Parallel" -par-dist $hl -P aa_r
 ```
 
-Header of the PBS file (above) is common and description can be find on [this site](/anselm/job-submission-and-execution/). SVS FEM recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources.
+The header of the PBS file (above) is common; its description can be found on [this site](anselm/job-submission-and-execution/). SVS FEM recommends requesting resources with the nodes and ppn keywords, which directly specify the number of nodes (computers) and cores per node (ppn) to be used by the job. The rest of the script assumes resources allocated in this form.
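 
 For illustration, a hedged sketch of such a header; the job name, queue, project ID, sizes, and walltime are placeholders:
 
 ```bash
 #!/bin/bash
 #PBS -N CFX-job            # job name (placeholder)
 #PBS -q qprod              # production queue (placeholder)
 #PBS -A PROJECT_ID         # accounting project (placeholder)
 #PBS -l nodes=2:ppn=16     # the nodes/ppn keywords recommended above
 #PBS -l walltime=04:00:00
 ```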
 
 The working directory has to be created before the PBS job is sent to the queue. The input file should be in the working directory, or the full path to the input file has to be specified. The input file has to be a common CFX def file, which is passed to the CFX solver via the -def parameter.
 
diff --git a/docs.it4i/software/tools/ansys/ansys-fluent.md b/docs.it4i/software/tools/ansys/ansys-fluent.md
index 63444f88a..ef50d856c 100644
--- a/docs.it4i/software/tools/ansys/ansys-fluent.md
+++ b/docs.it4i/software/tools/ansys/ansys-fluent.md
@@ -38,7 +38,7 @@ NCORES=`wc -l $PBS_NODEFILE |awk '{print $1}'`
 /ansys_inc/v145/fluent/bin/fluent 3d -t$NCORES -cnf=$PBS_NODEFILE -g -i fluent.jou
 ```
 
-Header of the pbs file (above) is common and description can be find on [this site](/salomon/resources-allocation-policy/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources.
+The header of the PBS file (above) is common; its description can be found on [this site](salomon/resources-allocation-policy/). [SVS FEM](http://www.svsfem.cz) recommends requesting resources with the nodes and ppn keywords, which directly specify the number of nodes (computers) and cores per node (ppn) to be used by the job. The rest of the script assumes resources allocated in this form.
 
 The working directory has to be created before the PBS job is sent to the queue. The input file should be in the working directory, or the full path to the input file has to be specified. The input file has to be a common Fluent journal file, which is passed to the Fluent solver via the -i parameter (here fluent.jou).
 
diff --git a/docs.it4i/software/tools/ansys/ansys-ls-dyna.md b/docs.it4i/software/tools/ansys/ansys-ls-dyna.md
index 00328000b..cdc14b1f4 100644
--- a/docs.it4i/software/tools/ansys/ansys-ls-dyna.md
+++ b/docs.it4i/software/tools/ansys/ansys-ls-dyna.md
@@ -50,6 +50,6 @@ echo Machines: $hl
 /ansys_inc/v145/ansys/bin/ansys145 -dis -lsdynampp i=input.k -machines $hl
 ```
 
-Header of the PBS file (above) is common and description can be find on [this site](/anselm/job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources.
+The header of the PBS file (above) is common; its description can be found on [this site](anselm/job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends requesting resources with the nodes and ppn keywords, which directly specify the number of nodes (computers) and cores per node (ppn) to be used by the job. The rest of the script assumes resources allocated in this form.
 
 The working directory has to be created before the PBS job is sent to the queue. The input file should be in the working directory, or the full path to the input file has to be specified. The input file has to be a common LS-DYNA **.k** file, which is passed to the ANSYS solver via the i= parameter.
diff --git a/docs.it4i/software/tools/ansys/ansys-mechanical-apdl.md b/docs.it4i/software/tools/ansys/ansys-mechanical-apdl.md
index e3e4c9379..c16443778 100644
--- a/docs.it4i/software/tools/ansys/ansys-mechanical-apdl.md
+++ b/docs.it4i/software/tools/ansys/ansys-mechanical-apdl.md
@@ -49,7 +49,7 @@ echo Machines: $hl
 /ansys_inc/v145/ansys/bin/ansys145 -b -dis -p aa_r -i input.dat -o file.out -machines $hl -dir $WORK_DIR
 ```
 
-Header of the PBS file (above) is common and description can be found on [this site](/anselm/resources-allocation-policy/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allow to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources.
+The header of the PBS file (above) is common; its description can be found on [this site](anselm/resources-allocation-policy/). [SVS FEM](http://www.svsfem.cz) recommends requesting resources with the nodes and ppn keywords, which directly specify the number of nodes (computers) and cores per node (ppn) to be used by the job. The rest of the script assumes resources allocated in this form.
 
 The working directory has to be created before the PBS job is sent to the queue. The input file should be in the working directory, or the full path to the input file has to be specified. The input file has to be a common APDL file, which is passed to the ANSYS solver via the -i parameter.
 
diff --git a/docs.it4i/software/tools/ansys/ls-dyna.md b/docs.it4i/software/tools/ansys/ls-dyna.md
index 86320a5f1..43c818c2c 100644
--- a/docs.it4i/software/tools/ansys/ls-dyna.md
+++ b/docs.it4i/software/tools/ansys/ls-dyna.md
@@ -30,6 +30,6 @@ ml lsdyna
 /apps/engineering/lsdyna/lsdyna700s i=input.k
 ```
 
-Header of the PBS file (above) is common and description can be find on [this site](/anselm/job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources.
+The header of the PBS file (above) is common; its description can be found on [this site](anselm/job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends requesting resources with the nodes and ppn keywords, which directly specify the number of nodes (computers) and cores per node (ppn) to be used by the job. The rest of the script assumes resources allocated in this form.
 
 The working directory has to be created before the PBS job is sent to the queue. The input file should be in the working directory, or the full path to the input file has to be specified. The input file has to be a common LS-DYNA **.k** file, which is passed to the LS-DYNA solver via the i= parameter.
diff --git a/docs.it4i/software/tools/virtualization.md b/docs.it4i/software/tools/virtualization.md
index 22abf7ce9..b2bb0fd0e 100644
--- a/docs.it4i/software/tools/virtualization.md
+++ b/docs.it4i/software/tools/virtualization.md
@@ -12,7 +12,7 @@ There are situations when Anselm's environment is not suitable for user needs.
 * Application requires privileged access to operating system
 * ... and combinations of above cases
 
-We offer solution for these cases - **virtualization**. Anselm's environment gives the possibility to run virtual machines on compute nodes. Users can create their own images of operating system with specific software stack and run instances of these images as virtual machines on compute nodes. Run of virtual machines is provided by standard mechanism of [Resource Allocation and Job Execution](/salomon/job-submission-and-execution/).
+We offer a solution for these cases: **virtualization**. Anselm's environment makes it possible to run virtual machines on compute nodes. Users can create their own operating system images with a specific software stack and run instances of these images as virtual machines on compute nodes. Virtual machines are run through the standard [Resource Allocation and Job Execution](salomon/job-submission-and-execution/) mechanism.
 
 The solution is based on the QEMU-KVM software stack and provides hardware-assisted x86 virtualization.
 
@@ -203,7 +203,7 @@ Run script runs application from shared job directory (mapped as drive z:), proc
 
 ### Run Jobs
 
-Run jobs as usual, see  [Resource Allocation and Job Execution](/salomon/job-submission-and-execution/). Use only full node allocation for virtualization jobs.
+Run jobs as usual; see [Resource Allocation and Job Execution](salomon/job-submission-and-execution/). Use only full-node allocations for virtualization jobs.
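 
 A hedged example of such a submission; the script name, project ID, and walltime are placeholders, and `ncpus` should match the node's full core count:
 
 ```console
 $ qsub -A PROJECT_ID -q qprod -l select=1:ncpus=16 -l walltime=04:00:00 ./vm_job.sh
 ```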
 
 ### Running Virtual Machines
 
diff --git a/docs.it4i/software/viz/openfoam.md b/docs.it4i/software/viz/openfoam.md
index 34baa15cc..96e7213a3 100644
--- a/docs.it4i/software/viz/openfoam.md
+++ b/docs.it4i/software/viz/openfoam.md
@@ -112,7 +112,7 @@ Job submission (example for Anselm):
 $ qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=16,walltime=03:00:00 test.sh
 ```
 
-For information about job submission [look here](/anselm/job-submission-and-execution/).
+For information about job submission, see the [job submission](anselm/job-submission-and-execution/) page.
 
 ## Running Applications in Parallel
 
diff --git a/docs.it4i/software/viz/paraview.md b/docs.it4i/software/viz/paraview.md
index 1678c9bfe..f425989b2 100644
--- a/docs.it4i/software/viz/paraview.md
+++ b/docs.it4i/software/viz/paraview.md
@@ -29,7 +29,7 @@ To launch the server, you must first allocate compute nodes, for example
 $ qsub -I -q qprod -A OPEN-0-0 -l select=2
 ```
 
-to launch an interactive session on 2 nodes. Refer to [Resource Allocation and Job Execution](/salomon/job-submission-and-execution/) for details.
+to launch an interactive session on 2 nodes. Refer to [Resource Allocation and Job Execution](salomon/job-submission-and-execution/) for details.
 
 After the interactive session is opened, load the ParaView module (following examples for Salomon, Anselm instructions in comments):
 
-- 
GitLab