diff --git a/.gitignore b/.gitignore
index 45ddf0ae397075d91d1660f81bc5f6c39f60f9fb..0682751bb669c6d17d9b86afc76457ada1cf145c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
 site/
+scripts/*.csv
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 712a7d6eccf8ca283a2b711f2908fefef91e6870..e658e14d225a198c4e631f47ff2e4c9a68d65c5f 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -24,6 +24,7 @@ capitalize:
   image: davidhrbac/docker-mkdocscheck:latest
   allow_failure: true
   script:
+  - scripts/titlemd_test.py mkdocs.yml
   - find docs.it4i/ -name '*.md' -print0 | xargs -0 -n1 scripts/titlemd_test.py
 
 spell check:
@@ -39,11 +40,19 @@ ext_links:
   image: davidhrbac/docker-mdcheck:latest
   allow_failure: true
   after_script:
+  # remove JSON results
   - rm *.json
   script:
   #- find docs.it4i/ -name '*.md' -exec grep --color -l http {} + | xargs awesome_bot -t 10
-  #- find docs.it4i/ -name '*.md' -exec grep --color -l http {} + | xargs awesome_bot -t 10 --allow-dupe --allow-redirect
-  - find docs.it4i/ -name '*.md' ! -name 'modules-*.md' -exec grep --color -l http {} + | xargs awesome_bot -t 10 --allow-dupe --allow-redirect
+  - find docs.it4i/ -name '*.md' -exec grep --color -l http {} + | xargs awesome_bot -t 10 --allow-dupe --allow-redirect
+  #- |
+  #  COMMIT_MESSAGE=$(git log -1 --pretty=%B | grep "Merge branch 'hot_fix' into 'master'" )
+  #  if [[ ${COMMIT_MESSAGE} == *hot_fix* ]]; then
+  #    echo "Skipping ci build"
+  #    exit 0
+  #  else
+  #    find docs.it4i/ -name '*.md' ! -name 'modules-*.md' -exec grep --color -l http {} + | xargs awesome_bot -t 10 --allow-dupe --allow-redirect
+  #  fi
   only:
   - master
 
@@ -57,10 +66,13 @@ mkdocs:
   - bash scripts/add_version.sh
     # get modules list from clusters 
   - bash scripts/get_modules.sh
+    # regenerate modules matrix
+  - python scripts/modules-matrix.py > docs.it4i/modules-matrix.md
+  - python scripts/modules-json.py > docs.it4i/modules-matrix.json
     # build pages
   - mkdocs build
     # compress search_index.json
-  - bash scripts/clean_json.sh site/mkdocs/search_index.json
+    #- bash scripts/clean_json.sh site/mkdocs/search_index.json
     # replace broken links in 404.html
   - sed -i 's,href="" title=",href="/" title=",g' site/404.html
     # compress sitemap
@@ -99,7 +111,7 @@ deploy to stage:
   - chown nginx:nginx site -R
   - rsync -a --delete site/ root@"$SSH_HOST_STAGE":/srv/docs.it4i.cz/devel/$CI_BUILD_REF_NAME/
   only:
-  - branches@it4i-admins/docs.it4i
+  - branches@sccs/docs.it4i.cz
 
 deploy to production:
   environment: production
@@ -122,5 +134,5 @@ deploy to production:
   - chown nginx:nginx site -R
   - rsync -a --delete site/ root@"$SSH_HOST_STAGE":/srv/docs.it4i.cz/site/
   only:
-  - master@it4i-admins/docs.it4i
+  - master@sccs/docs.it4i.cz
   when: manual
diff --git a/README.md b/README.md
index cf8f9e087b1b2895d66171bd7ac5fa2fa478249c..78d4938aba704d2292ddb4e864a94f1435e90274 100644
--- a/README.md
+++ b/README.md
@@ -4,8 +4,8 @@ This is project contain IT4Innovation user documentation source.
 
 ## Environments
 
-* [https://docs-new.it4i.cz - master branch](https://docs-new.it4i.cz - master branch)
-* [https://docs-new.it4i.cz/devel/$BRANCH_NAME](https://docs-new.it4i.cz/devel/$BRANCH_NAME) - maps the branches
+* [https://docs.it4i.cz](https://docs.it4i.cz) - master branch
+* [https://docs.it4i.cz/devel/$BRANCH_NAME](https://docs.it4i.cz/devel/$BRANCH_NAME) - maps the branches, available only with VPN access
 
 ## URLs
 
@@ -15,7 +15,6 @@ This is project contain IT4Innovation user documentation source.
 ## Rules
 
 * [spellcheck https://github.com/lukeapage/node-markdown-spellcheck](spellcheck https://github.com/lukeapage/node-markdown-spellcheck)
-
 * [SI units http://physics.nist.gov/cuu/Units/checklist.html](SI units http://physics.nist.gov/cuu/Units/checklist.html)
 
 ```
@@ -26,6 +25,23 @@ CentOS
 Mellanox
 ```
 
+## Mathematical Formulae
+
+### Formulas Are Made With
+
+* https://facelessuser.github.io/pymdown-extensions/extensions/arithmatex/
+* https://www.mathjax.org/
+
+You can add a formula to a page like this:
+
+```
+$$
+MAX\_FAIRSHARE * ( 1 - \frac{usage_{Project}}{usage_{Total}} )
+$$
+```
+
+To enable MathJax on a page, add the line ```---8<--- "mathjax.md"``` at the end of the file.
+
 ## Developemnt Environment
 
 ### MkDocs
diff --git a/docs.it4i/anselm/capacity-computing.md b/docs.it4i/anselm/capacity-computing.md
index 6ce94ca34b77ac4b6cc24168fc36ae4e8e0839fa..b4a0c25b90aa93fccdf6a07d9c915d5da58411a1 100644
--- a/docs.it4i/anselm/capacity-computing.md
+++ b/docs.it4i/anselm/capacity-computing.md
@@ -41,7 +41,7 @@ Assume we have 900 input files with name beginning with "file" (e. g. file001, .
 
 First, we create a tasklist file (or subjobs list), listing all tasks (subjobs) - all input files in our example:
 
-```bash
+```console
 $ find . -name 'file*' > tasklist
 ```
 
@@ -78,7 +78,7 @@ If huge number of parallel multicore (in means of multinode multithread, e. g. M
 
 To submit the job array, use the qsub -J command. The 900 jobs of the [example above](capacity-computing/#array_example) may be submitted like this:
 
-```bash
+```console
 $ qsub -N JOBNAME -J 1-900 jobscript
 12345[].dm2
 ```
@@ -87,7 +87,7 @@ In this example, we submit a job array of 900 subjobs. Each subjob will run on f
 
 Sometimes for testing purposes, you may need to submit only one-element array. This is not allowed by PBSPro, but there's a workaround:
 
-```bash
+```console
 $ qsub -N JOBNAME -J 9-10:2 jobscript
 ```
 
@@ -97,7 +97,7 @@ This will only choose the lower index (9 in this example) for submitting/running
 
 Check status of the job array by the qstat command.
 
-```bash
+```console
 $ qstat -a 12345[].dm2
 
 dm2:
@@ -110,7 +110,7 @@ Job ID          Username Queue    Jobname    SessID NDS TSK Memory Time S Time
 The status B means that some subjobs are already running.
 Check status of the first 100 subjobs by the qstat command.
 
-```bash
+```console
 $ qstat -a 12345[1-100].dm2
 
 dm2:
@@ -128,24 +128,24 @@ Job ID          Username Queue    Jobname    SessID NDS TSK Memory Time S Time
 
 Delete the entire job array. Running subjobs will be killed, queueing subjobs will be deleted.
 
-```bash
+```console
 $ qdel 12345[].dm2
 ```
 
 Deleting large job arrays may take a while.
 Display status information for all user's jobs, job arrays, and subjobs.
 
-```bash
+```console
 $ qstat -u $USER -t
 ```
 
 Display status information for all user's subjobs.
 
-```bash
+```console
 $ qstat -u $USER -tJ
 ```
 
-Read more on job arrays in the [PBSPro Users guide](../../pbspro-documentation/).
+Read more on job arrays in the [PBSPro Users guide](../pbspro/).
 
 ## GNU Parallel
 
@@ -156,7 +156,7 @@ GNU parallel is a shell tool for executing jobs in parallel using one or more co
 
 For more information and examples see the parallel man page:
 
-```bash
+```console
 $ module add parallel
 $ man parallel
 ```
@@ -171,7 +171,7 @@ Assume we have 101 input files with name beginning with "file" (e. g. file001, .
 
 First, we create a tasklist file, listing all tasks - all input files in our example:
 
-```bash
+```console
 $ find . -name 'file*' > tasklist
 ```
 
@@ -209,7 +209,7 @@ In this example, tasks from tasklist are executed via the GNU parallel. The jobs
 
 To submit the job, use the qsub command. The 101 tasks' job of the [example above](capacity-computing/#gp_example) may be submitted like this:
 
-```bash
+```console
 $ qsub -N JOBNAME jobscript
 12345.dm2
 ```
@@ -239,13 +239,13 @@ Assume we have 992 input files with name beginning with "file" (e. g. file001, .
 
 First, we create a tasklist file, listing all tasks - all input files in our example:
 
-```bash
+```console
 $ find . -name 'file*' > tasklist
 ```
 
 Next we create a file, controlling how many tasks will be executed in one subjob
 
-```bash
+```console
 $ seq 32 > numtasks
 ```
 
@@ -294,7 +294,7 @@ When deciding this values, think about following guiding rules:
 
 To submit the job array, use the qsub -J command. The 992 tasks' job of the [example above](capacity-computing/#combined_example) may be submitted like this:
 
-```bash
+```console
 $ qsub -N JOBNAME -J 1-992:32 jobscript
 12345[].dm2
 ```
@@ -310,7 +310,7 @@ Download the examples in [capacity.zip](capacity.zip), illustrating the above li
 
 Unzip the archive in an empty directory on Anselm and follow the instructions in the README file
 
-```bash
+```console
 $ unzip capacity.zip
 $ cat README
 ```
diff --git a/docs.it4i/anselm/compute-nodes.md b/docs.it4i/anselm/compute-nodes.md
index 57a6df29e675632b1c5d1951232a7c2807313f15..6df69cce1d57b11c172340ee24f845d954708ea6 100644
--- a/docs.it4i/anselm/compute-nodes.md
+++ b/docs.it4i/anselm/compute-nodes.md
@@ -85,7 +85,7 @@ Anselm is equipped with Intel Sandy Bridge processors Intel Xeon E5-2665 (nodes
 
 Nodes equipped with Intel Xeon E5-2665 CPU have set PBS resource attribute cpu_freq = 24, nodes equipped with Intel Xeon E5-2470 CPU have set PBS resource attribute cpu_freq = 23.
 
-```bash
+```console
 $ qsub -A OPEN-0-0 -q qprod -l select=4:ncpus=16:cpu_freq=24 -I
 ```
 
@@ -93,8 +93,8 @@ In this example, we allocate 4 nodes, 16 cores at 2.4GHhz per node.
 
 Intel Turbo Boost Technology is used by default,  you can disable it for all nodes of job by using resource attribute cpu_turbo_boost.
 
-```bash
-    $ qsub -A OPEN-0-0 -q qprod -l select=4:ncpus=16 -l cpu_turbo_boost=0 -I
+```console
+$ qsub -A OPEN-0-0 -q qprod -l select=4:ncpus=16 -l cpu_turbo_boost=0 -I
 ```
 
 ## Memory Architecture
diff --git a/docs.it4i/anselm/environment-and-modules.md b/docs.it4i/anselm/environment-and-modules.md
index 2aae813076a8f25d8b265cccb3d856f0dc8109fe..d460fa7023c41f16c9be748205061e78f26da3a9 100644
--- a/docs.it4i/anselm/environment-and-modules.md
+++ b/docs.it4i/anselm/environment-and-modules.md
@@ -4,7 +4,9 @@
 
 After logging in, you may want to configure the environment. Write your preferred path definitions, aliases, functions and module loads in the .bashrc file
 
-```bash
+```console
+$ cat ./bashrc
+
 # ./bashrc
 
 # Source global definitions
@@ -33,39 +35,39 @@ In order to configure your shell for running particular application on Anselm we
 !!! note
     The modules set up the application paths, library paths and environment variables for running particular application.
 
-    We have also second modules repository. This modules repository is created using tool called EasyBuild. On Salomon cluster, all modules will be build by this tool. If you want to use software from this modules repository, please follow instructions in section [Application Modules Path Expansion](environment-and-modules/#EasyBuild).
+    We have also second modules repository. This modules repository is created using tool called EasyBuild. On Salomon cluster, all modules will be build by this tool. If you want to use software from this modules repository, please follow instructions in section [Application Modules Path Expansion](environment-and-modules/#application-modules-path-expansion).
 
 The modules may be loaded, unloaded and switched, according to momentary needs.
 
 To check available modules use
 
-```bash
-$ module avail
+```console
+$ module avail **or** ml av
 ```
 
 To load a module, for example the octave module use
 
-```bash
-$ module load octave
+```console
+$ module load octave **or** ml octave
 ```
 
 loading the octave module will set up paths and environment variables of your active shell such that you are ready to run the octave software
 
 To check loaded modules use
 
-```bash
-$ module list
+```console
+$ module list **or** ml
 ```
 
  To unload a module, for example the octave module use
 
-```bash
-$ module unload octave
+```console
+$ module unload octave **or** ml -octave
 ```
 
 Learn more on modules by reading the module man page
 
-```bash
+```console
 $ man module
 ```
 
@@ -79,7 +81,7 @@ PrgEnv-intel sets up the INTEL development environment in conjunction with the I
 
 All application modules on Salomon cluster (and further) will be build using tool called [EasyBuild](http://hpcugent.github.io/easybuild/ "EasyBuild"). In case that you want to use some applications that are build by EasyBuild already, you have to modify your MODULEPATH environment variable.
 
-```bash
+```console
 export MODULEPATH=$MODULEPATH:/apps/easybuild/modules/all/
 ```
 
diff --git a/docs.it4i/anselm/job-priority.md b/docs.it4i/anselm/job-priority.md
index 06c7e921d38a35fac318acc7485dcf2c1a015ddf..30eba2bd004ff45ec5532d06ea556bdd08b7f56c 100644
--- a/docs.it4i/anselm/job-priority.md
+++ b/docs.it4i/anselm/job-priority.md
@@ -26,7 +26,7 @@ Fair-share priority is used for ranking jobs with equal queue priority.
 
 Fair-share priority is calculated as
 
-![](../img/fairshare_formula.png)
+---8<--- "fairshare_formula.md"
 
 where MAX_FAIRSHARE has value 1E6,
 usage<sub>Project</sub> is cumulated usage by all members of selected project,
@@ -52,7 +52,7 @@ Eligible time can be seen as eligible_time attribute of job.
 
 Job execution priority (job sort formula) is calculated as:
 
-![](../img/job_sort_formula.png)
+---8<--- "job_sort_formula.md"
 
 ### Job backfilling
 
@@ -68,3 +68,5 @@ It means, that jobs with lower execution priority can be run before jobs with hi
     It is **very beneficial to specify the walltime** when submitting jobs.
 
 Specifying more accurate walltime enables better scheduling, better execution times and better resource usage. Jobs with suitable (small) walltime could be backfilled - and overtake job(s) with higher priority.
+
+---8<--- "mathjax.md"
diff --git a/docs.it4i/anselm/job-submission-and-execution.md b/docs.it4i/anselm/job-submission-and-execution.md
index b0ea19bd17cecb7fb2199c6112e0e0340a1d0b1a..63490e6a8123229d4b413847f86130fdec396441 100644
--- a/docs.it4i/anselm/job-submission-and-execution.md
+++ b/docs.it4i/anselm/job-submission-and-execution.md
@@ -16,33 +16,36 @@ When allocating computational resources for the job, please specify
 
 Submit the job using the qsub command:
 
-```bash
+```console
 $ qsub -A Project_ID -q queue -l select=x:ncpus=y,walltime=[[hh:]mm:]ss[.ms] jobscript
 ```
 
 The qsub submits the job into the queue, in another words the qsub command creates a request to the PBS Job manager for allocation of specified resources. The resources will be allocated when available, subject to above described policies and constraints. **After the resources are allocated the jobscript or interactive shell is executed on first of the allocated nodes.**
 
+!!! note
+    PBS statement nodes (qsub -l nodes=nodespec) is not supported on Anselm cluster.
+
 ### Job Submission Examples
 
-```bash
+```console
 $ qsub -A OPEN-0-0 -q qprod -l select=64:ncpus=16,walltime=03:00:00 ./myjob
 ```
 
 In this example, we allocate 64 nodes, 16 cores per node, for 3 hours. We allocate these resources via the qprod queue, consumed resources will be accounted to the Project identified by Project ID OPEN-0-0. Jobscript myjob will be executed on the first node in the allocation.
 
-```bash
+```console
 $ qsub -q qexp -l select=4:ncpus=16 -I
 ```
 
 In this example, we allocate 4 nodes, 16 cores per node, for 1 hour. We allocate these resources via the qexp queue. The resources will be available interactively
 
-```bash
+```console
 $ qsub -A OPEN-0-0 -q qnvidia -l select=10:ncpus=16 ./myjob
 ```
 
 In this example, we allocate 10 nvidia accelerated nodes, 16 cores per node, for 24 hours. We allocate these resources via the qnvidia queue. Jobscript myjob will be executed on the first node in the allocation.
 
-```bash
+```console
 $ qsub -A OPEN-0-0 -q qfree -l select=10:ncpus=16 ./myjob
 ```
 
@@ -50,13 +53,13 @@ In this example, we allocate 10 nodes, 16 cores per node, for 12 hours. We alloc
 
 All qsub options may be [saved directly into the jobscript](#example-jobscript-for-mpi-calculation-with-preloaded-inputs). In such a case, no options to qsub are needed.
 
-```bash
+```console
 $ qsub ./myjob
 ```
 
 By default, the PBS batch system sends an e-mail only when the job is aborted. Disabling mail events completely can be done like this:
 
-```bash
+```console
 $ qsub -m n
 ```
 
@@ -66,8 +69,8 @@ $ qsub -m n
 
 Specific nodes may be allocated via the PBS
 
-```bash
-qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=16:host=cn171+1:ncpus=16:host=cn172 -I
+```console
+$ qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=16:host=cn171+1:ncpus=16:host=cn172 -I
 ```
 
 In this example, we allocate nodes cn171 and cn172, all 16 cores per node, for 24 hours.  Consumed resources will be accounted to the Project identified by Project ID OPEN-0-0. The resources will be available interactively.
@@ -81,7 +84,7 @@ Nodes equipped with Intel Xeon E5-2665 CPU have base clock frequency 2.4GHz, nod
 | Intel Xeon E5-2665 | 2.4GHz     | cn[1-180], cn[208-209] | 24                 |
 | Intel Xeon E5-2470 | 2.3GHz     | cn[181-207]            | 23                 |
 
-```bash
+```console
 $ qsub -A OPEN-0-0 -q qprod -l select=4:ncpus=16:cpu_freq=24 -I
 ```
 
@@ -89,14 +92,14 @@ In this example, we allocate 4 nodes, 16 cores, selecting only the nodes with In
 
 ### Placement by IB Switch
 
-Groups of computational nodes are connected to chassis integrated Infiniband switches. These switches form the leaf switch layer of the [Infiniband network](../network/) fat tree topology. Nodes sharing the leaf switch can communicate most efficiently. Sharing the same switch prevents hops in the network and provides for unbiased, most efficient network communication.
+Groups of computational nodes are connected to chassis integrated Infiniband switches. These switches form the leaf switch layer of the [Infiniband network](network/) fat tree topology. Nodes sharing the leaf switch can communicate most efficiently. Sharing the same switch prevents hops in the network and provides for unbiased, most efficient network communication.
 
-Nodes sharing the same switch may be selected via the PBS resource attribute ibswitch. Values of this attribute are iswXX, where XX is the switch number. The node-switch mapping can be seen at [Hardware Overview](../hardware-overview/) section.
+Nodes sharing the same switch may be selected via the PBS resource attribute ibswitch. Values of this attribute are iswXX, where XX is the switch number. The node-switch mapping can be seen at [Hardware Overview](hardware-overview/) section.
 
 We recommend allocating compute nodes of a single switch when best possible computational network performance is required to run the job efficiently:
 
-```bash
-    qsub -A OPEN-0-0 -q qprod -l select=18:ncpus=16:ibswitch=isw11 ./myjob
+```console
+$ qsub -A OPEN-0-0 -q qprod -l select=18:ncpus=16:ibswitch=isw11 ./myjob
 ```
 
 In this example, we request all the 18 nodes sharing the isw11 switch for 24 hours. Full chassis will be allocated.
@@ -109,8 +112,8 @@ Intel Turbo Boost Technology is on by default. We strongly recommend keeping the
 
 If necessary (such as in case of benchmarking) you can disable the Turbo for all nodes of the job by using the PBS resource attribute cpu_turbo_boost
 
-```bash
-    $ qsub -A OPEN-0-0 -q qprod -l select=4:ncpus=16 -l cpu_turbo_boost=0 -I
+```console
+$ qsub -A OPEN-0-0 -q qprod -l select=4:ncpus=16 -l cpu_turbo_boost=0 -I
 ```
 
 More about the Intel Turbo Boost in the TurboBoost section
@@ -119,8 +122,8 @@ More about the Intel Turbo Boost in the TurboBoost section
 
 In the following example, we select an allocation for benchmarking a very special and demanding MPI program. We request Turbo off, 2 full chassis of compute nodes (nodes sharing the same IB switches) for 30 minutes:
 
-```bash
-    $ qsub -A OPEN-0-0 -q qprod
+```console
+$ qsub -A OPEN-0-0 -q qprod
     -l select=18:ncpus=16:ibswitch=isw10:mpiprocs=1:ompthreads=16+18:ncpus=16:ibswitch=isw20:mpiprocs=16:ompthreads=1
     -l cpu_turbo_boost=0,walltime=00:30:00
     -N Benchmark ./mybenchmark
@@ -135,7 +138,7 @@ Although this example is somewhat artificial, it demonstrates the flexibility of
 !!! note
     Check status of your jobs using the **qstat** and **check-pbs-jobs** commands
 
-```bash
+```console
 $ qstat -a
 $ qstat -a -u username
 $ qstat -an -u username
@@ -144,7 +147,7 @@ $ qstat -f 12345.srv11
 
 Example:
 
-```bash
+```console
 $ qstat -a
 
 srv11:
@@ -160,19 +163,17 @@ In this example user1 and user2 are running jobs named job1, job2 and job3x. The
 
 Check status of your jobs using check-pbs-jobs command. Check presence of user's PBS jobs' processes on execution hosts. Display load, processes. Display job standard and error output. Continuously display (tail -f) job standard or error output.
 
-```bash
+```console
 $ check-pbs-jobs --check-all
 $ check-pbs-jobs --print-load --print-processes
 $ check-pbs-jobs --print-job-out --print-job-err
-
 $ check-pbs-jobs --jobid JOBID --check-all --print-all
-
 $ check-pbs-jobs --jobid JOBID --tailf-job-out
 ```
 
 Examples:
 
-```bash
+```console
 $ check-pbs-jobs --check-all
 JOB 35141.dm2, session_id 71995, user user2, nodes cn164,cn165
 Check session id: OK
@@ -183,7 +184,7 @@ cn165: No process
 
 In this example we see that job 35141.dm2 currently runs no process on allocated node cn165, which may indicate an execution error.
 
-```bash
+```console
 $ check-pbs-jobs --print-load --print-processes
 JOB 35141.dm2, session_id 71995, user user2, nodes cn164,cn165
 Print load
@@ -199,7 +200,7 @@ cn164: 99.7 run-task
 
 In this example we see that job 35141.dm2 currently runs process run-task on node cn164, using one thread only, while node cn165 is empty, which may indicate an execution error.
 
-```bash
+```console
 $ check-pbs-jobs --jobid 35141.dm2 --print-job-out
 JOB 35141.dm2, session_id 71995, user user2, nodes cn164,cn165
 Print job standard output:
@@ -218,19 +219,19 @@ In this example, we see actual output (some iteration loops) of the job 35141.dm
 
 You may release your allocation at any time, using qdel command
 
-```bash
+```console
 $ qdel 12345.srv11
 ```
 
 You may kill a running job by force, using qsig command
 
-```bash
+```console
 $ qsig -s 9 12345.srv11
 ```
 
 Learn more by reading the pbs man page
 
-```bash
+```console
 $ man pbs_professional
 ```
 
@@ -246,7 +247,7 @@ The Jobscript is a user made script, controlling sequence of commands for execut
 !!! note
     The jobscript or interactive shell is executed on first of the allocated nodes.
 
-```bash
+```console
 $ qsub -q qexp -l select=4:ncpus=16 -N Name0 ./myjob
 $ qstat -n -u username
 
@@ -262,7 +263,7 @@ In this example, the nodes cn17, cn108, cn109 and cn110 were allocated for 1 hou
 
 The jobscript or interactive shell is by default executed in home directory
 
-```bash
+```console
 $ qsub -q qexp -l select=4:ncpus=16 -I
 qsub: waiting for job 15210.srv11 to start
 qsub: job 15210.srv11 ready
@@ -280,7 +281,7 @@ The allocated nodes are accessible via ssh from login nodes. The nodes may acces
 
 Calculations on allocated nodes may be executed remotely via the MPI, ssh, pdsh or clush. You may find out which nodes belong to the allocation by reading the $PBS_NODEFILE file
 
-```bash
+```console
 qsub -q qexp -l select=4:ncpus=16 -I
 qsub: waiting for job 15210.srv11 to start
 qsub: job 15210.srv11 ready
diff --git a/docs.it4i/anselm/network.md b/docs.it4i/anselm/network.md
index a2af06f97a85472d327eeffc4a743d5eb70d6bb1..79c6f1a37f0d22f286e4de57dac097dcea8d19e8 100644
--- a/docs.it4i/anselm/network.md
+++ b/docs.it4i/anselm/network.md
@@ -19,7 +19,7 @@ The compute nodes may be accessed via the regular Gigabit Ethernet network inter
 
 ## Example
 
-```bash
+```console
 $ qsub -q qexp -l select=4:ncpus=16 -N Name0 ./myjob
 $ qstat -n -u username
                                                             Req'd Req'd   Elap
diff --git a/docs.it4i/anselm/prace.md b/docs.it4i/anselm/prace.md
index c4d3bce0f399ff78168f86fc8fb0c5f3e0a95999..061cd0a0714075f3caca51152363e10ff795176f 100644
--- a/docs.it4i/anselm/prace.md
+++ b/docs.it4i/anselm/prace.md
@@ -36,14 +36,14 @@ Most of the information needed by PRACE users accessing the Anselm TIER-1 system
 
 Before you start to use any of the services don't forget to create a proxy certificate from your certificate:
 
-```bash
-    $ grid-proxy-init
+```console
+$ grid-proxy-init
 ```
 
 To check whether your proxy certificate is still valid (by default it's valid 12 hours), use:
 
-```bash
-    $ grid-proxy-info
+```console
+$ grid-proxy-info
 ```
 
 To access Anselm cluster, two login nodes running GSI SSH service are available. The service is available from public Internet as well as from the internal PRACE network (accessible only from other PRACE partners).
@@ -58,14 +58,14 @@ It is recommended to use the single DNS name anselm-prace.it4i.cz which is distr
 | login1-prace.anselm.it4i.cz | 2222 | gsissh   | login1           |
 | login2-prace.anselm.it4i.cz | 2222 | gsissh   | login2           |
 
-```bash
-    $ gsissh -p 2222 anselm-prace.it4i.cz
+```console
+$ gsissh -p 2222 anselm-prace.it4i.cz
 ```
 
 When logging from other PRACE system, the prace_service script can be used:
 
-```bash
-    $ gsissh `prace_service -i -s anselm`
+```console
+$ gsissh `prace_service -i -s anselm`
 ```
 
 #### Access From Public Internet:
@@ -78,26 +78,26 @@ It is recommended to use the single DNS name anselm.it4i.cz which is distributed
 | login1.anselm.it4i.cz | 2222 | gsissh   | login1           |
 | login2.anselm.it4i.cz | 2222 | gsissh   | login2           |
 
-```bash
-    $ gsissh -p 2222 anselm.it4i.cz
+```console
+$ gsissh -p 2222 anselm.it4i.cz
 ```
 
 When logging from other PRACE system, the prace_service script can be used:
 
-```bash
-    $ gsissh `prace_service -e -s anselm`
+```console
+$ gsissh `prace_service -e -s anselm`
 ```
 
 Although the preferred and recommended file transfer mechanism is [using GridFTP](prace/#file-transfers), the GSI SSH implementation on Anselm supports also SCP, so for small files transfer gsiscp can be used:
 
-```bash
-    $ gsiscp -P 2222 _LOCAL_PATH_TO_YOUR_FILE_ anselm.it4i.cz:_ANSELM_PATH_TO_YOUR_FILE_
+```console
+$ gsiscp -P 2222 _LOCAL_PATH_TO_YOUR_FILE_ anselm.it4i.cz:_ANSELM_PATH_TO_YOUR_FILE_
 
-    $ gsiscp -P 2222 anselm.it4i.cz:_ANSELM_PATH_TO_YOUR_FILE_ _LOCAL_PATH_TO_YOUR_FILE_
+$ gsiscp -P 2222 anselm.it4i.cz:_ANSELM_PATH_TO_YOUR_FILE_ _LOCAL_PATH_TO_YOUR_FILE_
 
-    $ gsiscp -P 2222 _LOCAL_PATH_TO_YOUR_FILE_ anselm-prace.it4i.cz:_ANSELM_PATH_TO_YOUR_FILE_
+$ gsiscp -P 2222 _LOCAL_PATH_TO_YOUR_FILE_ anselm-prace.it4i.cz:_ANSELM_PATH_TO_YOUR_FILE_
 
-    $ gsiscp -P 2222 anselm-prace.it4i.cz:_ANSELM_PATH_TO_YOUR_FILE_ _LOCAL_PATH_TO_YOUR_FILE_
+$ gsiscp -P 2222 anselm-prace.it4i.cz:_ANSELM_PATH_TO_YOUR_FILE_ _LOCAL_PATH_TO_YOUR_FILE_
 ```
 
 ### Access to X11 Applications (VNC)
@@ -106,8 +106,8 @@ If the user needs to run X11 based graphical application and does not have a X11
 
 If the user uses GSI SSH based access, then the procedure is similar to the SSH based access, only the port forwarding must be done using GSI SSH:
 
-```bash
-    $ gsissh -p 2222 anselm.it4i.cz -L 5961:localhost:5961
+```console
+$ gsissh -p 2222 anselm.it4i.cz -L 5961:localhost:5961
 ```
 
 ### Access With SSH
@@ -133,26 +133,26 @@ There's one control server and three backend servers for striping and/or backup
 
 Copy files **to** Anselm by running the following commands on your local machine:
 
-```bash
-    $ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp-prace.anselm.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_
+```console
+$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp-prace.anselm.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_
 ```
 
 Or by using prace_service script:
 
-```bash
-    $ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://`prace_service -i -f anselm`/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_
+```console
+$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://`prace_service -i -f anselm`/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_
 ```
 
 Copy files **from** Anselm:
 
-```bash
-    $ globus-url-copy gsiftp://gridftp-prace.anselm.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
+```console
+$ globus-url-copy gsiftp://gridftp-prace.anselm.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
 ```
 
 Or by using prace_service script:
 
-```bash
-    $ globus-url-copy gsiftp://`prace_service -i -f anselm`/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
+```console
+$ globus-url-copy gsiftp://`prace_service -i -f anselm`/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
 ```
 
 ### Access From Public Internet
@@ -166,26 +166,26 @@ Or by using prace_service script:
 
 Copy files **to** Anselm by running the following commands on your local machine:
 
-```bash
-    $ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp.anselm.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_
+```console
+$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp.anselm.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_
 ```
 
 Or by using prace_service script:
 
-```bash
-    $ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://`prace_service -e -f anselm`/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_
+```console
+$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://`prace_service -e -f anselm`/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_
 ```
 
 Copy files **from** Anselm:
 
-```bash
-    $ globus-url-copy gsiftp://gridftp.anselm.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
+```console
+$ globus-url-copy gsiftp://gridftp.anselm.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
 ```
 
 Or by using prace_service script:
 
-```bash
-    $ globus-url-copy gsiftp://`prace_service -e -f anselm`/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
+```console
+$ globus-url-copy gsiftp://`prace_service -e -f anselm`/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
 ```
 
 Generally both shared file systems are available through GridFTP:
@@ -209,8 +209,8 @@ All system wide installed software on the cluster is made available to the users
 
 PRACE users can use the "prace" module to use the [PRACE Common Production Environment](http://www.prace-ri.eu/prace-common-production-environment/).
 
-```bash
-    $ module load prace
+```console
+$ module load prace
 ```
 
 ### Resource Allocation and Job Execution
@@ -239,10 +239,10 @@ Users who have undergone the full local registration procedure (including signin
     You need to know your user password to use the command. Displayed core hours are "system core hours" which differ from PRACE "standardized core hours".
 
 !!! hint
-    The **it4ifree** command is a part of it4i.portal.clients package, located here: <https://pypi.python.org/pypi/it4i.portal.clients>
+    The **it4ifree** command is a part of it4i.portal.clients package, [located here](https://pypi.python.org/pypi/it4i.portal.clients).
 
-```bash
-    $ it4ifree
+```console
+$ it4ifree
     Password:
          PID    Total   Used   ...by me Free
        -------- ------- ------ -------- -------
@@ -252,9 +252,9 @@ Users who have undergone the full local registration procedure (including signin
 
 By default file system quota is applied. To check the current status of the quota use
 
-```bash
-    $ lfs quota -u USER_LOGIN /home
-    $ lfs quota -u USER_LOGIN /scratch
+```console
+$ lfs quota -u USER_LOGIN /home
+$ lfs quota -u USER_LOGIN /scratch
 ```
 
 If the quota is insufficient, please contact the [support](prace/#help-and-support) and request an increase.
diff --git a/docs.it4i/anselm/remote-visualization.md b/docs.it4i/anselm/remote-visualization.md
index 7b0149fce735ac31592baa6f232cf3be2ffc5a54..e5a439b4654da5342101d15287212501b87c0df9 100644
--- a/docs.it4i/anselm/remote-visualization.md
+++ b/docs.it4i/anselm/remote-visualization.md
@@ -46,7 +46,7 @@ To have the OpenGL acceleration, **24 bit color depth must be used**. Otherwise
 
 This example defines desktop with dimensions 1200x700 pixels and 24 bit color depth.
 
-```bash
+```console
 $ module load turbovnc/1.2.2
 $ vncserver -geometry 1200x700 -depth 24
 
@@ -58,7 +58,7 @@ Log file is /home/username/.vnc/login2:1.log
 
 #### 3. Remember Which Display Number Your VNC Server Runs (You Will Need It in the Future to Stop the Server)
 
-```bash
+```console
 $ vncserver -list
 
 TurboVNC server sessions:
@@ -71,7 +71,7 @@ In this example the VNC server runs on display **:1**.
 
 #### 4. Remember the Exact Login Node, Where Your VNC Server Runs
 
-```bash
+```console
 $ uname -n
 login2
 ```
@@ -82,7 +82,7 @@ In this example the VNC server runs on **login2**.
 
 To get the port you have to look to the log file of your VNC server.
 
-```bash
+```console
 $ grep -E "VNC.*port" /home/username/.vnc/login2:1.log
 20/02/2015 14:46:41 Listening for VNC connections on TCP port 5901
 ```
@@ -93,7 +93,7 @@ In this example the VNC server listens on TCP port **5901**.
 
 Tunnel the TCP port on which your VNC server is listenning.
 
-```bash
+```console
 $ ssh login2.anselm.it4i.cz -L 5901:localhost:5901
 ```
 
@@ -109,7 +109,7 @@ Get it from: <http://sourceforge.net/projects/turbovnc/>
 
 Mind that you should connect through the SSH tunneled port. In this example it is 5901 on your workstation (localhost).
 
-```bash
+```console
 $ vncviewer localhost:5901
 ```
 
@@ -123,7 +123,7 @@ Now you should have working TurboVNC session connected to your workstation.
 
 Don't forget to correctly shutdown your own VNC server on the login node!
 
-```bash
+```console
 $ vncserver -kill :1
 ```
 
@@ -147,13 +147,13 @@ To access the visualization node, follow these steps:
 
 This step is necessary to allow you to proceed with next steps.
 
-```bash
+```console
 $ qsub -I -q qviz -A PROJECT_ID
 ```
 
 In this example the default values for CPU cores and usage time are used.
 
-```bash
+```console
 $ qsub -I -q qviz -A PROJECT_ID -l select=1:ncpus=16 -l walltime=02:00:00
 ```
 
@@ -163,7 +163,7 @@ In this example a whole node for 2 hours is requested.
 
 If there are free resources for your request, you will have a shell unning on an assigned node. Please remember the name of the node.
 
-```bash
+```console
 $ uname -n
 srv8
 ```
@@ -174,7 +174,7 @@ In this example the visualization session was assigned to node **srv8**.
 
 Setup the VirtualGL connection to the node, which PBSPro allocated for our job.
 
-```bash
+```console
 $ vglconnect srv8
 ```
 
@@ -182,19 +182,19 @@ You will be connected with created VirtualGL tunnel to the visualization ode, wh
 
 #### 3. Load the VirtualGL Module
 
-```bash
+```console
 $ module load virtualgl/2.4
 ```
 
 #### 4. Run Your Desired OpenGL Accelerated Application Using VirtualGL Script "Vglrun"
 
-```bash
+```console
 $ vglrun glxgears
 ```
 
 If you want to run an OpenGL application which is vailable through modules, you need at first load the respective module. E.g. to run the **Mentat** OpenGL application from **MARC** software ackage use:
 
-```bash
+```console
 $ module load marc/2013.1
 $ vglrun mentat
 ```
diff --git a/docs.it4i/anselm/resource-allocation-and-job-execution.md b/docs.it4i/anselm/resource-allocation-and-job-execution.md
index b04a95ead56383feaf887c3121495c091d0d380a..8df8072c9e5ddefbeba31c071309697ae1d6f92b 100644
--- a/docs.it4i/anselm/resource-allocation-and-job-execution.md
+++ b/docs.it4i/anselm/resource-allocation-and-job-execution.md
@@ -1,6 +1,6 @@
 # Resource Allocation and Job Execution
 
-To run a [job](../introduction/), [computational resources](../introduction/) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which efficiently distributes workloads across the supercomputer. Extensive information about PBS Pro can be found in the [official documentation here](../pbspro-documentation/pbspro/), especially in the PBS Pro User's Guide.
+To run a [job](job-submission-and-execution/), [computational resources](resources-allocation-policy/) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which efficiently distributes workloads across the supercomputer. Extensive information about PBS Pro can be found in the [official documentation here](../pbspro/), especially in the PBS Pro User's Guide.
 
 ## Resources Allocation Policy
 
diff --git a/docs.it4i/anselm/resources-allocation-policy.md b/docs.it4i/anselm/resources-allocation-policy.md
index 16cb7510d63075d413a19a9a9702ebbf23a4fb78..7ed577a23fbc25aa38487157915e482da168313e 100644
--- a/docs.it4i/anselm/resources-allocation-policy.md
+++ b/docs.it4i/anselm/resources-allocation-policy.md
@@ -43,13 +43,13 @@ Anselm users may check current queue configuration at <https://extranet.it4i.cz/
 
 Display the queue status on Anselm:
 
-```bash
+```console
 $ qstat -q
 ```
 
 The PBS allocation overview may be obtained also using the rspbs command.
 
-```bash
+```console
 $ rspbs
 Usage: rspbs [options]
 
@@ -118,7 +118,7 @@ The resources that are currently subject to accounting are the core-hours. The c
 
 User may check at any time, how many core-hours have been consumed by himself/herself and his/her projects. The command is available on clusters' login nodes.
 
-```bash
+```console
 $ it4ifree
 Password:
      PID    Total   Used   ...by me Free
diff --git a/docs.it4i/anselm/shell-and-data-access.md b/docs.it4i/anselm/shell-and-data-access.md
index 260945ed1b896b1740a98f5de44a5e2caa9910e3..e850c88133c723937ecdd17ec6e6eb08d7e7541f 100644
--- a/docs.it4i/anselm/shell-and-data-access.md
+++ b/docs.it4i/anselm/shell-and-data-access.md
@@ -22,13 +22,13 @@ Private key authentication:
 
 On **Linux** or **Mac**, use
 
-```bash
+```console
 local $ ssh -i /path/to/id_rsa username@anselm.it4i.cz
 ```
 
 If you see warning message "UNPROTECTED PRIVATE KEY FILE!", use this command to set lower permissions to private key file.
 
-```bash
+```console
 local $ chmod 600 /path/to/id_rsa
 ```
 
@@ -36,7 +36,7 @@ On **Windows**, use [PuTTY ssh client](../general/accessing-the-clusters/shell-a
 
 After logging in, you will see the command prompt:
 
-```bash
+```console
                                             _
                        /\                  | |
                       /  \   _ __  ___  ___| |_ __ ___
@@ -81,23 +81,23 @@ To achieve 160MB/s transfer rates, the end user must be connected by 10G line al
 
 On linux or Mac, use scp or sftp client to transfer the data to Anselm:
 
-```bash
+```console
 local $ scp -i /path/to/id_rsa my-local-file username@anselm.it4i.cz:directory/file
 ```
 
-```bash
+```console
 local $ scp -i /path/to/id_rsa -r my-local-dir username@anselm.it4i.cz:directory
 ```
 
 or
 
-```bash
+```console
 local $ sftp -o IdentityFile=/path/to/id_rsa username@anselm.it4i.cz
 ```
 
 Very convenient way to transfer files in and out of the Anselm computer is via the fuse filesystem [sshfs](http://linux.die.net/man/1/sshfs)
 
-```bash
+```console
 local $ sshfs -o IdentityFile=/path/to/id_rsa username@anselm.it4i.cz:. mountpoint
 ```
 
@@ -105,7 +105,7 @@ Using sshfs, the users Anselm home directory will be mounted on your local compu
 
 Learn more on ssh, scp and sshfs by reading the manpages
 
-```bash
+```console
 $ man ssh
 $ man scp
 $ man sshfs
@@ -142,7 +142,7 @@ It works by tunneling the connection from Anselm back to users workstation and f
 
 Pick some unused port on Anselm login node  (for example 6000) and establish the port forwarding:
 
-```bash
+```console
 local $ ssh -R 6000:remote.host.com:1234 anselm.it4i.cz
 ```
 
@@ -152,7 +152,7 @@ Port forwarding may be done **using PuTTY** as well. On the PuTTY Configuration
 
 Port forwarding may be established directly to the remote host. However, this requires that user has ssh access to remote.host.com
 
-```bash
+```console
 $ ssh -L 6000:localhost:1234 remote.host.com
 ```
 
@@ -167,7 +167,7 @@ First, establish the remote port forwarding form the login node, as [described a
 
 Second, invoke port forwarding from the compute node to the login node. Insert following line into your jobscript or interactive shell
 
-```bash
+```console
 $ ssh  -TN -f -L 6000:localhost:6000 login1
 ```
 
@@ -182,7 +182,7 @@ Port forwarding is static, each single port is mapped to a particular port on re
 
 To establish local proxy server on your workstation, install and run SOCKS proxy server software. On Linux, sshd demon provides the functionality. To establish SOCKS proxy server listening on port 1080 run:
 
-```bash
+```console
 local $ ssh -D 1080 localhost
 ```
 
@@ -190,7 +190,7 @@ On Windows, install and run the free, open source [Sock Puppet](http://sockspupp
 
 Once the proxy server is running, establish ssh port forwarding from Anselm to the proxy server, port 1080, exactly as [described above](#port-forwarding-from-login-nodes).
 
-```bash
+```console
 local $ ssh -R 6000:localhost:1080 anselm.it4i.cz
 ```
 
diff --git a/docs.it4i/anselm/software/ansys/ansys-fluent.md b/docs.it4i/anselm/software/ansys/ansys-fluent.md
index ff1f7cdd21a26283fd7522fc2cc286f00bde73a7..4521c758ed7def8e6795f9de97ecb0d698cd9dc9 100644
--- a/docs.it4i/anselm/software/ansys/ansys-fluent.md
+++ b/docs.it4i/anselm/software/ansys/ansys-fluent.md
@@ -44,7 +44,7 @@ Working directory has to be created before sending pbs job into the queue. Input
 
 Journal file with definition of the input geometry and boundary conditions and defined process of solution has e.g. the following structure:
 
-```bash
+```console
     /file/read-case aircraft_2m.cas.gz
     /solve/init
     init
@@ -58,7 +58,7 @@ The appropriate dimension of the problem has to be set by parameter (2d/3d).
 
 ## Fast Way to Run Fluent From Command Line
 
-```bash
+```console
 fluent solver_version [FLUENT_options] -i journal_file -pbs
 ```
 
@@ -68,7 +68,7 @@ This syntax will start the ANSYS FLUENT job under PBS Professional using the qsu
 
 The sample script uses a configuration file called pbs_fluent.conf if no command line arguments are present. This configuration file should be present in the directory from which the jobs are submitted (which is also the directory in which the jobs are executed). The following is an example of what the content of pbs_fluent.conf can be:
 
-```bash
+```console
 input="example_small.flin"
 case="Small-1.65m.cas"
 fluent_args="3d -pmyrinet"
@@ -145,7 +145,7 @@ It runs the jobs out of the directory from which they are submitted (PBS_O_WORKD
 
 Fluent could be run in parallel only under Academic Research license. To do so this ANSYS Academic Research license must be placed before ANSYS CFD license in user preferences. To make this change anslic_admin utility should be run
 
-```bash
+```console
 /ansys_inc/shared_les/licensing/lic_admin/anslic_admin
 ```
 
diff --git a/docs.it4i/anselm/software/ansys/ansys.md b/docs.it4i/anselm/software/ansys/ansys.md
index 16be5639d93fc6d14baaff251a5b09a1d0e31b62..24b8b1c09721168d11a214f00a2ee50a109e6c20 100644
--- a/docs.it4i/anselm/software/ansys/ansys.md
+++ b/docs.it4i/anselm/software/ansys/ansys.md
@@ -6,8 +6,8 @@ Anselm provides commercial as well as academic variants. Academic variants are d
 
 To load the latest version of any ANSYS product (Mechanical, Fluent, CFX, MAPDL,...) load the module:
 
-```bash
-    $ module load ansys
+```console
+$ ml ansys
 ```
 
 ANSYS supports interactive regime, but due to assumed solution of extremely difficult tasks it is not recommended.
diff --git a/docs.it4i/anselm/software/chemistry/nwchem.md b/docs.it4i/anselm/software/chemistry/nwchem.md
index 9f09fe794a121ddc173d3a037fe0e6e3e7101163..e4f84d49f9b8a38cba53f212d7db1bc6c8c8c7d2 100644
--- a/docs.it4i/anselm/software/chemistry/nwchem.md
+++ b/docs.it4i/anselm/software/chemistry/nwchem.md
@@ -17,8 +17,8 @@ The following versions are currently installed:
 
 For a current list of installed versions, execute:
 
-```bash
-    module avail nwchem
+```console
+$ ml av nwchem
 ```
 
 ## Running
diff --git a/docs.it4i/anselm/software/compilers.md b/docs.it4i/anselm/software/compilers.md
index d1e59f29fd5c7862e8ad28780c1355ba837f8da1..71e60499b1bb335ddb7a6919e22457aa70b68fa5 100644
--- a/docs.it4i/anselm/software/compilers.md
+++ b/docs.it4i/anselm/software/compilers.md
@@ -22,20 +22,19 @@ For compatibility reasons there are still available the original (old 4.4.6-4) v
 
 It is strongly recommended to use the up to date version (4.8.1) which comes with the module gcc:
 
-```bash
-    $ module load gcc
-    $ gcc -v
-    $ g++ -v
-    $ gfortran -v
+```console
+$ ml gcc
+$ gcc -v
+$ g++ -v
+$ gfortran -v
 ```
 
 With the module loaded two environment variables are predefined. One for maximum optimizations on the Anselm cluster architecture, and the other for debugging purposes:
 
-```bash
-    $ echo $OPTFLAGS
+```console
+$ echo $OPTFLAGS
     -O3 -march=corei7-avx
-
-    $ echo $DEBUGFLAGS
+$ echo $DEBUGFLAGS
     -O0 -g
 ```
 
@@ -52,16 +51,16 @@ For more information about the possibilities of the compilers, please see the ma
 
 To use the GNU UPC compiler and run the compiled binaries use the module gupc
 
-```bash
-    $ module add gupc
-    $ gupc -v
-    $ g++ -v
+```console
+$ module add gupc
+$ gupc -v
+$ g++ -v
 ```
 
 Simple program to test the compiler
 
-```bash
-    $ cat count.upc
+```console
+$ cat count.upc
 
     /* hello.upc - a simple UPC example */
     #include <upc.h>
@@ -79,14 +78,14 @@ Simple program to test the compiler
 
 To compile the example use
 
-```bash
-    $ gupc -o count.upc.x count.upc
+```console
+$ gupc -o count.upc.x count.upc
 ```
 
 To run the example with 5 threads issue
 
-```bash
-    $ ./count.upc.x -fupc-threads-5
+```console
+$ ./count.upc.x -fupc-threads-5
 ```
 
 For more information see the man pages.
@@ -95,9 +94,9 @@ For more information see the man pages.
 
 To use the Berkley UPC compiler and runtime environment to run the binaries use the module bupc
 
-```bash
-    $ module add bupc
-    $ upcc -version
+```console
+$ module add bupc
+$ upcc -version
 ```
 
 As default UPC network the "smp" is used. This is very quick and easy way for testing/debugging, but limited to one node only.
@@ -109,8 +108,8 @@ For production runs, it is recommended to use the native Infiband implementation
 
 Example UPC code:
 
-```bash
-    $ cat hello.upc
+```console
+$ cat hello.upc
 
     /* hello.upc - a simple UPC example */
     #include <upc.h>
@@ -128,22 +127,22 @@ Example UPC code:
 
 To compile the example with the "ibv" UPC network use
 
-```bash
-    $ upcc -network=ibv -o hello.upc.x hello.upc
+```console
+$ upcc -network=ibv -o hello.upc.x hello.upc
 ```
 
 To run the example with 5 threads issue
 
-```bash
-    $ upcrun -n 5 ./hello.upc.x
+```console
+$ upcrun -n 5 ./hello.upc.x
 ```
 
 To run the example on two compute nodes using all 32 cores, with 32 threads, issue
 
-```bash
-    $ qsub -I -q qprod -A PROJECT_ID -l select=2:ncpus=16
-    $ module add bupc
-    $ upcrun -n 32 ./hello.upc.x
+```console
+$ qsub -I -q qprod -A PROJECT_ID -l select=2:ncpus=16
+$ module add bupc
+$ upcrun -n 32 ./hello.upc.x
 ```
 
 For more information see the man pages.
diff --git a/docs.it4i/anselm/software/comsol-multiphysics.md b/docs.it4i/anselm/software/comsol-multiphysics.md
index 457c1aa8fc5d34a4429d5684f977da70d7683b4a..74672428542f3643d754768b7d3c44ed22f22cb6 100644
--- a/docs.it4i/anselm/software/comsol-multiphysics.md
+++ b/docs.it4i/anselm/software/comsol-multiphysics.md
@@ -23,23 +23,23 @@ On the Anselm cluster COMSOL is available in the latest stable version. There ar
 
 To load the of COMSOL load the module
 
-```bash
-    $ module load comsol
+```console
+$ ml comsol
 ```
 
 By default the **EDU variant** will be loaded. If user needs other version or variant, load the particular version. To obtain the list of available versions use
 
-```bash
-    $ module avail comsol
+```console
+$ ml av comsol
 ```
 
 If user needs to prepare COMSOL jobs in the interactive mode it is recommend to use COMSOL on the compute nodes via PBS Pro scheduler. In order run the COMSOL Desktop GUI on Windows is recommended to use the Virtual Network Computing (VNC).
 
-```bash
-    $ xhost +
-    $ qsub -I -X -A PROJECT_ID -q qprod -l select=1:ncpus=16
-    $ module load comsol
-    $ comsol
+```console
+$ xhost +
+$ qsub -I -X -A PROJECT_ID -q qprod -l select=1:ncpus=16
+$ ml comsol
+$ comsol
 ```
 
 To run COMSOL in batch mode, without the COMSOL Desktop GUI environment, user can utilized the default (comsol.pbs) job script and execute it via the qsub command.
@@ -75,14 +75,14 @@ Working directory has to be created before sending the (comsol.pbs) job script i
 
 COMSOL is the software package for the numerical solution of the partial differential equations. LiveLink for MATLAB allows connection to the COMSOL API (Application Programming Interface) with the benefits of the programming language and computing environment of the MATLAB.
 
-LiveLink for MATLAB is available in both **EDU** and **COM** **variant** of the COMSOL release. On Anselm 1 commercial (**COM**) license and the 5 educational (**EDU**) licenses of LiveLink for MATLAB (please see the [ISV Licenses](../isv_licenses/)) are available.
+LiveLink for MATLAB is available in both **EDU** and **COM** **variant** of the COMSOL release. On Anselm 1 commercial (**COM**) license and the 5 educational (**EDU**) licenses of LiveLink for MATLAB (please see the [ISV Licenses](isv_licenses/)) are available.
 Following example shows how to start COMSOL model from MATLAB via LiveLink in the interactive mode.
 
-```bash
+```console
 $ xhost +
 $ qsub -I -X -A PROJECT_ID -q qexp -l select=1:ncpus=16
-$ module load matlab
-$ module load comsol
+$ ml matlab
+$ ml comsol
 $ comsol server matlab
 ```
 
diff --git a/docs.it4i/anselm/software/debuggers/allinea-ddt.md b/docs.it4i/anselm/software/debuggers/allinea-ddt.md
index 6c1c664fb22163d3f9eadd023486494870f2a0a9..f85848417002cc5c9f15d54ea437410ca4585f11 100644
--- a/docs.it4i/anselm/software/debuggers/allinea-ddt.md
+++ b/docs.it4i/anselm/software/debuggers/allinea-ddt.md
@@ -24,20 +24,20 @@ In case of debugging on accelerators:
 
 Load all necessary modules to compile the code. For example:
 
-```bash
-    $ module load intel
-    $ module load impi   ... or ... module load openmpi/X.X.X-icc
+```console
+$ ml intel
+$ ml impi   ... or ... module load openmpi/X.X.X-icc
 ```
 
 Load the Allinea DDT module:
 
-```bash
-    $ module load Forge
+```console
+$ ml Forge
 ```
 
 Compile the code:
 
-```bash
+```console
 $ mpicc -g -O0 -o test_debug test.c
 
 $ mpif90 -g -O0 -o test_debug test.f
@@ -55,22 +55,22 @@ Before debugging, you need to compile your code with theses flags:
 
 Be sure to log in with an X window forwarding enabled. This could mean using the -X in the ssh:
 
-```bash
-    $ ssh -X username@anselm.it4i.cz
+```console
+$ ssh -X username@anselm.it4i.cz
 ```
 
 Other options is to access login node using VNC. Please see the detailed information on how to [use graphic user interface on Anselm](/general/accessing-the-clusters/graphical-user-interface/x-window-system/)
 
 From the login node an interactive session **with X windows forwarding** (-X option) can be started by following command:
 
-```bash
-    $ qsub -I -X -A NONE-0-0 -q qexp -lselect=1:ncpus=16:mpiprocs=16,walltime=01:00:00
+```console
+$ qsub -I -X -A NONE-0-0 -q qexp -lselect=1:ncpus=16:mpiprocs=16,walltime=01:00:00
 ```
 
 Then launch the debugger with the ddt command followed by the name of the executable to debug:
 
-```bash
-    $ ddt test_debug
+```console
+$ ddt test_debug
 ```
 
 A submission window that appears have a prefilled path to the executable to debug. You can select the number of MPI processors and/or OpenMP threads on which to run and press run. Command line arguments to a program can be entered to the "Arguments " box.
@@ -79,16 +79,16 @@ A submission window that appears have a prefilled path to the executable to debu
 
 To start the debugging directly without the submission window, user can specify the debugging and execution parameters from the command line. For example the number of MPI processes is set by option "-np 4". Skipping the dialog is done by "-start" option. To see the list of the "ddt" command line parameters, run "ddt --help".
 
-```bash
-    ddt -start -np 4 ./hello_debug_impi
+```console
+ddt -start -np 4 ./hello_debug_impi
 ```
 
 ## Documentation
 
 Users can find original User Guide after loading the DDT module:
 
-```bash
-    $DDTPATH/doc/userguide.pdf
+```console
+$DDTPATH/doc/userguide.pdf
 ```
 
 [1] Discipline, Magic, Inspiration and Science: Best Practice Debugging with Allinea DDT, Workshop conducted at LLNL by Allinea on May 10, 2013, [link](https://computing.llnl.gov/tutorials/allineaDDT/index.html)
diff --git a/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md b/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md
index 614e6277ba5fcb8401b9a68668626709aa143ede..a5399a61e7ae133d4c037391a1123b0170a132ec 100644
--- a/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md
+++ b/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md
@@ -12,8 +12,8 @@ Our license is limited to 64 MPI processes.
 
 Allinea Performance Reports version 6.0 is available
 
-```bash
-    $ module load PerformanceReports/6.0
+```console
+$ ml PerformanceReports/6.0
 ```
 
 The module sets up environment variables, required for using the Allinea Performance Reports. This particular command loads the default module, which is performance reports version 4.2.
@@ -25,8 +25,8 @@ The module sets up environment variables, required for using the Allinea Perform
 
 Instead of [running your MPI program the usual way](../mpi/), use the the perf report wrapper:
 
-```bash
-    $ perf-report mpirun ./mympiprog.x
+```console
+$ perf-report mpirun ./mympiprog.x
 ```
 
 The mpi program will run as usual. The perf-report creates two additional files, in \*.txt and \*.html format, containing the performance report. Note that [demanding MPI codes should be run within the queue system](../../job-submission-and-execution/).
@@ -37,23 +37,23 @@ In this example, we will be profiling the mympiprog.x MPI program, using Allinea
 
 First, we allocate some nodes via the express queue:
 
-```bash
-    $ qsub -q qexp -l select=2:ncpus=16:mpiprocs=16:ompthreads=1 -I
+```console
+$ qsub -q qexp -l select=2:ncpus=16:mpiprocs=16:ompthreads=1 -I
     qsub: waiting for job 262197.dm2 to start
     qsub: job 262197.dm2 ready
 ```
 
 Then we load the modules and run the program the usual way:
 
-```bash
-    $ module load intel impi allinea-perf-report/4.2
-    $ mpirun ./mympiprog.x
+```console
+$ ml intel impi allinea-perf-report/4.2
+$ mpirun ./mympiprog.x
 ```
 
 Now lets profile the code:
 
-```bash
-    $ perf-report mpirun ./mympiprog.x
+```console
+$ perf-report mpirun ./mympiprog.x
 ```
 
 Performance report files [mympiprog_32p\*.txt](../../../src/mympiprog_32p_2014-10-15_16-56.txt) and [mympiprog_32p\*.html](../../../src/mympiprog_32p_2014-10-15_16-56.html) were created. We can see that the code is very efficient on MPI and is CPU bounded.
diff --git a/docs.it4i/anselm/software/debuggers/debuggers.md b/docs.it4i/anselm/software/debuggers/debuggers.md
index dd2bc60d833d9fa269c1df98d895fb969a601cd7..3d38fd6a59565a1814df261d6cc2383f9bef7c59 100644
--- a/docs.it4i/anselm/software/debuggers/debuggers.md
+++ b/docs.it4i/anselm/software/debuggers/debuggers.md
@@ -8,9 +8,9 @@ We provide state of the art programms and tools to develop, profile and debug HP
 
 The intel debugger version 13.0 is available, via module intel. The debugger works for applications compiled with C and C++ compiler and the ifort fortran 77/90/95 compiler. The debugger provides java GUI environment. Use X display for running the GUI.
 
-```bash
-    $ module load intel
-    $ idb
+```console
+$ ml intel
+$ idb
 ```
 
 Read more at the [Intel Debugger](intel-suite/intel-debugger/) page.
@@ -19,9 +19,9 @@ Read more at the [Intel Debugger](intel-suite/intel-debugger/) page.
 
 Allinea DDT, is a commercial debugger primarily for debugging parallel MPI or OpenMP programs. It also has a support for GPU (CUDA) and Intel Xeon Phi accelerators. DDT provides all the standard debugging features (stack trace, breakpoints, watches, view variables, threads etc.) for every thread running as part of your program, or for every process even if these processes are distributed across a cluster using an MPI implementation.
 
-```bash
-    $ module load Forge
-    $ forge
+```console
+$ ml Forge
+$ forge
 ```
 
 Read more at the [Allinea DDT](debuggers/allinea-ddt/) page.
@@ -30,9 +30,9 @@ Read more at the [Allinea DDT](debuggers/allinea-ddt/) page.
 
 Allinea Performance Reports characterize the performance of HPC application runs. After executing your application through the tool, a synthetic HTML report is generated automatically, containing information about several metrics along with clear behavior statements and hints to help you improve the efficiency of your runs. Our license is limited to 64 MPI processes.
 
-```bash
-    $ module load PerformanceReports/6.0
-    $ perf-report mpirun -n 64 ./my_application argument01 argument02
+```console
+$ ml PerformanceReports/6.0
+$ perf-report mpirun -n 64 ./my_application argument01 argument02
 ```
 
 Read more at the [Allinea Performance Reports](debuggers/allinea-performance-reports/) page.
@@ -41,9 +41,9 @@ Read more at the [Allinea Performance Reports](debuggers/allinea-performance-rep
 
 TotalView is a source- and machine-level debugger for multi-process, multi-threaded programs. Its wide range of tools provides ways to analyze, organize, and test programs, making it easy to isolate and identify problems in individual threads and processes in programs of great complexity.
 
-```bash
-    $ module load totalview
-    $ totalview
+```console
+$ ml totalview
+$ totalview
 ```
 
 Read more at the [Totalview](debuggers/total-view/) page.
@@ -52,9 +52,9 @@ Read more at the [Totalview](debuggers/total-view/) page.
 
 Vampir is a GUI trace analyzer for traces in OTF format.
 
-```bash
-    $ module load Vampir/8.5.0
-    $ vampir
+```console
+$ ml Vampir/8.5.0
+$ vampir
 ```
 
 Read more at the [Vampir](vampir/) page.
diff --git a/docs.it4i/anselm/software/debuggers/intel-performance-counter-monitor.md b/docs.it4i/anselm/software/debuggers/intel-performance-counter-monitor.md
index f9e8e88dcaf2186ea59519f7a7b31305fd1287d6..b46b472b68577a3f0764199439de310a967a4bde 100644
--- a/docs.it4i/anselm/software/debuggers/intel-performance-counter-monitor.md
+++ b/docs.it4i/anselm/software/debuggers/intel-performance-counter-monitor.md
@@ -8,8 +8,8 @@ Intel PCM (Performance Counter Monitor) is a tool to monitor performance hardwar
 
 Currently installed version 2.6. To load the [module](../../environment-and-modules/), issue:
 
-```bash
-    $ module load intelpcm
+```console
+$ ml intelpcm
 ```
 
 ## Command Line Tools
@@ -20,15 +20,15 @@ PCM provides a set of tools to monitor system/or application.
 
  Measures memory bandwidth of your application or the whole system. Usage:
 
-```bash
-    $ pcm-memory.x <delay>|[external_program parameters]
+```console
+$ pcm-memory.x <delay>|[external_program parameters]
 ```
 
 Specify either a delay of updates in seconds or an external program to monitor. If you get an error about PMU in use, respond "y" and relaunch the program.
 
 Sample output:
 
-```bash
+```console
     ---------------------------------------||---------------------------------------
     --             Socket 0              --||--             Socket 1              --
     ---------------------------------------||---------------------------------------
@@ -77,7 +77,7 @@ This command provides an overview of performance counters and memory usage. Usag
 
 Sample output :
 
-```bash
+```console
     $ pcm.x ./matrix
 
      Intel(r) Performance Counter Monitor V2.6 (2013-11-04 13:43:31 +0100 ID=db05e43)
@@ -246,14 +246,14 @@ Sample program using the API :
 
 Compile it with :
 
-```bash
-    $ icc matrix.cpp -o matrix -lpthread -lpcm
+```console
+$ icc matrix.cpp -o matrix -lpthread -lpcm
 ```
 
 Sample output:
 
-```bash
-    $ ./matrix
+```console
+$ ./matrix
     Number of physical cores: 16
     Number of logical cores: 16
     Threads (logical cores) per physical core: 1
diff --git a/docs.it4i/anselm/software/debuggers/intel-vtune-amplifier.md b/docs.it4i/anselm/software/debuggers/intel-vtune-amplifier.md
index e9921046dd13f4b3b3b345f2666b426f2bd5ca9c..1d90aacfee0141246d4fbe41912ca8e3040b30db 100644
--- a/docs.it4i/anselm/software/debuggers/intel-vtune-amplifier.md
+++ b/docs.it4i/anselm/software/debuggers/intel-vtune-amplifier.md
@@ -16,14 +16,14 @@ Intel VTune Amplifier, part of Intel Parallel studio, is a GUI profiling tool de
 
 To launch the GUI, first load the module:
 
-```bash
-    $ module add VTune/2016_update1
+```console
+$ module add VTune/2016_update1
 ```
 
 and launch the GUI :
 
-```bash
-    $ amplxe-gui
+```console
+$ amplxe-gui
 ```
 
 !!! note
@@ -39,8 +39,8 @@ VTune Amplifier also allows a form of remote analysis. In this mode, data for an
 
 The command line will look like this:
 
-```bash
-    /apps/all/VTune/2016_update1/vtune_amplifier_xe_2016.1.1.434111/bin64/amplxe-cl -collect advanced-hotspots -knob collection-detail=stack-and-callcount -mrte-mode=native -target-duration-type=veryshort -app-working-dir /home/sta545/test -- /home/sta545/test_pgsesv
+```console
+$ /apps/all/VTune/2016_update1/vtune_amplifier_xe_2016.1.1.434111/bin64/amplxe-cl -collect advanced-hotspots -knob collection-detail=stack-and-callcount -mrte-mode=native -target-duration-type=veryshort -app-working-dir /home/sta545/test -- /home/sta545/test_pgsesv
 ```
 
 Copy the line to clipboard and then you can paste it in your jobscript or in command line. After the collection is run, open the GUI once again, click the menu button in the upper right corner, and select "_Open > Result..._". The GUI will load the results from the run.
@@ -63,8 +63,8 @@ Note that we include source ~/.profile in the command to setup environment paths
 
 You may also use remote analysis to collect data from the MIC and then analyze it in the GUI later :
 
-```bash
-    $ amplxe-cl -collect knc-hotspots -no-auto-finalize -- ssh mic0
+```console
+$ amplxe-cl -collect knc-hotspots -no-auto-finalize -- ssh mic0
     "export LD_LIBRARY_PATH=/apps/intel/composer_xe_2015.2.164/compiler/lib/mic/:/apps/intel/composer_xe_2015.2.164/mkl/lib/mic/; export KMP_AFFINITY=compact; /tmp/app.mic"
 ```
 
diff --git a/docs.it4i/anselm/software/debuggers/papi.md b/docs.it4i/anselm/software/debuggers/papi.md
index bc36923e83e2d464b40e41b3b43ce4316289c3f4..d03dd8354769895e3b7f8454f5a0dd613a626bc3 100644
--- a/docs.it4i/anselm/software/debuggers/papi.md
+++ b/docs.it4i/anselm/software/debuggers/papi.md
@@ -12,8 +12,8 @@ PAPI can be used with parallel as well as serial programs.
 
 To use PAPI, load [module](../../environment-and-modules/) papi:
 
-```bash
-    $ module load papi
+```console
+$ ml papi
 ```
 
 This will load the default version. Execute module avail papi for a list of installed versions.
@@ -26,8 +26,8 @@ The bin directory of PAPI (which is automatically added to  $PATH upon loading t
 
 Prints which preset events are available on the current CPU. The third column indicated whether the preset event is available on the current CPU.
 
-```bash
-    $ papi_avail
+```console
+$ papi_avail
     Available events and hardware information.
     --------------------------------------------------------------------------------
     PAPI Version : 5.3.2.0
@@ -108,7 +108,7 @@ PAPI can be used to query some system infromation, such as CPU name and MHz. [Se
 
 The following example prints MFLOPS rate of a naive matrix-matrix multiplication:
 
-```bash
+```cpp
     #include <stdlib.h>
     #include <stdio.h>
     #include "papi.h"
@@ -149,9 +149,9 @@ The following example prints MFLOPS rate of a naive matrix-matrix multiplication
 
 Now compile and run the example :
 
-```bash
-    $ gcc matrix.c -o matrix -lpapi
-    $ ./matrix
+```console
+$ gcc matrix.c -o matrix -lpapi
+$ ./matrix
     Real_time: 8.852785
     Proc_time: 8.850000
     Total flpins: 6012390908
@@ -160,9 +160,9 @@ Now compile and run the example :
 
 Let's try with optimizations enabled :
 
-```bash
-    $ gcc -O3 matrix.c -o matrix -lpapi
-    $ ./matrix
+```console
+$ gcc -O3 matrix.c -o matrix -lpapi
+$ ./matrix
     Real_time: 0.000020
     Proc_time: 0.000000
     Total flpins: 6
@@ -179,9 +179,9 @@ Now we see a seemingly strange result - the multiplication took no time and only
 
 Now the compiler won't remove the multiplication loop. (However it is still not that smart to see that the result won't ever be negative). Now run the code again:
 
-```bash
-    $ gcc -O3 matrix.c -o matrix -lpapi
-    $ ./matrix
+```console
+$ gcc -O3 matrix.c -o matrix -lpapi
+$ ./matrix
     Real_time: 8.795956
     Proc_time: 8.790000
     Total flpins: 18700983160
@@ -195,39 +195,39 @@ Now the compiler won't remove the multiplication loop. (However it is still not
 
 To use PAPI in [Intel Xeon Phi](../intel-xeon-phi/) native applications, you need to load module with " -mic" suffix, for example " papi/5.3.2-mic" :
 
-```bash
-    $ module load papi/5.3.2-mic
+```console
+$ ml papi/5.3.2-mic
 ```
 
 Then, compile your application in the following way:
 
-```bash
-    $ module load intel
-    $ icc -mmic -Wl,-rpath,/apps/intel/composer_xe_2013.5.192/compiler/lib/mic matrix-mic.c -o matrix-mic -lpapi -lpfm
+```console
+$ ml intel
+$ icc -mmic -Wl,-rpath,/apps/intel/composer_xe_2013.5.192/compiler/lib/mic matrix-mic.c -o matrix-mic -lpapi -lpfm
 ```
 
 To execute the application on MIC, you need to manually set LD_LIBRARY_PATH:
 
-```bash
-    $ qsub -q qmic -A NONE-0-0 -I
-    $ ssh mic0
-    $ export LD_LIBRARY_PATH=/apps/tools/papi/5.4.0-mic/lib/
-    $ ./matrix-mic
+```console
+$ qsub -q qmic -A NONE-0-0 -I
+$ ssh mic0
+$ export LD_LIBRARY_PATH="/apps/tools/papi/5.4.0-mic/lib/"
+$ ./matrix-mic
 ```
 
 Alternatively, you can link PAPI statically (-static flag), then LD_LIBRARY_PATH does not need to be set.
 
 You can also execute the PAPI tools on MIC :
 
-```bash
-    $ /apps/tools/papi/5.4.0-mic/bin/papi_native_avail
+```console
+$ /apps/tools/papi/5.4.0-mic/bin/papi_native_avail
 ```
 
 To use PAPI in offload mode, you need to provide both host and MIC versions of PAPI:
 
-```bash
-    $ module load papi/5.4.0
-    $ icc matrix-offload.c -o matrix-offload -offload-option,mic,compiler,"-L$PAPI_HOME-mic/lib -lpapi" -lpapi
+```console
+$ ml papi/5.4.0
+$ icc matrix-offload.c -o matrix-offload -offload-option,mic,compiler,"-L$PAPI_HOME-mic/lib -lpapi" -lpapi
 ```
 
 ## References
diff --git a/docs.it4i/anselm/software/debuggers/scalasca.md b/docs.it4i/anselm/software/debuggers/scalasca.md
index 19daec04e24247f40721c8ef61632d17290daa80..a7cd44b1d5236eb3e257a24f5a3cfbdb96e6b0f5 100644
--- a/docs.it4i/anselm/software/debuggers/scalasca.md
+++ b/docs.it4i/anselm/software/debuggers/scalasca.md
@@ -33,8 +33,8 @@ After the application is instrumented, runtime measurement can be performed with
 
 An example :
 
-```bash
-    $ scalasca -analyze mpirun -np 4 ./mympiprogram
+```console
+$ scalasca -analyze mpirun -np 4 ./mympiprogram
 ```
 
 Some notable Scalasca options are:
@@ -51,13 +51,13 @@ For the analysis, you must have [Score-P](score-p/) and [CUBE](cube/) modules lo
 
 To launch the analysis, run :
 
-```bash
+```console
 scalasca -examine [options] <experiment_directory>
 ```
 
 If you do not wish to launch the GUI tool, use the "-s" option :
 
-```bash
+```console
 scalasca -examine -s <experiment_directory>
 ```
 
diff --git a/docs.it4i/anselm/software/debuggers/score-p.md b/docs.it4i/anselm/software/debuggers/score-p.md
index 929d971faa2a8b465754c5563b09fa32f554eef2..3295933c45e6c7f8b7275a5bede4cef5064bd49f 100644
--- a/docs.it4i/anselm/software/debuggers/score-p.md
+++ b/docs.it4i/anselm/software/debuggers/score-p.md
@@ -25,7 +25,7 @@ There are three ways to instrument your parallel applications in order to enable
 
 is the easiest method. Score-P will automatically add instrumentation to every routine entry and exit using compiler hooks, and will intercept MPI calls and OpenMP regions. This method might, however, produce a large number of data. If you want to focus on profiler a specific regions of your code, consider using the manual instrumentation methods. To use automated instrumentation, simply prepend scorep to your compilation command. For example, replace:
 
-```bash
+```console
 $ mpif90 -c foo.f90
 $ mpif90 -c bar.f90
 $ mpif90 -o myapp foo.o bar.o
@@ -33,7 +33,7 @@ $ mpif90 -o myapp foo.o bar.o
 
 with:
 
-```bash
+```console
 $ scorep mpif90 -c foo.f90
 $ scorep mpif90 -c bar.f90
 $ scorep mpif90 -o myapp foo.o bar.o
diff --git a/docs.it4i/anselm/software/debuggers/total-view.md b/docs.it4i/anselm/software/debuggers/total-view.md
index b4f710675111efe35ea5779625ac53046bc2722b..de618ace58562f36720e41a5dbb603c9b2478c06 100644
--- a/docs.it4i/anselm/software/debuggers/total-view.md
+++ b/docs.it4i/anselm/software/debuggers/total-view.md
@@ -6,7 +6,7 @@ TotalView is a GUI-based source code multi-process, multi-thread debugger.
 
 On Anselm users can debug OpenMP or MPI code that runs up to 64 parallel processes. These limitation means that:
 
-```bash
+```console
     1 user can debug up 64 processes, or
     32 users can debug 2 processes, etc.
 ```
@@ -15,8 +15,8 @@ Debugging of GPU accelerated codes is also supported.
 
 You can check the status of the licenses here:
 
-```bash
-    cat /apps/user/licenses/totalview_features_state.txt
+```console
+$ cat /apps/user/licenses/totalview_features_state.txt
 
     # totalview
     # -------------------------------------------------
@@ -33,24 +33,21 @@ You can check the status of the licenses here:
 
 Load all necessary modules to compile the code. For example:
 
-```bash
-    module load intel
-
-    module load impi   ... or ... module load openmpi/X.X.X-icc
+```console
+$ ml intel # or: ml foss
 ```
 
 Load the TotalView module:
 
-```bash
-    module load totalview/8.12
+```console
+$ ml totalview/8.12
 ```
 
 Compile the code:
 
-```bash
-    mpicc -g -O0 -o test_debug test.c
-
-    mpif90 -g -O0 -o test_debug test.f
+```console
+$ mpicc -g -O0 -o test_debug test.c
+$ mpif90 -g -O0 -o test_debug test.f
 ```
 
 ### Compiler Flags
@@ -65,16 +62,16 @@ Before debugging, you need to compile your code with theses flags:
 
 Be sure to log in with an X window forwarding enabled. This could mean using the -X in the ssh:
 
-```bash
-    ssh -X username@anselm.it4i.cz
+```console
+local $ ssh -X username@anselm.it4i.cz
 ```
 
 Other options is to access login node using VNC. Please see the detailed information on how to use graphic user interface on Anselm.
 
 From the login node an interactive session with X windows forwarding (-X option) can be started by following command:
 
-```bash
-    qsub -I -X -A NONE-0-0 -q qexp -lselect=1:ncpus=16:mpiprocs=16,walltime=01:00:00
+```console
+$ qsub -I -X -A NONE-0-0 -q qexp -lselect=1:ncpus=16:mpiprocs=16,walltime=01:00:00
 ```
 
 Then launch the debugger with the totalview command followed by the name of the executable to debug.
@@ -83,8 +80,8 @@ Then launch the debugger with the totalview command followed by the name of the
 
 To debug a serial code use:
 
-```bash
-    totalview test_debug
+```console
+$ totalview test_debug
 ```
 
 ### Debugging a Parallel Code - Option 1
@@ -94,7 +91,7 @@ To debug a parallel code compiled with **OpenMPI** you need to setup your TotalV
 !!! hint
     To be able to run parallel debugging procedure from the command line without stopping the debugger in the mpiexec source code you have to add the following function to your `~/.tvdrc` file:
 
-```bash
+```console
     proc mpi_auto_run_starter {loaded_id} {
         set starter_programs {mpirun mpiexec orterun}
         set executable_name [TV::symbol get $loaded_id full_pathname]
@@ -116,8 +113,8 @@ To debug a parallel code compiled with **OpenMPI** you need to setup your TotalV
 
 The source code of this function can be also found in
 
-```bash
-    /apps/mpi/openmpi/intel/1.6.5/etc/openmpi-totalview.tcl
+```console
+$ /apps/mpi/openmpi/intel/1.6.5/etc/openmpi-totalview.tcl
 ```
 
 !!! note
@@ -128,8 +125,8 @@ You need to do this step only once.
 
 Now you can run the parallel debugger using:
 
-```bash
-    mpirun -tv -n 5 ./test_debug
+```console
+$ mpirun -tv -n 5 ./test_debug
 ```
 
 When following dialog appears click on "Yes"
@@ -146,10 +143,10 @@ Other option to start new parallel debugging session from a command line is to l
 
 The following example shows how to start debugging session with Intel MPI:
 
-```bash
-    module load intel/13.5.192 impi/4.1.1.036 totalview/8/13
-
-    totalview -mpi "Intel MPI-Hydra" -np 8 ./hello_debug_impi
+```console
+$ ml intel
+$ ml totalview
+$ totalview -mpi "Intel MPI-Hydra" -np 8 ./hello_debug_impi
 ```
 
 After running previous command you will see the same window as shown in the screenshot above.
diff --git a/docs.it4i/anselm/software/debuggers/valgrind.md b/docs.it4i/anselm/software/debuggers/valgrind.md
index 2602fdbf24c9bdf16503740541ed81c536628b5a..0e381e945c86c1a53af181b8cb62194171535bee 100644
--- a/docs.it4i/anselm/software/debuggers/valgrind.md
+++ b/docs.it4i/anselm/software/debuggers/valgrind.md
@@ -48,9 +48,9 @@ For example, lets look at this C code, which has two problems :
 
 Now, compile it with Intel compiler :
 
-```bash
-    $ module add intel
-    $ icc -g valgrind-example.c -o valgrind-example
+```console
+$ module add intel
+$ icc -g valgrind-example.c -o valgrind-example
 ```
 
 Now, lets run it with Valgrind. The syntax is :
@@ -59,8 +59,8 @@ Now, lets run it with Valgrind. The syntax is :
 
 If no Valgrind options are specified, Valgrind defaults to running Memcheck tool. Please refer to the Valgrind documentation for a full description of command line options.
 
-```bash
-    $ valgrind ./valgrind-example
+```console
+$ valgrind ./valgrind-example
     ==12652== Memcheck, a memory error detector
     ==12652== Copyright (C) 2002-2013, and GNU GPL'd, by Julian Seward et al.
     ==12652== Using Valgrind-3.9.0 and LibVEX; rerun with -h for copyright info
@@ -93,8 +93,8 @@ If no Valgrind options are specified, Valgrind defaults to running Memcheck tool
 
 In the output we can see that Valgrind has detected both errors - the off-by-one memory access at line 5 and a memory leak of 40 bytes. If we want a detailed analysis of the memory leak, we need to run Valgrind with  --leak-check=full option :
 
-```bash
-    $ valgrind --leak-check=full ./valgrind-example
+```console
+$ valgrind --leak-check=full ./valgrind-example
     ==23856== Memcheck, a memory error detector
     ==23856== Copyright (C) 2002-2010, and GNU GPL'd, by Julian Seward et al.
     ==23856== Using Valgrind-3.6.0 and LibVEX; rerun with -h for copyright info
@@ -135,13 +135,13 @@ Now we can see that the memory leak is due to the malloc() at line 6.
 
 Although Valgrind is not primarily a parallel debugger, it can be used to debug parallel applications as well. When launching your parallel applications, prepend the valgrind command. For example :
 
-```bash
-    $ mpirun -np 4 valgrind myapplication
+```console
+$ mpirun -np 4 valgrind myapplication
 ```
 
 The default version without MPI support will however report a large number of false errors in the MPI library, such as :
 
-```bash
+```console
     ==30166== Conditional jump or move depends on uninitialised value(s)
     ==30166== at 0x4C287E8: strlen (mc_replace_strmem.c:282)
     ==30166== by 0x55443BD: I_MPI_Processor_model_number (init_interface.c:427)
@@ -178,16 +178,16 @@ Lets look at this MPI example :
 
 There are two errors - use of uninitialized memory and invalid length of the buffer. Lets debug it with valgrind :
 
-```bash
-    $ module add intel impi
-    $ mpicc -g valgrind-example-mpi.c -o valgrind-example-mpi
-    $ module add valgrind/3.9.0-impi
-    $ mpirun -np 2 -env LD_PRELOAD /apps/tools/valgrind/3.9.0/impi/lib/valgrind/libmpiwrap-amd64-linux.so valgrind ./valgrind-example-mpi
+```console
+$ module add intel impi
+$ mpicc -g valgrind-example-mpi.c -o valgrind-example-mpi
+$ module add valgrind/3.9.0-impi
+$ mpirun -np 2 -env LD_PRELOAD /apps/tools/valgrind/3.9.0/impi/lib/valgrind/libmpiwrap-amd64-linux.so valgrind ./valgrind-example-mpi
 ```
 
 Prints this output : (note that there is output printed for every launched MPI process)
 
-```bash
+```console
     ==31318== Memcheck, a memory error detector
     ==31318== Copyright (C) 2002-2013, and GNU GPL'd, by Julian Seward et al.
     ==31318== Using Valgrind-3.9.0 and LibVEX; rerun with -h for copyright info
diff --git a/docs.it4i/anselm/software/debuggers/vampir.md b/docs.it4i/anselm/software/debuggers/vampir.md
index 1c3009c8a4fe820473b812ec0067a83e3d1922d7..1dfa23e7b8eed6c9deaf04439df6b01ed6358480 100644
--- a/docs.it4i/anselm/software/debuggers/vampir.md
+++ b/docs.it4i/anselm/software/debuggers/vampir.md
@@ -8,9 +8,9 @@ Vampir is a commercial trace analysis and visualization tool. It can work with t
 
 Version 8.5.0 is currently installed as module Vampir/8.5.0 :
 
-```bash
-    $ module load Vampir/8.5.0
-    $ vampir &
+```console
+$ ml Vampir/8.5.0
+$ vampir &
 ```
 
 ## User Manual
diff --git a/docs.it4i/anselm/software/gpi2.md b/docs.it4i/anselm/software/gpi2.md
index ec96e2653a3bfeb9614be13b969ff3273b3ee255..09241e15a96f7412f2e7652efda091d7868cd5d1 100644
--- a/docs.it4i/anselm/software/gpi2.md
+++ b/docs.it4i/anselm/software/gpi2.md
@@ -10,8 +10,8 @@ The GPI-2 library ([www.gpi-site.com/gpi2/](http://www.gpi-site.com/gpi2/)) impl
 
 The GPI-2, version 1.0.2 is available on Anselm via module gpi2:
 
-```bash
-    $ module load gpi2
+```console
+$ ml gpi2
 ```
 
 The module sets up environment variables, required for linking and running GPI-2 enabled applications. This particular command loads the default module, which is gpi2/1.0.2
@@ -25,18 +25,18 @@ Load the gpi2 module. Link using **-lGPI2** and **-libverbs** switches to link y
 
 ### Compiling and Linking With Intel Compilers
 
-```bash
-    $ module load intel
-    $ module load gpi2
-    $ icc myprog.c -o myprog.x -Wl,-rpath=$LIBRARY_PATH -lGPI2 -libverbs
+```console
+$ ml intel
+$ ml gpi2
+$ icc myprog.c -o myprog.x -Wl,-rpath=$LIBRARY_PATH -lGPI2 -libverbs
 ```
 
 ### Compiling and Linking With GNU Compilers
 
-```bash
-    $ module load gcc
-    $ module load gpi2
-    $ gcc myprog.c -o myprog.x -Wl,-rpath=$LIBRARY_PATH -lGPI2 -libverbs
+```console
+$ ml gcc
+$ ml gpi2
+$ gcc myprog.c -o myprog.x -Wl,-rpath=$LIBRARY_PATH -lGPI2 -libverbs
 ```
 
 ## Running the GPI-2 Codes
@@ -46,19 +46,19 @@ Load the gpi2 module. Link using **-lGPI2** and **-libverbs** switches to link y
 
 The gaspi_run utility is used to start and run GPI-2 applications:
 
-```bash
-    $ gaspi_run -m machinefile ./myprog.x
+```console
+$ gaspi_run -m machinefile ./myprog.x
 ```
 
 A machine file (** machinefile **) with the hostnames of nodes where the application will run, must be provided. The machinefile lists all nodes on which to run, one entry per node per process. This file may be hand created or obtained from standard $PBS_NODEFILE:
 
-```bash
-    $ cut -f1 -d"." $PBS_NODEFILE > machinefile
+```console
+$ cut -f1 -d"." $PBS_NODEFILE > machinefile
 ```
 
 machinefile:
 
-```bash
+```console
     cn79
     cn80
 ```
@@ -67,7 +67,7 @@ This machinefile will run 2 GPI-2 processes, one on node cn79 other on node cn80
 
 machinefle:
 
-```bash
+```console
     cn79
     cn79
     cn80
@@ -81,8 +81,8 @@ This machinefile will run 4 GPI-2 processes, 2 on node cn79 o 2 on node cn80.
 
 Example:
 
-```bash
-    $ qsub -A OPEN-0-0 -q qexp -l select=2:ncpus=16:mpiprocs=16 -I
+```console
+$ qsub -A OPEN-0-0 -q qexp -l select=2:ncpus=16:mpiprocs=16 -I
 ```
 
 This example will produce $PBS_NODEFILE with 16 entries per node.
@@ -137,29 +137,28 @@ Following is an example GPI-2 enabled code:
 
 Load modules and compile:
 
-```bash
-    $ module load gcc gpi2
-    $ gcc helloworld_gpi.c -o helloworld_gpi.x -Wl,-rpath=$LIBRARY_PATH -lGPI2 -libverbs
+```console
+$ ml gcc gpi2
+$ gcc helloworld_gpi.c -o helloworld_gpi.x -Wl,-rpath=$LIBRARY_PATH -lGPI2 -libverbs
 ```
 
 Submit the job and run the GPI-2 application
 
-```bash
-    $ qsub -q qexp -l select=2:ncpus=1:mpiprocs=1,place=scatter,walltime=00:05:00 -I
+```console
+$ qsub -q qexp -l select=2:ncpus=1:mpiprocs=1,place=scatter,walltime=00:05:00 -I
     qsub: waiting for job 171247.dm2 to start
     qsub: job 171247.dm2 ready
-
-    cn79 $ module load gpi2
-    cn79 $ cut -f1 -d"." $PBS_NODEFILE > machinefile
-    cn79 $ gaspi_run -m machinefile ./helloworld_gpi.x
+cn79 $ ml gpi2
+cn79 $ cut -f1 -d"." $PBS_NODEFILE > machinefile
+cn79 $ gaspi_run -m machinefile ./helloworld_gpi.x
     Hello from rank 0 of 2
 ```
 
 At the same time, in another session, you may start the gaspi logger:
 
-```bash
-    $ ssh cn79
-    cn79 $ gaspi_logger
+```console
+$ ssh cn79
+cn79 $ gaspi_logger
     GASPI Logger (v1.1)
     [cn80:0] Hello from rank 1 of 2
 ```
diff --git a/docs.it4i/anselm/software/intel-suite/intel-compilers.md b/docs.it4i/anselm/software/intel-suite/intel-compilers.md
index 66de3b77a06d7333464336ada10d68cd3a899aa8..d446655d915833a139353d5c76015f70db9a9645 100644
--- a/docs.it4i/anselm/software/intel-suite/intel-compilers.md
+++ b/docs.it4i/anselm/software/intel-suite/intel-compilers.md
@@ -2,28 +2,28 @@
 
 The Intel compilers version 13.1.1 are available, via module intel. The compilers include the icc C and C++ compiler and the ifort fortran 77/90/95 compiler.
 
-```bash
-    $ module load intel
-    $ icc -v
-    $ ifort -v
+```console
+$ ml intel
+$ icc -v
+$ ifort -v
 ```
 
 The intel compilers provide for vectorization of the code, via the AVX instructions and support threading parallelization via OpenMP
 
 For maximum performance on the Anselm cluster, compile your programs using the AVX instructions, with reporting where the vectorization was used. We recommend following compilation options for high performance
 
-```bash
-    $ icc   -ipo -O3 -vec -xAVX -vec-report1 myprog.c mysubroutines.c -o myprog.x
-    $ ifort -ipo -O3 -vec -xAVX -vec-report1 myprog.f mysubroutines.f -o myprog.x
+```console
+$ icc   -ipo -O3 -vec -xAVX -vec-report1 myprog.c mysubroutines.c -o myprog.x
+$ ifort -ipo -O3 -vec -xAVX -vec-report1 myprog.f mysubroutines.f -o myprog.x
 ```
 
 In this example, we compile the program enabling interprocedural optimizations between source files (-ipo), aggressive loop optimizations (-O3) and vectorization (-vec -xAVX)
 
 The compiler recognizes the omp, simd, vector and ivdep pragmas for OpenMP parallelization and AVX vectorization. Enable the OpenMP parallelization by the **-openmp** compiler switch.
 
-```bash
-    $ icc -ipo -O3 -vec -xAVX -vec-report1 -openmp myprog.c mysubroutines.c -o myprog.x
-    $ ifort -ipo -O3 -vec -xAVX -vec-report1 -openmp myprog.f mysubroutines.f -o myprog.x
+```console
+$ icc -ipo -O3 -vec -xAVX -vec-report1 -openmp myprog.c mysubroutines.c -o myprog.x
+$ ifort -ipo -O3 -vec -xAVX -vec-report1 -openmp myprog.f mysubroutines.f -o myprog.x
 ```
 
 Read more at <http://software.intel.com/sites/products/documentation/doclib/stdxe/2013/composerxe/compiler/cpp-lin/index.htm>
diff --git a/docs.it4i/anselm/software/intel-suite/intel-debugger.md b/docs.it4i/anselm/software/intel-suite/intel-debugger.md
index f13086df7431676a95a75b5258a10667a3464c57..d3a5807fca1a0051c4424a5613f3faa57c26895a 100644
--- a/docs.it4i/anselm/software/intel-suite/intel-debugger.md
+++ b/docs.it4i/anselm/software/intel-suite/intel-debugger.md
@@ -4,30 +4,30 @@
 
 The intel debugger version 13.0 is available, via module intel. The debugger works for applications compiled with C and C++ compiler and the ifort fortran 77/90/95 compiler. The debugger provides java GUI environment. Use X display for running the GUI.
 
-```bash
-    $ module load intel
-    $ idb
+```console
+$ ml intel
+$ idb
 ```
 
 The debugger may run in text mode. To debug in text mode, use
 
-```bash
-    $ idbc
+```console
+$ idbc
 ```
 
 To debug on the compute nodes, module intel must be loaded. The GUI on compute nodes may be accessed using the same way as in the GUI section
 
 Example:
 
-```bash
-    $ qsub -q qexp -l select=1:ncpus=16 -X -I
+```console
+$ qsub -q qexp -l select=1:ncpus=16 -X -I
     qsub: waiting for job 19654.srv11 to start
     qsub: job 19654.srv11 ready
 
-    $ module load intel
-    $ module load java
-    $ icc -O0 -g myprog.c -o myprog.x
-    $ idb ./myprog.x
+$ ml intel
+$ ml java
+$ icc -O0 -g myprog.c -o myprog.x
+$ idb ./myprog.x
 ```
 
 In this example, we allocate 1 full compute node, compile program myprog.c with debugging options -O0 -g and run the idb debugger interactively on the myprog.x executable. The GUI access is via X11 port forwarding provided by the PBS workload manager.
@@ -40,13 +40,13 @@ Intel debugger is capable of debugging multithreaded and MPI parallel programs a
 
 For debugging small number of MPI ranks, you may execute and debug each rank in separate xterm terminal (do not forget the X display. Using Intel MPI, this may be done in following way:
 
-```bash
-    $ qsub -q qexp -l select=2:ncpus=16 -X -I
+```console
+$ qsub -q qexp -l select=2:ncpus=16 -X -I
     qsub: waiting for job 19654.srv11 to start
     qsub: job 19655.srv11 ready
 
-    $ module load intel impi
-    $ mpirun -ppn 1 -hostfile $PBS_NODEFILE --enable-x xterm -e idbc ./mympiprog.x
+$ ml intel impi
+$ mpirun -ppn 1 -hostfile $PBS_NODEFILE --enable-x xterm -e idbc ./mympiprog.x
 ```
 
 In this example, we allocate 2 full compute node, run xterm on each node and start idb debugger in command line mode, debugging two ranks of mympiprog.x application. The xterm will pop up for each rank, with idb prompt ready. The example is not limited to use of Intel MPI
@@ -55,13 +55,13 @@ In this example, we allocate 2 full compute node, run xterm on each node and sta
 
 Run the idb debugger from within the MPI debug option. This will cause the debugger to bind to all ranks and provide aggregated outputs across the ranks, pausing execution automatically just after startup. You may then set break points and step the execution manually. Using Intel MPI:
 
-```bash
+```console
     $ qsub -q qexp -l select=2:ncpus=16 -X -I
     qsub: waiting for job 19654.srv11 to start
     qsub: job 19655.srv11 ready
 
-    $ module load intel impi
-    $ mpirun -n 32 -idb ./mympiprog.x
+$ ml intel impi
+$ mpirun -n 32 -idb ./mympiprog.x
 ```
 
 ### Debugging Multithreaded Application
diff --git a/docs.it4i/anselm/software/intel-suite/intel-integrated-performance-primitives.md b/docs.it4i/anselm/software/intel-suite/intel-integrated-performance-primitives.md
index b92f8d05f62d9305f9624e592d388cf2744b5081..8e0451c69a082275e114c92acd223e3514317389 100644
--- a/docs.it4i/anselm/software/intel-suite/intel-integrated-performance-primitives.md
+++ b/docs.it4i/anselm/software/intel-suite/intel-integrated-performance-primitives.md
@@ -7,8 +7,8 @@ Intel Integrated Performance Primitives, version 7.1.1, compiled for AVX vector
 !!! note
     Check out IPP before implementing own math functions for data processing, it is likely already there.
 
-```bash
-    $ module load ipp
+```console
+$ ml ipp
 ```
 
 The module sets up environment variables, required for linking and running ipp enabled applications.
@@ -58,20 +58,20 @@ The module sets up environment variables, required for linking and running ipp e
 
 Compile above example, using any compiler and the ipp module.
 
-```bash
-    $ module load intel
-    $ module load ipp
+```console
+$ ml intel
+$ ml ipp
 
-    $ icc testipp.c -o testipp.x -lippi -lipps -lippcore
+$ icc testipp.c -o testipp.x -lippi -lipps -lippcore
 ```
 
 You will need the ipp module loaded to run the ipp enabled executable. This may be avoided, by compiling library search paths into the executable
 
-```bash
-    $ module load intel
-    $ module load ipp
+```console
+$ ml intel
+$ ml ipp
 
-    $ icc testipp.c -o testipp.x -Wl,-rpath=$LIBRARY_PATH -lippi -lipps -lippcore
+$ icc testipp.c -o testipp.x -Wl,-rpath=$LIBRARY_PATH -lippi -lipps -lippcore
 ```
 
 ## Code Samples and Documentation
diff --git a/docs.it4i/anselm/software/intel-suite/intel-mkl.md b/docs.it4i/anselm/software/intel-suite/intel-mkl.md
index aed92ae69da6f721f676fa5e4180945711fe5fba..6594f8193b800fa1fb269b8611456c6311adafcf 100644
--- a/docs.it4i/anselm/software/intel-suite/intel-mkl.md
+++ b/docs.it4i/anselm/software/intel-suite/intel-mkl.md
@@ -15,10 +15,10 @@ Intel Math Kernel Library (Intel MKL) is a library of math kernel subroutines, e
 
 For details see the [Intel MKL Reference Manual](http://software.intel.com/sites/products/documentation/doclib/mkl_sa/11/mklman/index.htm).
 
-Intel MKL version 13.5.192 is available on Anselm
+Intel MKL is available on Anselm
 
-```bash
-    $ module load mkl
+```console
+$ ml imkl
 ```
 
 The module sets up environment variables, required for linking and running mkl enabled applications. The most important variables are the $MKLROOT, $MKL_INC_DIR, $MKL_LIB_DIR and $MKL_EXAMPLES
@@ -41,8 +41,8 @@ Linking MKL libraries may be complex. Intel [mkl link line advisor](http://softw
 
 You will need the mkl module loaded to run the mkl enabled executable. This may be avoided, by compiling library search paths into the executable. Include rpath on the compile line:
 
-```bash
-    $ icc .... -Wl,-rpath=$LIBRARY_PATH ...
+```console
+$ icc .... -Wl,-rpath=$LIBRARY_PATH ...
 ```
 
 ### Threading
@@ -52,9 +52,9 @@ You will need the mkl module loaded to run the mkl enabled executable. This may
 
 For this to work, the application must link the threaded MKL library (default). Number and behaviour of MKL threads may be controlled via the OpenMP environment variables, such as OMP_NUM_THREADS and KMP_AFFINITY. MKL_NUM_THREADS takes precedence over OMP_NUM_THREADS
 
-```bash
-    $ export OMP_NUM_THREADS=16
-    $ export KMP_AFFINITY=granularity=fine,compact,1,0
+```console
+$ export OMP_NUM_THREADS=16
+$ export KMP_AFFINITY=granularity=fine,compact,1,0
 ```
 
 The application will run with 16 threads with affinity optimized for fine grain parallelization.
@@ -65,50 +65,42 @@ Number of examples, demonstrating use of the MKL library and its linking is avai
 
 ### Working With Examples
 
-```bash
-    $ module load intel
-    $ module load mkl
-    $ cp -a $MKL_EXAMPLES/cblas /tmp/
-    $ cd /tmp/cblas
-
-    $ make sointel64 function=cblas_dgemm
+```console
+$ ml intel
+$ cp -a $MKL_EXAMPLES/cblas /tmp/
+$ cd /tmp/cblas
+$ make sointel64 function=cblas_dgemm
 ```
 
 In this example, we compile, link and run the cblas_dgemm example, demonstrating use of MKL example suite installed on Anselm.
 
 ### Example: MKL and Intel Compiler
 
-```bash
-    $ module load intel
-    $ module load mkl
-    $ cp -a $MKL_EXAMPLES/cblas /tmp/
-    $ cd /tmp/cblas
-    $
-    $ icc -w source/cblas_dgemmx.c source/common_func.c -mkl -o cblas_dgemmx.x
-    $ ./cblas_dgemmx.x data/cblas_dgemmx.d
+```console
+$ ml intel
+$ cp -a $MKL_EXAMPLES/cblas /tmp/
+$ cd /tmp/cblas
+$ icc -w source/cblas_dgemmx.c source/common_func.c -mkl -o cblas_dgemmx.x
+$ ./cblas_dgemmx.x data/cblas_dgemmx.d
 ```
 
 In this example, we compile, link and run the cblas_dgemm example, demonstrating use of MKL with icc -mkl option. Using the -mkl option is equivalent to:
 
-```bash
-    $ icc -w source/cblas_dgemmx.c source/common_func.c -o cblas_dgemmx.x
-    -I$MKL_INC_DIR -L$MKL_LIB_DIR -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5
+```console
+$ icc -w source/cblas_dgemmx.c source/common_func.c -o cblas_dgemmx.x -I$MKL_INC_DIR -L$MKL_LIB_DIR -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5
 ```
 
 In this example, we compile and link the cblas_dgemm example, using LP64 interface to threaded MKL and Intel OMP threads implementation.
 
 ### Example: MKL and GNU Compiler
 
-```bash
-    $ module load gcc
-    $ module load mkl
-    $ cp -a $MKL_EXAMPLES/cblas /tmp/
-    $ cd /tmp/cblas
-
-    $ gcc -w source/cblas_dgemmx.c source/common_func.c -o cblas_dgemmx.x
-    -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core -lgomp -lm
-
-    $ ./cblas_dgemmx.x data/cblas_dgemmx.d
+```console
+$ ml gcc
+$ ml imkl
+$ cp -a $MKL_EXAMPLES/cblas /tmp/
+$ cd /tmp/cblas
+$ gcc -w source/cblas_dgemmx.c source/common_func.c -o cblas_dgemmx.x -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core -lgomp -lm
+$ ./cblas_dgemmx.x data/cblas_dgemmx.d
 ```
 
 In this example, we compile, link and run the cblas_dgemm example, using LP64 interface to threaded MKL and gnu OMP threads implementation.
diff --git a/docs.it4i/anselm/software/intel-suite/intel-tbb.md b/docs.it4i/anselm/software/intel-suite/intel-tbb.md
index 3c2495ba8c0592df6556ab7c41c078dd3cedf5af..497b26f5e46a62604b7eb542bd0579b2c7fbd358 100644
--- a/docs.it4i/anselm/software/intel-suite/intel-tbb.md
+++ b/docs.it4i/anselm/software/intel-suite/intel-tbb.md
@@ -7,8 +7,8 @@ be offloaded to [MIC accelerator](../intel-xeon-phi/).
 
 Intel TBB version 4.1 is available on Anselm
 
-```bash
-    $ module load tbb
+```console
+$ ml tbb
 ```
 
 The module sets up environment variables, required for linking and running tbb enabled applications.
@@ -20,21 +20,21 @@ The module sets up environment variables, required for linking and running tbb e
 
 Number of examples, demonstrating use of TBB and its built-in scheduler is available on Anselm, in the $TBB_EXAMPLES directory.
 
-```bash
-    $ module load intel
-    $ module load tbb
-    $ cp -a $TBB_EXAMPLES/common $TBB_EXAMPLES/parallel_reduce /tmp/
-    $ cd /tmp/parallel_reduce/primes
-    $ icc -O2 -DNDEBUG -o primes.x main.cpp primes.cpp -ltbb
-    $ ./primes.x
+```console
+$ ml intel
+$ ml tbb
+$ cp -a $TBB_EXAMPLES/common $TBB_EXAMPLES/parallel_reduce /tmp/
+$ cd /tmp/parallel_reduce/primes
+$ icc -O2 -DNDEBUG -o primes.x main.cpp primes.cpp -ltbb
+$ ./primes.x
 ```
 
 In this example, we compile, link and run the primes example, demonstrating use of parallel task-based reduce in computation of prime numbers.
 
 You will need the tbb module loaded to run the tbb enabled executable. This may be avoided, by compiling library search paths into the executable.
 
-```bash
-    $ icc -O2 -o primes.x main.cpp primes.cpp -Wl,-rpath=$LIBRARY_PATH -ltbb
+```console
+$ icc -O2 -o primes.x main.cpp primes.cpp -Wl,-rpath=$LIBRARY_PATH -ltbb
 ```
 
 ## Further Reading
diff --git a/docs.it4i/anselm/software/intel-suite/introduction.md b/docs.it4i/anselm/software/intel-suite/introduction.md
index f9f6f4093a1ed659c7cd4ed63bea944b4dd40ffe..879389f3f119e873d375b585da4e56f0dcfa5a79 100644
--- a/docs.it4i/anselm/software/intel-suite/introduction.md
+++ b/docs.it4i/anselm/software/intel-suite/introduction.md
@@ -12,10 +12,10 @@ The Anselm cluster provides following elements of the Intel Parallel Studio XE
 
 The Intel compilers version 13.1.3 are available, via module intel. The compilers include the icc C and C++ compiler and the ifort fortran 77/90/95 compiler.
 
-```bash
-    $ module load intel
-    $ icc -v
-    $ ifort -v
+```console
+$ ml intel
+$ icc -v
+$ ifort -v
 ```
 
 Read more at the [Intel Compilers](intel-compilers/) page.
@@ -24,9 +24,9 @@ Read more at the [Intel Compilers](intel-compilers/) page.
 
 The intel debugger version 13.0 is available, via module intel. The debugger works for applications compiled with C and C++ compiler and the ifort fortran 77/90/95 compiler. The debugger provides java GUI environment. Use X display for running the GUI.
 
-```bash
-    $ module load intel
-    $ idb
+```console
+$ ml intel
+$ idb
 ```
 
 Read more at the [Intel Debugger](intel-debugger/) page.
@@ -35,8 +35,8 @@ Read more at the [Intel Debugger](intel-debugger/) page.
 
 Intel Math Kernel Library (Intel MKL) is a library of math kernel subroutines, extensively threaded and optimized for maximum performance. Intel MKL unites and provides these basic components: BLAS, LAPACK, ScaLapack, PARDISO, FFT, VML, VSL, Data fitting, Feast Eigensolver and many more.
 
-```bash
-    $ module load mkl
+```console
+$ ml imkl
 ```
 
 Read more at the [Intel MKL](intel-mkl/) page.
@@ -45,8 +45,8 @@ Read more at the [Intel MKL](intel-mkl/) page.
 
 Intel Integrated Performance Primitives, version 7.1.1, compiled for AVX is available, via module ipp. The IPP is a library of highly optimized algorithmic building blocks for media and data applications. This includes signal, image and frame processing algorithms, such as FFT, FIR, Convolution, Optical Flow, Hough transform, Sum, MinMax and many more.
 
-```bash
-    $ module load ipp
+```console
+$ ml ipp
 ```
 
 Read more at the [Intel IPP](intel-integrated-performance-primitives/) page.
@@ -55,8 +55,8 @@ Read more at the [Intel IPP](intel-integrated-performance-primitives/) page.
 
 Intel Threading Building Blocks (Intel TBB) is a library that supports scalable parallel programming using standard ISO C++ code. It does not require special languages or compilers. It is designed to promote scalable data parallel programming. Additionally, it fully supports nested parallelism, so you can build larger parallel components from smaller parallel components. To use the library, you specify tasks, not threads, and let the library map tasks onto threads in an efficient manner.
 
-```bash
-    $ module load tbb
+```console
+$ ml tbb
 ```
 
 Read more at the [Intel TBB](intel-tbb/) page.
diff --git a/docs.it4i/anselm/software/intel-xeon-phi.md b/docs.it4i/anselm/software/intel-xeon-phi.md
index 6937a4453e27aa9cc8b10b5594d7d2c48a72b03a..d879361135e715e4af6862ed6636adb45a895fb1 100644
--- a/docs.it4i/anselm/software/intel-xeon-phi.md
+++ b/docs.it4i/anselm/software/intel-xeon-phi.md
@@ -8,25 +8,25 @@ Intel Xeon Phi can be programmed in several modes. The default mode on Anselm is
 
 To get access to a compute node with Intel Xeon Phi accelerator, use the PBS interactive session
 
-```bash
+```console
 $ qsub -I -q qmic -A NONE-0-0
 ```
 
 To set up the environment module "Intel" has to be loaded
 
-```bash
-$ module load intel/13.5.192
+```console
+$ ml intel
 ```
 
 Information about the hardware can be obtained by running the micinfo program on the host.
 
-```bash
+```console
 $ /usr/bin/micinfo
 ```
 
 The output of the "micinfo" utility executed on one of the Anselm node is as follows. (note: to get PCIe related details the command has to be run with root privileges)
 
-```bash
+```console
     MicInfo Utility Log
 
     Created Mon Jul 22 00:23:50 2013
@@ -92,14 +92,14 @@ The output of the "micinfo" utility executed on one of the Anselm node is as fol
 
 To compile a code for Intel Xeon Phi a MPSS stack has to be installed on the machine where compilation is executed. Currently the MPSS stack is only installed on compute nodes equipped with accelerators.
 
-```bash
+```console
 $ qsub -I -q qmic -A NONE-0-0
-$ module load intel/13.5.192
+$ ml intel
 ```
 
 For debugging purposes it is also recommended to set environment variable "OFFLOAD_REPORT". Value can be set from 0 to 3, where higher number means more debugging information.
 
-```bash
+```console
 export OFFLOAD_REPORT=3
 ```
 
@@ -108,8 +108,8 @@ A very basic example of code that employs offload programming technique is shown
 !!! note
     This code is sequential and utilizes only single core of the accelerator.
 
-```bash
-    $ vim source-offload.cpp
+```console
+$ vim source-offload.cpp
 
     #include <iostream>
 
@@ -130,22 +130,22 @@ A very basic example of code that employs offload programming technique is shown
 
 To compile a code using Intel compiler run
 
-```bash
-    $ icc source-offload.cpp -o bin-offload
+```console
+$ icc source-offload.cpp -o bin-offload
 ```
 
 To execute the code, run the following command on the host
 
-```bash
-    ./bin-offload
+```console
+$ ./bin-offload
 ```
 
 ### Parallelization in Offload Mode Using OpenMP
 
 One way of paralelization a code for Xeon Phi is using OpenMP directives. The following example shows code for parallel vector addition.
 
-```bash
-    $ vim ./vect-add
+```console
+$ vim ./vect-add
 
     #include <stdio.h>
 
@@ -224,10 +224,9 @@ One way of paralelization a code for Xeon Phi is using OpenMP directives. The fo
 
 During the compilation Intel compiler shows which loops have been vectorized in both host and accelerator. This can be enabled with compiler option "-vec-report2". To compile and execute the code run
 
-```bash
-    $ icc vect-add.c -openmp_report2 -vec-report2 -o vect-add
-
-    $ ./vect-add
+```console
+$ icc vect-add.c -openmp_report2 -vec-report2 -o vect-add
+$ ./vect-add
 ```
 
 Some interesting compiler flags useful not only for code debugging are:
@@ -255,8 +254,8 @@ The Automatic Offload may be enabled by either an MKL function call within the c
 
 or by setting environment variable
 
-```bash
-    $ export MKL_MIC_ENABLE=1
+```console
+$ export MKL_MIC_ENABLE=1
 ```
 
 To get more information about automatic offload please refer to "[Using Intel® MKL Automatic Offload on Intel ® Xeon Phi™ Coprocessors](http://software.intel.com/sites/default/files/11MIC42_How_to_Use_MKL_Automatic_Offload_0.pdf)" white paper or [Intel MKL documentation](https://software.intel.com/en-us/articles/intel-math-kernel-library-documentation).
@@ -265,15 +264,15 @@ To get more information about automatic offload please refer to "[Using Intel®
 
 At first get an interactive PBS session on a node with MIC accelerator and load "intel" module that automatically loads "mkl" module as well.
 
-```bash
-    $ qsub -I -q qmic -A OPEN-0-0 -l select=1:ncpus=16
-    $ module load intel
+```console
+$ qsub -I -q qmic -A OPEN-0-0 -l select=1:ncpus=16
+$ ml intel
 ```
 
-Following example show how to automatically offload an SGEMM (single precision - g dir="auto">eneral matrix multiply) function to MIC coprocessor. The code can be copied to a file and compiled without any necessary modification.
+Following example show how to automatically offload an SGEMM (single precision - general matrix multiply) function to MIC coprocessor. The code can be copied to a file and compiled without any necessary modification.
 
-```bash
-    $ vim sgemm-ao-short.c
+```console
+$ vim sgemm-ao-short.c
 
     #include <stdio.h>
     #include <stdlib.h>
@@ -334,19 +333,19 @@ Following example show how to automatically offload an SGEMM (single precision -
 
 To compile a code using Intel compiler use:
 
-```bash
-    $ icc -mkl sgemm-ao-short.c -o sgemm
+```console
+$ icc -mkl sgemm-ao-short.c -o sgemm
 ```
 
 For debugging purposes enable the offload report to see more information about automatic offloading.
 
-```bash
-    $ export OFFLOAD_REPORT=2
+```console
+$ export OFFLOAD_REPORT=2
 ```
 
 The output of a code should look similar to following listing, where lines starting with [MKL] are generated by offload reporting:
 
-```bash
+```console
     Computing SGEMM on the host
     Enabling Automatic Offload
     Automatic Offload enabled: 1 MIC devices present
@@ -366,10 +365,9 @@ In the native mode a program is executed directly on Intel Xeon Phi without invo
 
 To compile a code user has to be connected to a compute with MIC and load Intel compilers module. To get an interactive session on a compute node with an Intel Xeon Phi and load the module use following commands:
 
-```bash
-    $ qsub -I -q qmic -A NONE-0-0
-
-    $ module load intel/13.5.192
+```console
+$ qsub -I -q qmic -A NONE-0-0
+$ ml intel
 ```
 
 !!! note
@@ -377,20 +375,20 @@ To compile a code user has to be connected to a compute with MIC and load Intel
 
 To produce a binary compatible with Intel Xeon Phi architecture user has to specify "-mmic" compiler flag. Two compilation examples are shown below. The first example shows how to compile OpenMP parallel code "vect-add.c" for host only:
 
-```bash
-    $ icc -xhost -no-offload -fopenmp vect-add.c -o vect-add-host
+```console
+$ icc -xhost -no-offload -fopenmp vect-add.c -o vect-add-host
 ```
 
 To run this code on host, use:
 
-```bash
-    $ ./vect-add-host
+```console
+$ ./vect-add-host
 ```
 
 The second example shows how to compile the same code for Intel Xeon Phi:
 
-```bash
-    $ icc -mmic -fopenmp vect-add.c -o vect-add-mic
+```console
+$ icc -mmic -fopenmp vect-add.c -o vect-add-mic
 ```
 
 ### Execution of the Program in Native Mode on Intel Xeon Phi
@@ -399,20 +397,20 @@ The user access to the Intel Xeon Phi is through the SSH. Since user home direct
 
 To connect to the accelerator run:
 
-```bash
-    $ ssh mic0
+```console
+$ ssh mic0
 ```
 
 If the code is sequential, it can be executed directly:
 
-```bash
-    mic0 $ ~/path_to_binary/vect-add-seq-mic
+```console
+mic0 $ ~/path_to_binary/vect-add-seq-mic
 ```
 
 If the code is parallelized using OpenMP a set of additional libraries is required for execution. To locate these libraries new path has to be added to the LD_LIBRARY_PATH environment variable prior to the execution:
 
-```bash
-    mic0 $ export LD_LIBRARY_PATH=/apps/intel/composer_xe_2013.5.192/compiler/lib/mic:$LD_LIBRARY_PATH
+```console
+mic0 $ export LD_LIBRARY_PATH=/apps/intel/composer_xe_2013.5.192/compiler/lib/mic:$LD_LIBRARY_PATH
 ```
 
 !!! note
@@ -431,8 +429,8 @@ For your information the list of libraries and their location required for execu
 
 Finally, to run the compiled code use:
 
-```bash
-    $ ~/path_to_binary/vect-add-mic
+```console
+$ ~/path_to_binary/vect-add-mic
 ```
 
 ## OpenCL
@@ -441,42 +439,42 @@ OpenCL (Open Computing Language) is an open standard for general-purpose paralle
 
 On Anselm OpenCL is installed only on compute nodes with MIC accelerator, therefore OpenCL code can be compiled only on these nodes.
 
-```bash
-    module load opencl-sdk opencl-rt
+```console
+$ ml opencl-sdk opencl-rt
 ```
 
 Always load "opencl-sdk" (providing devel files like headers) and "opencl-rt" (providing dynamic library libOpenCL.so) modules to compile and link OpenCL code. Load "opencl-rt" for running your compiled code.
 
 There are two basic examples of OpenCL code in the following directory:
 
-```bash
-    /apps/intel/opencl-examples/
+```console
+/apps/intel/opencl-examples/
 ```
 
 First example "CapsBasic" detects OpenCL compatible hardware, here CPU and MIC, and prints basic information about the capabilities of it.
 
-```bash
-    /apps/intel/opencl-examples/CapsBasic/capsbasic
+```console
+/apps/intel/opencl-examples/CapsBasic/capsbasic
 ```
 
 To compile and run the example copy it to your home directory, get a PBS interactive session on of the nodes with MIC and run make for compilation. Make files are very basic and shows how the OpenCL code can be compiled on Anselm.
 
-```bash
-    $ cp /apps/intel/opencl-examples/CapsBasic/* .
-    $ qsub -I -q qmic -A NONE-0-0
-    $ make
+```console
+$ cp /apps/intel/opencl-examples/CapsBasic/* .
+$ qsub -I -q qmic -A NONE-0-0
+$ make
 ```
 
 The compilation command for this example is:
 
-```bash
-    $ g++ capsbasic.cpp -lOpenCL -o capsbasic -I/apps/intel/opencl/include/
+```console
+$ g++ capsbasic.cpp -lOpenCL -o capsbasic -I/apps/intel/opencl/include/
 ```
 
 After executing the complied binary file, following output should be displayed.
 
-```bash
-    ./capsbasic
+```console
+$ ./capsbasic
 
     Number of available platforms: 1
     Platform names:
@@ -506,22 +504,22 @@ After executing the complied binary file, following output should be displayed.
 
 The second example that can be found in "/apps/intel/opencl-examples" directory is General Matrix Multiply. You can follow the the same procedure to download the example to your directory and compile it.
 
-```bash
-    $ cp -r /apps/intel/opencl-examples/* .
-    $ qsub -I -q qmic -A NONE-0-0
-    $ cd GEMM
-    $ make
+```console
+$ cp -r /apps/intel/opencl-examples/* .
+$ qsub -I -q qmic -A NONE-0-0
+$ cd GEMM
+$ make
 ```
 
 The compilation command for this example is:
 
-```bash
-    $ g++ cmdoptions.cpp gemm.cpp ../common/basic.cpp ../common/cmdparser.cpp ../common/oclobject.cpp -I../common -lOpenCL -o gemm -I/apps/intel/opencl/include/
+```console
+$ g++ cmdoptions.cpp gemm.cpp ../common/basic.cpp ../common/cmdparser.cpp ../common/oclobject.cpp -I../common -lOpenCL -o gemm -I/apps/intel/opencl/include/
 ```
 
 To see the performance of Intel Xeon Phi performing the DGEMM run the example as follows:
 
-```bash
+```console
     ./gemm -d 1
     Platforms (1):
      [0] Intel(R) OpenCL [Selected]
@@ -550,26 +548,26 @@ To see the performance of Intel Xeon Phi performing the DGEMM run the example as
 
 Again an MPI code for Intel Xeon Phi has to be compiled on a compute node with accelerator and MPSS software stack installed. To get to a compute node with accelerator use:
 
-```bash
-    $ qsub -I -q qmic -A NONE-0-0
+```console
+$ qsub -I -q qmic -A NONE-0-0
 ```
 
 The only supported implementation of MPI standard for Intel Xeon Phi is Intel MPI. To setup a fully functional development environment a combination of Intel compiler and Intel MPI has to be used. On a host load following modules before compilation:
 
-```bash
-    $ module load intel/13.5.192 impi/4.1.1.036
+```console
+$ ml intel
 ```
 
 To compile an MPI code for host use:
 
-````bash
-        $ mpiicc -xhost -o mpi-test mpi-test.c
-    ```bash
+```console
+$ mpiicc -xhost -o mpi-test mpi-test.c
+```
 
-    To compile the same code for Intel Xeon Phi architecture use:
+To compile the same code for Intel Xeon Phi architecture use:
 
-    ```bash
-        $ mpiicc -mmic -o mpi-test-mic mpi-test.c
+```console
+$ mpiicc -mmic -o mpi-test-mic mpi-test.c
 ````
 
 An example of basic MPI version of "hello-world" example in C language, that can be executed on both host and Xeon Phi is (can be directly copy and pasted to a .c file)
@@ -614,13 +612,13 @@ Intel MPI for the Xeon Phi coprocessors offers different MPI programming models:
 
 In this case all environment variables are set by modules, so to execute the compiled MPI program on a single node, use:
 
-```bash
-    $ mpirun -np 4 ./mpi-test
+```console
+$ mpirun -np 4 ./mpi-test
 ```
 
 The output should be similar to:
 
-```bash
+```console
     Hello world from process 1 of 4 on host cn207
     Hello world from process 3 of 4 on host cn207
     Hello world from process 2 of 4 on host cn207
@@ -636,8 +634,8 @@ coprocessor; or 2.) lunch the task using "**mpiexec.hydra**" from a host.
 
 Similarly to execution of OpenMP programs in native mode, since the environmental module are not supported on MIC, user has to setup paths to Intel MPI libraries and binaries manually. One time setup can be done by creating a "**.profile**" file in user's home directory. This file sets up the environment on the MIC automatically once user access to the accelerator through the SSH.
 
-```bash
-    $ vim ~/.profile
+```console
+$ vim ~/.profile
 
     PS1='[u@h W]$ '
     export PATH=/usr/bin:/usr/sbin:/bin:/sbin
@@ -656,25 +654,25 @@ Similarly to execution of OpenMP programs in native mode, since the environmenta
 
 To access a MIC accelerator located on a node that user is currently connected to, use:
 
-```bash
-    $ ssh mic0
+```console
+$ ssh mic0
 ```
 
 or in case you need specify a MIC accelerator on a particular node, use:
 
-```bash
-    $ ssh cn207-mic0
+```console
+$ ssh cn207-mic0
 ```
 
 To run the MPI code in parallel on multiple core of the accelerator, use:
 
-```bash
-    $ mpirun -np 4 ./mpi-test-mic
+```console
+$ mpirun -np 4 ./mpi-test-mic
 ```
 
 The output should be similar to:
 
-```bash
+```console
     Hello world from process 1 of 4 on host cn207-mic0
     Hello world from process 2 of 4 on host cn207-mic0
     Hello world from process 3 of 4 on host cn207-mic0
@@ -687,20 +685,20 @@ If the MPI program is launched from host instead of the coprocessor, the environ
 
 First step is to tell mpiexec that the MPI should be executed on a local accelerator by setting up the environmental variable "I_MPI_MIC"
 
-```bash
-    $ export I_MPI_MIC=1
+```console
+$ export I_MPI_MIC=1
 ```
 
 Now the MPI program can be executed as:
 
-```bash
-    $ mpiexec.hydra -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/ -host mic0 -n 4 ~/mpi-test-mic
+```console
+$ mpiexec.hydra -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/ -host mic0 -n 4 ~/mpi-test-mic
 ```
 
 or using mpirun
 
-```bash
-    $ mpirun -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/ -host mic0 -n 4 ~/mpi-test-mic
+```console
+$ mpirun -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/ -host mic0 -n 4 ~/mpi-test-mic
 ```
 
 !!! note
@@ -709,7 +707,7 @@ or using mpirun
 
 The output should be again similar to:
 
-```bash
+```console
     Hello world from process 1 of 4 on host cn207-mic0
     Hello world from process 2 of 4 on host cn207-mic0
     Hello world from process 3 of 4 on host cn207-mic0
@@ -721,8 +719,8 @@ The output should be again similar to:
 
 A simple test to see if the file is present is to execute:
 
-```bash
-      $ ssh mic0 ls /bin/pmi_proxy
+```console
+$ ssh mic0 ls /bin/pmi_proxy
       /bin/pmi_proxy
 ```
 
@@ -730,21 +728,20 @@ A simple test to see if the file is present is to execute:
 
 To get access to multiple nodes with MIC accelerator, user has to use PBS to allocate the resources. To start interactive session, that allocates 2 compute nodes = 2 MIC accelerators run qsub command with following parameters:
 
-```bash
-    $ qsub -I -q qmic -A NONE-0-0 -l select=2:ncpus=16
-
-    $ module load intel/13.5.192 impi/4.1.1.036
+```console
+$ qsub -I -q qmic -A NONE-0-0 -l select=2:ncpus=16
+$ ml intel/13.5.192 impi/4.1.1.036
 ```
 
 This command connects user through ssh to one of the nodes immediately. To see the other nodes that have been allocated use:
 
-```bash
-    $ cat $PBS_NODEFILE
+```console
+$ cat $PBS_NODEFILE
 ```
 
 For example:
 
-```bash
+```console
     cn204.bullx
     cn205.bullx
 ```
@@ -759,14 +756,14 @@ This output means that the PBS allocated nodes cn204 and cn205, which means that
 
 At this point we expect that correct modules are loaded and binary is compiled. For parallel execution the mpiexec.hydra is used. Again the first step is to tell mpiexec that the MPI can be executed on MIC accelerators by setting up the environmental variable "I_MPI_MIC"
 
-```bash
-    $ export I_MPI_MIC=1
+```console
+$ export I_MPI_MIC=1
 ```
 
 The launch the MPI program use:
 
-```bash
-    $ mpiexec.hydra -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/
+```console
+$ mpiexec.hydra -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/
      -genv I_MPI_FABRICS_LIST tcp
      -genv I_MPI_FABRICS shm:tcp
      -genv I_MPI_TCP_NETMASK=10.1.0.0/16
@@ -776,8 +773,8 @@ The launch the MPI program use:
 
 or using mpirun:
 
-```bash
-    $ mpirun -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/
+```console
+$ mpirun -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/
      -genv I_MPI_FABRICS_LIST tcp
      -genv I_MPI_FABRICS shm:tcp
      -genv I_MPI_TCP_NETMASK=10.1.0.0/16
@@ -787,7 +784,7 @@ or using mpirun:
 
 In this case four MPI processes are executed on accelerator cn204-mic and six processes are executed on accelerator cn205-mic0. The sample output (sorted after execution) is:
 
-```bash
+```console
     Hello world from process 0 of 10 on host cn204-mic0
     Hello world from process 1 of 10 on host cn204-mic0
     Hello world from process 2 of 10 on host cn204-mic0
@@ -802,8 +799,8 @@ In this case four MPI processes are executed on accelerator cn204-mic and six pr
 
 The same way MPI program can be executed on multiple hosts:
 
-```bash
-    $ mpiexec.hydra -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/
+```console
+$ mpiexec.hydra -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/
      -genv I_MPI_FABRICS_LIST tcp
      -genv I_MPI_FABRICS shm:tcp
      -genv I_MPI_TCP_NETMASK=10.1.0.0/16
@@ -818,8 +815,8 @@ architecture and requires different binary file produced by the Intel compiler t
 
 In the previous section we have compiled two binary files, one for hosts "**mpi-test**" and one for MIC accelerators "**mpi-test-mic**". These two binaries can be executed at once using mpiexec.hydra:
 
-```bash
-    $ mpiexec.hydra
+```console
+$ mpiexec.hydra
      -genv I_MPI_FABRICS_LIST tcp
      -genv I_MPI_FABRICS shm:tcp
      -genv I_MPI_TCP_NETMASK=10.1.0.0/16
@@ -832,7 +829,7 @@ In this example the first two parameters (line 2 and 3) sets up required environ
 
 The output of the program is:
 
-```bash
+```console
     Hello world from process 0 of 4 on host cn205
     Hello world from process 1 of 4 on host cn205
     Hello world from process 2 of 4 on host cn205-mic0
@@ -843,8 +840,8 @@ The execution procedure can be simplified by using the mpirun command with the m
 
 An example of a machine file that uses 2 >hosts (**cn205** and **cn206**) and 2 accelerators **(cn205-mic0** and **cn206-mic0**) to run 2 MPI processes on each of them:
 
-```bash
-    $ cat hosts_file_mix
+```console
+$ cat hosts_file_mix
     cn205:2
     cn205-mic0:2
     cn206:2
@@ -853,14 +850,14 @@ An example of a machine file that uses 2 >hosts (**cn205** and **cn206**) and 2
 
 In addition if a naming convention is set in a way that the name of the binary for host is **"bin_name"**  and the name of the binary for the accelerator is **"bin_name-mic"** then by setting up the environment variable **I_MPI_MIC_POSTFIX** to **"-mic"** user do not have to specify the names of booth binaries. In this case mpirun needs just the name of the host binary file (i.e. "mpi-test") and uses the suffix to get a name of the binary for accelerator (i..e. "mpi-test-mic").
 
-```bash
-    $ export I_MPI_MIC_POSTFIX=-mic
+```console
+$ export I_MPI_MIC_POSTFIX=-mic
 ```
 
 To run the MPI code using mpirun and the machine file "hosts_file_mix" use:
 
-```bash
-    $ mpirun
+```console
+$ mpirun
      -genv I_MPI_FABRICS shm:tcp
      -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/
      -genv I_MPI_FABRICS_LIST tcp
@@ -872,7 +869,7 @@ To run the MPI code using mpirun and the machine file "hosts_file_mix" use:
 
 A possible output of the MPI "hello-world" example executed on two hosts and two accelerators is:
 
-```bash
+```console
     Hello world from process 0 of 8 on host cn204
     Hello world from process 1 of 8 on host cn204
     Hello world from process 2 of 8 on host cn204-mic0
@@ -886,18 +883,21 @@ A possible output of the MPI "hello-world" example executed on two hosts and two
 !!! note
     At this point the MPI communication between MIC accelerators on different nodes uses 1Gb Ethernet only.
 
-### Using the PBS Automatically Generated Node-Files
+### Using Automatically Generated Node-Files
 
-PBS also generates a set of node-files that can be used instead of manually creating a new one every time. Three node-files are genereated:
+A set of node-files that can be used instead of manually creating a new one every time is generated for user convenience. Six node-files are generated:
 
 !!! note
-    **Host only node-file:**
+    **Node-files:**
 
-     - /lscratch/${PBS_JOBID}/nodefile-cn MIC only node-file:
-     - /lscratch/${PBS_JOBID}/nodefile-mic Host and MIC node-file:
-     - /lscratch/${PBS_JOBID}/nodefile-mix
+     - /lscratch/${PBS_JOBID}/nodefile-cn Hosts only node-file
+     - /lscratch/${PBS_JOBID}/nodefile-mic MICs only node-file
+     - /lscratch/${PBS_JOBID}/nodefile-mix Hosts and MICs node-file
+     - /lscratch/${PBS_JOBID}/nodefile-cn-sn Hosts only node-file, using short names
+     - /lscratch/${PBS_JOBID}/nodefile-mic-sn MICs only node-file, using short names
+     - /lscratch/${PBS_JOBID}/nodefile-mix-sn Hosts and MICs node-file, using short names
 
-Each host or accelerator is listed only per files. User has to specify how many jobs should be executed per node using `-n` parameter of the mpirun command.
+Each host or accelerator is listed only once per file. User has to specify how many jobs should be executed per node using `-n` parameter of the mpirun command.
 
 ## Optimization
 
diff --git a/docs.it4i/anselm/software/isv_licenses.md b/docs.it4i/anselm/software/isv_licenses.md
index 56270b51feca30fe2ec4f297da6cb0d6ee62d6e7..f26319ec1c0bcbe64bc4ca0ae92975a60572cabd 100644
--- a/docs.it4i/anselm/software/isv_licenses.md
+++ b/docs.it4i/anselm/software/isv_licenses.md
@@ -15,8 +15,7 @@ If an ISV application was purchased for educational (research) purposes and also
 
 ### Web Interface
 
-For each license there is a table, which provides the information about the name, number of available (purchased/licensed), number of used and number of free license features
-<https://extranet.it4i.cz/anselm/licenses>
+For each license there is a table, which provides the information about the name, number of available (purchased/licensed), number of used and number of free license features <https://extranet.it4i.cz/anselm/licenses>
 
 ### Text Interface
 
@@ -34,8 +33,8 @@ The file has a header which serves as a legend. All the info in the legend start
 
 Example of the Commercial Matlab license state:
 
-```bash
-    $ cat /apps/user/licenses/matlab_features_state.txt
+```console
+$ cat /apps/user/licenses/matlab_features_state.txt
     # matlab
     # -------------------------------------------------
     # FEATURE                       TOTAL   USED AVAIL
@@ -99,8 +98,8 @@ Resource names in PBS Pro are case sensitive.
 
 Run an interactive PBS job with 1 Matlab EDU license, 1 Distributed Computing Toolbox and 32 Distributed Computing Engines (running on 32 cores):
 
-```bash
-    $ qsub -I -q qprod -A PROJECT_ID -l select=2:ncpus=16 -l feature__matlab-edu__MATLAB=1 -l feature__matlab-edu__Distrib_Computing_Toolbox=1 -l feature__matlab-edu__MATLAB_Distrib_Comp_Engine=32
+```console
+$ qsub -I -q qprod -A PROJECT_ID -l select=2:ncpus=16 -l feature__matlab-edu__MATLAB=1 -l feature__matlab-edu__Distrib_Computing_Toolbox=1 -l feature__matlab-edu__MATLAB_Distrib_Comp_Engine=32
 ```
 
 The license is used and accounted only with the real usage of the product. So in this example, the general Matlab is used after Matlab is run by the user and not at the time, when the shell of the interactive job is started. Also the Distributed Computing licenses are used at the time, when the user uses the distributed parallel computation in Matlab (e. g. issues pmode start, matlabpool, etc.).
diff --git a/docs.it4i/anselm/software/java.md b/docs.it4i/anselm/software/java.md
index ddf032eb4eef469e8c68de98f16965696b153c72..a9de126760592f8fdb983242eb397ebf00c80c42 100644
--- a/docs.it4i/anselm/software/java.md
+++ b/docs.it4i/anselm/software/java.md
@@ -4,24 +4,24 @@
 
 Java is available on Anselm cluster. Activate java by loading the java module
 
-```bash
-    $ module load java
+```console
+$ ml Java
 ```
 
 Note that the java module must be loaded on the compute nodes as well, in order to run java on compute nodes.
 
 Check for java version and path
 
-```bash
-    $ java -version
-    $ which java
+```console
+$ java -version
+$ which java
 ```
 
 With the module loaded, not only the runtime environment (JRE), but also the development environment (JDK) with the compiler is available.
 
-```bash
-    $ javac -version
-    $ which javac
+```console
+$ javac -version
+$ which javac
 ```
 
 Java applications may use MPI for inter-process communication, in conjunction with OpenMPI. Read more on <http://www.open-mpi.org/faq/?category=java>. This functionality is currently not supported on Anselm cluster. In case you require the java interface to MPI, please contact [Anselm support](https://support.it4i.cz/rt/).
diff --git a/docs.it4i/anselm/software/mpi/Running_OpenMPI.md b/docs.it4i/anselm/software/mpi/Running_OpenMPI.md
index 8e11a3c163bcac6a711e18c4232a98a6acb5a16f..4974eb5b16625faa930a69cded916948257d00a5 100644
--- a/docs.it4i/anselm/software/mpi/Running_OpenMPI.md
+++ b/docs.it4i/anselm/software/mpi/Running_OpenMPI.md
@@ -11,16 +11,14 @@ The OpenMPI programs may be executed only via the PBS Workload manager, by enter
 
 Example:
 
-```bash
-    $ qsub -q qexp -l select=4:ncpus=16 -I
+```console
+$ qsub -q qexp -l select=4:ncpus=16 -I
     qsub: waiting for job 15210.srv11 to start
     qsub: job 15210.srv11 ready
-
-    $ pwd
+$ pwd
     /home/username
-
-    $ module load openmpi
-    $ mpiexec -pernode ./helloworld_mpi.x
+$ ml OpenMPI
+$ mpiexec -pernode ./helloworld_mpi.x
     Hello world! from rank 0 of 4 on host cn17
     Hello world! from rank 1 of 4 on host cn108
     Hello world! from rank 2 of 4 on host cn109
@@ -35,11 +33,10 @@ same path on all nodes. This is automatically fulfilled on the /home and /scratc
 
 You need to preload the executable, if running on the local scratch /lscratch filesystem
 
-```bash
-    $ pwd
+```console
+$ pwd
     /lscratch/15210.srv11
-
-    $ mpiexec -pernode --preload-binary ./helloworld_mpi.x
+$ mpiexec -pernode --preload-binary ./helloworld_mpi.x
     Hello world! from rank 0 of 4 on host cn17
     Hello world! from rank 1 of 4 on host cn108
     Hello world! from rank 2 of 4 on host cn109
@@ -57,12 +54,10 @@ The mpiprocs and ompthreads parameters allow for selection of number of running
 
 Follow this example to run one MPI process per node, 16 threads per process.
 
-```bash
-    $ qsub -q qexp -l select=4:ncpus=16:mpiprocs=1:ompthreads=16 -I
-
-    $ module load openmpi
-
-    $ mpiexec --bind-to-none ./helloworld_mpi.x
+```console
+$ qsub -q qexp -l select=4:ncpus=16:mpiprocs=1:ompthreads=16 -I
+$ ml OpenMPI
+$ mpiexec --bind-to-none ./helloworld_mpi.x
 ```
 
 In this example, we demonstrate recommended way to run an MPI application, using 1 MPI processes per node and 16 threads per socket, on 4 nodes.
@@ -71,12 +66,10 @@ In this example, we demonstrate recommended way to run an MPI application, using
 
 Follow this example to run two MPI processes per node, 8 threads per process. Note the options to mpiexec.
 
-```bash
-    $ qsub -q qexp -l select=4:ncpus=16:mpiprocs=2:ompthreads=8 -I
-
-    $ module load openmpi
-
-    $ mpiexec -bysocket -bind-to-socket ./helloworld_mpi.x
+```console
+$ qsub -q qexp -l select=4:ncpus=16:mpiprocs=2:ompthreads=8 -I
+$ ml OpenMPI
+$ mpiexec -bysocket -bind-to-socket ./helloworld_mpi.x
 ```
 
 In this example, we demonstrate recommended way to run an MPI application, using 2 MPI processes per node and 8 threads per socket, each process and its threads bound to a separate processor socket of the node, on 4 nodes
@@ -85,12 +78,10 @@ In this example, we demonstrate recommended way to run an MPI application, using
 
 Follow this example to run 16 MPI processes per node, 1 thread per process. Note the options to mpiexec.
 
-```bash
-    $ qsub -q qexp -l select=4:ncpus=16:mpiprocs=16:ompthreads=1 -I
-
-    $ module load openmpi
-
-    $ mpiexec -bycore -bind-to-core ./helloworld_mpi.x
+```console
+$ qsub -q qexp -l select=4:ncpus=16:mpiprocs=16:ompthreads=1 -I
+$ ml OpenMPI
+$ mpiexec -bycore -bind-to-core ./helloworld_mpi.x
 ```
 
 In this example, we demonstrate recommended way to run an MPI application, using 16 MPI processes per node, single threaded. Each process is bound to separate processor core, on 4 nodes.
@@ -102,19 +93,19 @@ In this example, we demonstrate recommended way to run an MPI application, using
 
 In the previous two examples with one or two MPI processes per node, the operating system might still migrate OpenMP threads between cores. You might want to avoid this by setting these environment variable for GCC OpenMP:
 
-```bash
-    $ export GOMP_CPU_AFFINITY="0-15"
+```console
+$ export GOMP_CPU_AFFINITY="0-15"
 ```
 
 or this one for Intel OpenMP:
 
-```bash
+```console
 $ export KMP_AFFINITY=granularity=fine,compact,1,0
 ```
 
 As of OpenMP 4.0 (supported by GCC 4.9 and later and Intel 14.0 and later) the following variables may be used for Intel or GCC:
 
-```bash
+```console
 $ export OMP_PROC_BIND=true
 $ export OMP_PLACES=cores
 ```
@@ -129,7 +120,7 @@ MPI process mapping may be specified by a hostfile or rankfile input to the mpie
 
 Example hostfile
 
-```bash
+```console
     cn110.bullx
     cn109.bullx
     cn108.bullx
@@ -138,8 +129,8 @@ Example hostfile
 
 Use the hostfile to control process placement
 
-```bash
-    $ mpiexec -hostfile hostfile ./helloworld_mpi.x
+```console
+$ mpiexec -hostfile hostfile ./helloworld_mpi.x
     Hello world! from rank 0 of 4 on host cn110
     Hello world! from rank 1 of 4 on host cn109
     Hello world! from rank 2 of 4 on host cn108
@@ -157,7 +148,7 @@ Exact control of MPI process placement and resource binding is provided by speci
 
 Example rankfile
 
-```bash
+```console
     rank 0=cn110.bullx slot=1:0,1
     rank 1=cn109.bullx slot=0:*
     rank 2=cn108.bullx slot=1:1-2
@@ -174,8 +165,8 @@ rank 2 will be bounded to cn108, socket1, core1 and core2
 rank 3 will be bounded to cn17, socket0 core1, socket1 core0, core1, core2
 rank 4 will be bounded to cn109, all cores on both sockets
 
-```bash
-    $ mpiexec -n 5 -rf rankfile --report-bindings ./helloworld_mpi.x
+```console
+$ mpiexec -n 5 -rf rankfile --report-bindings ./helloworld_mpi.x
     [cn17:11180]  MCW rank 3 bound to socket 0[core 1] socket 1[core 0-2]: [. B . . . . . .][B B B . . . . .] (slot list 0:1,1:0-2)
     [cn110:09928] MCW rank 0 bound to socket 1[core 0-1]: [. . . . . . . .][B B . . . . . .] (slot list 1:0,1)
     [cn109:10395] MCW rank 1 bound to socket 0[core 0-7]: [B B B B B B B B][. . . . . . . .] (slot list 0:*)
@@ -196,10 +187,10 @@ It is users responsibility to provide correct number of ranks, sockets and cores
 
 In all cases, binding and threading may be verified by executing for example:
 
-```bash
-    $ mpiexec -bysocket -bind-to-socket --report-bindings echo
-    $ mpiexec -bysocket -bind-to-socket numactl --show
-    $ mpiexec -bysocket -bind-to-socket echo $OMP_NUM_THREADS
+```console
+$ mpiexec -bysocket -bind-to-socket --report-bindings echo
+$ mpiexec -bysocket -bind-to-socket numactl --show
+$ mpiexec -bysocket -bind-to-socket echo $OMP_NUM_THREADS
 ```
 
 ## Changes in OpenMPI 1.8
diff --git a/docs.it4i/anselm/software/mpi/mpi.md b/docs.it4i/anselm/software/mpi/mpi.md
index bc60afb16ebee9968d942c0e4189f79705118276..4313bf513d5262a4b3eba0f1ef10380142f3a2ef 100644
--- a/docs.it4i/anselm/software/mpi/mpi.md
+++ b/docs.it4i/anselm/software/mpi/mpi.md
@@ -14,10 +14,8 @@ The Anselm cluster provides several implementations of the MPI library:
 
 MPI libraries are activated via the environment modules.
 
-Look up section modulefiles/mpi in module avail
-
-```bash
-    $ module avail
+```console
+$ ml av mpi/
     ------------------------- /opt/modules/modulefiles/mpi -------------------------
     bullxmpi/bullxmpi-1.2.4.1 mvapich2/1.9-icc
     impi/4.0.3.008             openmpi/1.6.5-gcc(default)
@@ -43,17 +41,17 @@ There are default compilers associated with any particular MPI implementation. T
 
 Examples:
 
-```bash
-    $ module load openmpi
+```console
+$ ml OpenMPI **or** ml openmpi **for older versions**
 ```
 
 In this example, we activate the latest openmpi with latest GNU compilers
 
 To use openmpi with the intel compiler suite, use
 
-```bash
-    $ module load intel
-    $ module load openmpi/1.6.5-icc
+```console
+$ ml intel
+$ ml openmpi/1.6.5-icc
 ```
 
 In this example, the openmpi 1.6.5 using intel compilers is activated
@@ -63,10 +61,10 @@ In this example, the openmpi 1.6.5 using intel compilers is activated
 !!! note
     After setting up your MPI environment, compile your program using one of the mpi wrappers
 
-```bash
-    $ mpicc -v
-    $ mpif77 -v
-    $ mpif90 -v
+```console
+$ mpicc -v
+$ mpif77 -v
+$ mpif90 -v
 ```
 
 Example program:
@@ -101,8 +99,8 @@ Example program:
 
 Compile the above example with
 
-```bash
-    $ mpicc helloworld_mpi.c -o helloworld_mpi.x
+```console
+$ mpicc helloworld_mpi.c -o helloworld_mpi.x
 ```
 
 ## Running MPI Programs
diff --git a/docs.it4i/anselm/software/mpi/mpi4py-mpi-for-python.md b/docs.it4i/anselm/software/mpi/mpi4py-mpi-for-python.md
index 9625ed53e88575101548ddbe48687829ac18414c..4d687dc2f61e9ae593a7900b1bf183e07e61634f 100644
--- a/docs.it4i/anselm/software/mpi/mpi4py-mpi-for-python.md
+++ b/docs.it4i/anselm/software/mpi/mpi4py-mpi-for-python.md
@@ -12,11 +12,27 @@ On Anselm MPI4Py is available in standard Python modules.
 
 MPI4Py is build for OpenMPI. Before you start with MPI4Py you need to load Python and OpenMPI modules.
 
-```bash
-    $ module load python
-    $ module load openmpi
+```console
+$ ml av Python/
+--------------------------------------- /apps/modules/lang -------------------------
+   Python/2.7.8-intel-2015b    Python/2.7.11-intel-2016a  Python/3.5.1-intel-2017.00
+   Python/2.7.11-intel-2017a   Python/2.7.9-foss-2015b    Python/2.7.9-intel-2015b
+   Python/2.7.11-foss-2016a    Python/3.5.2-foss-2016a    Python/3.5.1
+   Python/2.7.9-foss-2015g     Python/3.4.3-intel-2015b   Python/2.7.9
+   Python/2.7.11-intel-2015b   Python/3.5.2
+   
+$ ml av OpenMPI/
+--------------------------------------- /apps/modules/mpi --------------------------
+OpenMPI/1.8.6-GCC-4.4.7-system   OpenMPI/1.8.8-GNU-4.9.3-2.25  OpenMPI/1.10.1-GCC-4.9.3-2.25
+OpenMPI/1.8.6-GNU-5.1.0-2.25     OpenMPI/1.8.8-GNU-5.1.0-2.25  OpenMPI/1.10.1-GNU-4.9.3-2.25
+    OpenMPI/1.8.8-iccifort-2015.3.187-GNU-4.9.3-2.25   OpenMPI/2.0.2-GCC-6.3.0-2.27
 ```
 
+!!! warning ""
+    * modules Python/x.x.x-intel... - intel MPI
+    * modules Python/x.x.x-foss...  - OpenMPI
+    * modules Python/x.x.x - without MPI
+
 ## Execution
 
 You need to import MPI to your python program. Include the following line to the python script:
@@ -27,14 +43,14 @@ You need to import MPI to your python program. Include the following line to the
 
 The MPI4Py enabled python programs [execute as any other OpenMPI](Running_OpenMPI/) code. The simplest way is to run
 
-```bash
-    $ mpiexec python <script>.py
+```console
+$ mpiexec python <script>.py
 ```
 
 For example
 
-```bash
-    $ mpiexec python hello_world.py
+```console
+$ mpiexec python hello_world.py
 ```
 
 ## Examples
@@ -82,12 +98,11 @@ For example
 
 Execute the above code as:
 
-```bash
-    $ qsub -q qexp -l select=4:ncpus=16:mpiprocs=16:ompthreads=1 -I
-
-    $ module load python openmpi
-
-    $ mpiexec -bycore -bind-to-core python hello_world.py
+```console
+$ qsub -q qexp -l select=4:ncpus=16:mpiprocs=16:ompthreads=1 -I
+$ ml Python
+$ ml OpenMPI
+$ mpiexec -bycore -bind-to-core python hello_world.py
 ```
 
 In this example, we run MPI4Py enabled code on 4 nodes, 16 cores per node (total of 64 processes), each python process is bound to a different core. More examples and documentation can be found on [MPI for Python webpage](https://pypi.python.org/pypi/mpi4py).
diff --git a/docs.it4i/anselm/software/mpi/running-mpich2.md b/docs.it4i/anselm/software/mpi/running-mpich2.md
index 64d3c620fddf82b25339d535fb984067924ef29a..7b37a811802ffe6aa142cad5773cfc20e842b6fd 100644
--- a/docs.it4i/anselm/software/mpi/running-mpich2.md
+++ b/docs.it4i/anselm/software/mpi/running-mpich2.md
@@ -11,14 +11,12 @@ The MPICH2 programs use mpd daemon or ssh connection to spawn processes, no PBS
 
 Example:
 
-```bash
-    $ qsub -q qexp -l select=4:ncpus=16 -I
+```console
+$ qsub -q qexp -l select=4:ncpus=16 -I
     qsub: waiting for job 15210.srv11 to start
     qsub: job 15210.srv11 ready
-
-    $ module load impi
-
-    $ mpirun -ppn 1 -hostfile $PBS_NODEFILE ./helloworld_mpi.x
+$ ml impi
+$ mpirun -ppn 1 -hostfile $PBS_NODEFILE ./helloworld_mpi.x
     Hello world! from rank 0 of 4 on host cn17
     Hello world! from rank 1 of 4 on host cn108
     Hello world! from rank 2 of 4 on host cn109
@@ -30,11 +28,11 @@ Note that the executable helloworld_mpi.x must be available within the same path
 
 You need to preload the executable, if running on the local scratch /lscratch filesystem
 
-```bash
-    $ pwd
+```console
+$ pwd
     /lscratch/15210.srv11
-    $ mpirun -ppn 1 -hostfile $PBS_NODEFILE cp /home/username/helloworld_mpi.x .
-    $ mpirun -ppn 1 -hostfile $PBS_NODEFILE ./helloworld_mpi.x
+$ mpirun -ppn 1 -hostfile $PBS_NODEFILE cp /home/username/helloworld_mpi.x .
+$ mpirun -ppn 1 -hostfile $PBS_NODEFILE ./helloworld_mpi.x
     Hello world! from rank 0 of 4 on host cn17
     Hello world! from rank 1 of 4 on host cn108
     Hello world! from rank 2 of 4 on host cn109
@@ -52,12 +50,10 @@ The mpiprocs and ompthreads parameters allow for selection of number of running
 
 Follow this example to run one MPI process per node, 16 threads per process. Note that no options to mpirun are needed
 
-```bash
-    $ qsub -q qexp -l select=4:ncpus=16:mpiprocs=1:ompthreads=16 -I
-
-    $ module load mvapich2
-
-    $ mpirun ./helloworld_mpi.x
+```console
+$ qsub -q qexp -l select=4:ncpus=16:mpiprocs=1:ompthreads=16 -I
+$ ml mvapich2
+$ mpirun ./helloworld_mpi.x
 ```
 
 In this example, we demonstrate recommended way to run an MPI application, using 1 MPI processes per node and 16 threads per socket, on 4 nodes.
@@ -66,12 +62,10 @@ In this example, we demonstrate recommended way to run an MPI application, using
 
 Follow this example to run two MPI processes per node, 8 threads per process. Note the options to mpirun for mvapich2. No options are needed for impi.
 
-```bash
-    $ qsub -q qexp -l select=4:ncpus=16:mpiprocs=2:ompthreads=8 -I
-
-    $ module load mvapich2
-
-    $ mpirun -bind-to numa ./helloworld_mpi.x
+```console
+$ qsub -q qexp -l select=4:ncpus=16:mpiprocs=2:ompthreads=8 -I
+$ ml mvapich2
+$ mpirun -bind-to numa ./helloworld_mpi.x
 ```
 
 In this example, we demonstrate recommended way to run an MPI application, using 2 MPI processes per node and 8 threads per socket, each process and its threads bound to a separate processor socket of the node, on 4 nodes
@@ -80,12 +74,10 @@ In this example, we demonstrate recommended way to run an MPI application, using
 
 Follow this example to run 16 MPI processes per node, 1 thread per process. Note the options to mpirun for mvapich2. No options are needed for impi.
 
-```bash
-    $ qsub -q qexp -l select=4:ncpus=16:mpiprocs=16:ompthreads=1 -I
-
-    $ module load mvapich2
-
-    $ mpirun -bind-to core ./helloworld_mpi.x
+```console
+$ qsub -q qexp -l select=4:ncpus=16:mpiprocs=16:ompthreads=1 -I
+$ ml mvapich2
+$ mpirun -bind-to core ./helloworld_mpi.x
 ```
 
 In this example, we demonstrate recommended way to run an MPI application, using 16 MPI processes per node, single threaded. Each process is bound to separate processor core, on 4 nodes.
@@ -97,21 +89,21 @@ In this example, we demonstrate recommended way to run an MPI application, using
 
 In the previous two examples with one or two MPI processes per node, the operating system might still migrate OpenMP threads between cores. You might want to avoid this by setting these environment variable for GCC OpenMP:
 
-```bash
-    $ export GOMP_CPU_AFFINITY="0-15"
+```console
+$ export GOMP_CPU_AFFINITY="0-15"
 ```
 
 or this one for Intel OpenMP:
 
-```bash
-    $ export KMP_AFFINITY=granularity=fine,compact,1,0
+```console
+$ export KMP_AFFINITY=granularity=fine,compact,1,0
 ```
 
 As of OpenMP 4.0 (supported by GCC 4.9 and later and Intel 14.0 and later) the following variables may be used for Intel or GCC:
 
-```bash
-    $ export OMP_PROC_BIND=true
-    $ export OMP_PLACES=cores
+```console
+$ export OMP_PROC_BIND=true
+$ export OMP_PLACES=cores
 ```
 
 ## MPICH2 Process Mapping and Binding
@@ -124,7 +116,7 @@ Process mapping may be controlled by specifying a machinefile input to the mpiru
 
 Example machinefile
 
-```bash
+```console
     cn110.bullx
     cn109.bullx
     cn108.bullx
@@ -134,8 +126,8 @@ Example machinefile
 
 Use the machinefile to control process placement
 
-```bash
-    $ mpirun -machinefile machinefile helloworld_mpi.x
+```console
+$ mpirun -machinefile machinefile helloworld_mpi.x
     Hello world! from rank 0 of 5 on host cn110
     Hello world! from rank 1 of 5 on host cn109
     Hello world! from rank 2 of 5 on host cn108
@@ -153,9 +145,9 @@ The Intel MPI automatically binds each process and its threads to the correspond
 
 In all cases, binding and threading may be verified by executing
 
-```bash
-    $ mpirun  -bindto numa numactl --show
-    $ mpirun  -bindto numa echo $OMP_NUM_THREADS
+```console
+$ mpirun  -bindto numa numactl --show
+$ mpirun  -bindto numa echo $OMP_NUM_THREADS
 ```
 
 ## Intel MPI on Xeon Phi
diff --git a/docs.it4i/anselm/software/numerical-languages/introduction.md b/docs.it4i/anselm/software/numerical-languages/introduction.md
index 67493f1f7d099c0c9a8986b2118bff77aa4dd38b..8646fe6fed34038028fdab9dbcde98840d204944 100644
--- a/docs.it4i/anselm/software/numerical-languages/introduction.md
+++ b/docs.it4i/anselm/software/numerical-languages/introduction.md
@@ -10,9 +10,9 @@ This section contains a collection of high-level interpreted languages, primaril
 
 MATLAB® is a high-level language and interactive environment for numerical computation, visualization, and programming.
 
-```bash
-    $ module load MATLAB/2015b-EDU
-    $ matlab
+```console
+$ ml MATLAB/2015b-EDU
+$ matlab
 ```
 
 Read more at the [Matlab page](matlab/).
@@ -21,9 +21,9 @@ Read more at the [Matlab page](matlab/).
 
 GNU Octave is a high-level interpreted language, primarily intended for numerical computations. The Octave language is quite similar to Matlab so that most programs are easily portable.
 
-```bash
-    $ module load Octave
-    $ octave
+```console
+$ ml Octave
+$ octave
 ```
 
 Read more at the [Octave page](octave/).
@@ -32,9 +32,9 @@ Read more at the [Octave page](octave/).
 
 The R is an interpreted language and environment for statistical computing and graphics.
 
-```bash
-    $ module load R
-    $ R
+```console
+$ ml R
+$ R
 ```
 
 Read more at the [R page](r/).
diff --git a/docs.it4i/anselm/software/numerical-languages/matlab.md b/docs.it4i/anselm/software/numerical-languages/matlab.md
index d7c3d907452ca38deea8f07235170ead3114c1eb..ac1b0cc5e6b5728f0079b57b771ec17a219f4d8d 100644
--- a/docs.it4i/anselm/software/numerical-languages/matlab.md
+++ b/docs.it4i/anselm/software/numerical-languages/matlab.md
@@ -9,14 +9,14 @@ Matlab is available in versions R2015a and R2015b. There are always two variants
 
 To load the latest version of Matlab load the module
 
-```bash
-    $ module load MATLAB
+```console
+$ ml MATLAB
 ```
 
 By default the EDU variant is marked as default. If you need other version or variant, load the particular version. To obtain the list of available versions use
 
-```bash
-    $ module avail MATLAB
+```console
+$ ml av MATLAB
 ```
 
 If you need to use the Matlab GUI to prepare your Matlab programs, you can use Matlab directly on the login nodes. But for all computations use Matlab on the compute nodes via PBS Pro scheduler.
@@ -27,14 +27,14 @@ Matlab GUI is quite slow using the X forwarding built in the PBS (qsub -X), so u
 
 To run Matlab with GUI, use
 
-```bash
-    $ matlab
+```console
+$ matlab
 ```
 
 To run Matlab in text mode, without the Matlab Desktop GUI environment, use
 
-```bash
-    $ matlab -nodesktop -nosplash
+```console
+$ matlab -nodesktop -nosplash
 ```
 
 plots, images, etc... will be still available.
@@ -50,7 +50,7 @@ Delete previously used file mpiLibConf.m, we have observed crashes when using In
 
 To use Distributed Computing, you first need to setup a parallel profile. We have provided the profile for you, you can either import it in MATLAB command line:
 
-```bash
+```console
     >> parallel.importProfile('/apps/all/MATLAB/2015a-EDU/SalomonPBSPro.settings')
 
     ans =
@@ -71,10 +71,9 @@ With the new mode, MATLAB itself launches the workers via PBS, so you can either
 
 Following example shows how to start interactive session with support for Matlab GUI. For more information about GUI based applications on Anselm see [this page](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/x-window-system/).
 
-```bash
-    $ xhost +
-    $ qsub -I -v DISPLAY=$(uname -n):$(echo $DISPLAY | cut -d ':' -f 2) -A NONE-0-0 -q qexp -l select=1 -l walltime=00:30:00
-    -l feature__matlab__MATLAB=1
+```console
+$ xhost +
+$ qsub -I -v DISPLAY=$(uname -n):$(echo $DISPLAY | cut -d ':' -f 2) -A NONE-0-0 -q qexp -l select=1 -l walltime=00:30:00 -l feature__matlab__MATLAB=1
 ```
 
 This qsub command example shows how to run Matlab on a single node.
@@ -83,9 +82,9 @@ The second part of the command shows how to request all necessary licenses. In t
 
 Once the access to compute nodes is granted by PBS, user can load following modules and start Matlab:
 
-```bash
-    r1i0n17$ module load MATLAB/2015b-EDU
-    r1i0n17$ matlab &
+```console
+r1i0n17$ ml MATLAB/2015b-EDU
+r1i0n17$ matlab &
 ```
 
 ### Parallel Matlab Batch Job in Local Mode
@@ -119,15 +118,15 @@ This script may be submitted directly to the PBS workload manager via the qsub c
 
 Submit the jobscript using qsub
 
-```bash
-    $ qsub ./jobscript
+```console
+$ qsub ./jobscript
 ```
 
 ### Parallel Matlab Local Mode Program Example
 
 The last part of the configuration is done directly in the user Matlab script before Distributed Computing Toolbox is started.
 
-```bash
+```console
     cluster = parcluster('local')
 ```
 
@@ -138,7 +137,7 @@ This script creates scheduler object "cluster" of type "local" that starts worke
 
 The last step is to start matlabpool with "cluster" object and correct number of workers. We have 24 cores per node, so we start 24 workers.
 
-```bash
+```console
     parpool(cluster,16);
 
 
@@ -150,7 +149,7 @@ The last step is to start matlabpool with "cluster" object and correct number of
 
 The complete example showing how to use Distributed Computing Toolbox in local mode is shown here.
 
-```bash
+```console
     cluster = parcluster('local');
     cluster
 
@@ -183,7 +182,7 @@ This mode uses PBS scheduler to launch the parallel pool. It uses the SalomonPBS
 
 This is an example of m-script using PBS mode:
 
-```bash
+```console
     cluster = parcluster('SalomonPBSPro');
     set(cluster, 'SubmitArguments', '-A OPEN-0-0');
     set(cluster, 'ResourceTemplate', '-q qprod -l select=10:ncpus=16');
@@ -224,7 +223,7 @@ For this method, you need to use SalomonDirect profile, import it using [the sam
 
 This is an example of m-script using direct mode:
 
-```bash
+```console
     parallel.importProfile('/apps/all/MATLAB/2015a-EDU/SalomonDirect.settings')
     cluster = parcluster('SalomonDirect');
     set(cluster, 'NumWorkers', 48);
diff --git a/docs.it4i/anselm/software/numerical-languages/matlab_1314.md b/docs.it4i/anselm/software/numerical-languages/matlab_1314.md
index 8c1012531c67f272907e154addb5f336e636eaf6..41dca05619875b20806beb1a8dde7c255347bd89 100644
--- a/docs.it4i/anselm/software/numerical-languages/matlab_1314.md
+++ b/docs.it4i/anselm/software/numerical-languages/matlab_1314.md
@@ -12,14 +12,14 @@ Matlab is available in the latest stable version. There are always two variants
 
 To load the latest version of Matlab load the module
 
-```bash
-    $ module load matlab
+```console
+$ ml matlab
 ```
 
 By default the EDU variant is marked as default. If you need other version or variant, load the particular version. To obtain the list of available versions use
 
-```bash
-    $ module avail matlab
+```console
+$ ml matlab
 ```
 
 If you need to use the Matlab GUI to prepare your Matlab programs, you can use Matlab directly on the login nodes. But for all computations use Matlab on the compute nodes via PBS Pro scheduler.
@@ -30,13 +30,13 @@ Matlab GUI is quite slow using the X forwarding built in the PBS (qsub -X), so u
 
 To run Matlab with GUI, use
 
-```bash
+```console
 $ matlab
 ```
 
 To run Matlab in text mode, without the Matlab Desktop GUI environment, use
 
-```bash
+```console
 $ matlab -nodesktop -nosplash
 ```
 
@@ -50,11 +50,9 @@ Recommended parallel mode for running parallel Matlab on Anselm is MPIEXEC mode.
 
 For the performance reasons Matlab should use system MPI. On Anselm the supported MPI implementation for Matlab is Intel MPI. To switch to system MPI user has to override default Matlab setting by creating new configuration file in its home directory. The path and file name has to be exactly the same as in the following listing:
 
-```bash
+```console
 $ vim ~/matlab/mpiLibConf.m
-```
 
-```bash
 function [lib, extras] = mpiLibConf
 %MATLAB MPI Library overloading for Infiniband Networks
 
@@ -78,10 +76,9 @@ System MPI library allows Matlab to communicate through 40 Gbit/s InfiniBand QDR
 
 Once this file is in place, user can request resources from PBS. Following example shows how to start interactive session with support for Matlab GUI. For more information about GUI based applications on Anselm see.
 
-```bash
-    $ xhost +
-    $ qsub -I -v DISPLAY=$(uname -n):$(echo $DISPLAY | cut -d ':' -f 2) -A NONE-0-0 -q qexp -l select=4:ncpus=16:mpiprocs=16 -l walltime=00:30:00
-    -l feature__matlab__MATLAB=1
+```console
+$ xhost +
+$ qsub -I -v DISPLAY=$(uname -n):$(echo $DISPLAY | cut -d ':' -f 2) -A NONE-0-0 -q qexp -l select=4:ncpus=16:mpiprocs=16 -l walltime=00:30:00 -l feature__matlab__MATLAB=1
 ```
 
 This qsub command example shows how to run Matlab with 32 workers in following configuration: 2 nodes (use all 16 cores per node) and 16 workers = mpiprocs per node (-l select=2:ncpus=16:mpiprocs=16). If user requires to run smaller number of workers per node then the "mpiprocs" parameter has to be changed.
@@ -90,9 +87,9 @@ The second part of the command shows how to request all necessary licenses. In t
 
 Once the access to compute nodes is granted by PBS, user can load following modules and start Matlab:
 
-```bash
-    cn79$ module load matlab/R2013a-EDU
-    cn79$ module load impi/4.1.1.036
+```console
+    cn79$ ml matlab/R2013a-EDU
+    cn79$ ml impi/4.1.1.036
     cn79$ matlab &
 ```
 
@@ -128,7 +125,7 @@ This script may be submitted directly to the PBS workload manager via the qsub c
 
 Submit the jobscript using qsub
 
-```bash
+```console
 $ qsub ./jobscript
 ```
 
@@ -136,7 +133,7 @@ $ qsub ./jobscript
 
 The last part of the configuration is done directly in the user Matlab script before Distributed Computing Toolbox is started.
 
-```bash
+```console
 sched = findResource('scheduler', 'type', 'mpiexec');
 set(sched, 'MpiexecFileName', '/apps/intel/impi/4.1.1/bin/mpirun');
 set(sched, 'EnvironmentSetMethod', 'setenv');
@@ -149,7 +146,7 @@ This script creates scheduler object "sched" of type "mpiexec" that starts worke
 
 The last step is to start matlabpool with "sched" object and correct number of workers. In this case qsub asked for total number of 32 cores, therefore the number of workers is also set to 32.
 
-```bash
+```console
 matlabpool(sched,32);
 
 
@@ -161,7 +158,7 @@ matlabpool close
 
 The complete example showing how to use Distributed Computing Toolbox is shown here.
 
-```bash
+```console
 sched = findResource('scheduler', 'type', 'mpiexec');
 set(sched, 'MpiexecFileName', '/apps/intel/impi/4.1.1/bin/mpirun')
 set(sched, 'EnvironmentSetMethod', 'setenv')
diff --git a/docs.it4i/anselm/software/numerical-languages/octave.md b/docs.it4i/anselm/software/numerical-languages/octave.md
index 19142eb0f6b9150df56c553ba395d385c4b92a47..4fbb52979a38da23ec3a9a3c93e456383f99ab22 100644
--- a/docs.it4i/anselm/software/numerical-languages/octave.md
+++ b/docs.it4i/anselm/software/numerical-languages/octave.md
@@ -6,7 +6,7 @@ GNU Octave is a high-level interpreted language, primarily intended for numerica
 
 Two versions of octave are available on Anselm, via module
 
-| Version                                               | module                    |
+| Version                                              | module                    |
 | ----------------------------------------------------- | ------------------------- |
 | Octave 3.8.2, compiled with GCC and Multithreaded MKL | Octave/3.8.2-gimkl-2.11.5 |
 | Octave 4.0.1, compiled with GCC and Multithreaded MKL | Octave/4.0.1-gimkl-2.11.5 |
@@ -14,14 +14,16 @@ Two versions of octave are available on Anselm, via module
 
 ## Modules and Execution
 
-    $ module load Octave
+```console
+$ ml Octave
+```
 
 The octave on Anselm is linked to highly optimized MKL mathematical library. This provides threaded parallelization to many octave kernels, notably the linear algebra subroutines. Octave runs these heavy calculation kernels without any penalty. By default, octave would parallelize to 16 threads. You may control the threads by setting the OMP_NUM_THREADS environment variable.
 
 To run octave interactively, log in with ssh -X parameter for X11 forwarding. Run octave:
 
-```bash
-    $ octave
+```console
+$ octave
 ```
 
 To run octave in batch mode, write an octave script, then write a bash jobscript and execute via the qsub command. By default, octave will use 16 threads when running MKL kernels.
@@ -52,8 +54,8 @@ This script may be submitted directly to the PBS workload manager via the qsub c
 
 The octave c compiler mkoctfile calls the GNU gcc 4.8.1 for compiling native c code. This is very useful for running native c subroutines in octave environment.
 
-```bash
-    $ mkoctfile -v
+```console
+$ mkoctfile -v
 ```
 
 Octave may use MPI for interprocess communication. This functionality is currently not supported on Anselm cluster. In case you require the octave interface to MPI, please contact [Anselm support](https://support.it4i.cz/rt/).
@@ -68,11 +70,11 @@ Octave can accelerate BLAS type operations (in particular the Matrix Matrix mult
 
 Example
 
-```bash
-    $ export OFFLOAD_REPORT=2
-    $ export MKL_MIC_ENABLE=1
-    $ module load octave
-    $ octave -q
+```console
+$ export OFFLOAD_REPORT=2
+$ export MKL_MIC_ENABLE=1
+$ ml octave
+$ octave -q
     octave:1> A=rand(10000); B=rand(10000);
     octave:2> tic; C=A*B; toc
     [MKL] [MIC --] [AO Function]    DGEMM
@@ -101,8 +103,8 @@ variable.
 
 To use Octave on a node with Xeon Phi:
 
-```bash
-    $ ssh mic0                                               # login to the MIC card
-    $ source /apps/tools/octave/3.8.2-mic/bin/octave-env.sh # set up environment variables
-    $ octave -q /apps/tools/octave/3.8.2-mic/example/test0.m # run an example
+```console
+$ ssh mic0                                               # login to the MIC card
+$ source /apps/tools/octave/3.8.2-mic/bin/octave-env.sh  # set up environment variables
+$ octave -q /apps/tools/octave/3.8.2-mic/example/test0.m # run an example
 ```
diff --git a/docs.it4i/anselm/software/numerical-languages/r.md b/docs.it4i/anselm/software/numerical-languages/r.md
index d70ea9026f50ed82ff789a232a21de97b7b472cb..8916ccb7cc21a1e9bf7de6bda24d1a38bdf82263 100644
--- a/docs.it4i/anselm/software/numerical-languages/r.md
+++ b/docs.it4i/anselm/software/numerical-languages/r.md
@@ -21,8 +21,8 @@ The R version 3.0.1 is available on Anselm, along with GUI interface Rstudio
 | **R**       | R 3.0.1      | R       |
 | **Rstudio** | Rstudio 0.97 | Rstudio |
 
-```bash
-    $ module load R
+```console
+$ ml R
 ```
 
 ## Execution
@@ -33,9 +33,9 @@ The R on Anselm is linked to highly optimized MKL mathematical library. This pro
 
 To run R interactively, using Rstudio GUI, log in with ssh -X parameter for X11 forwarding. Run rstudio:
 
-```bash
-    $ module load Rstudio
-    $ rstudio
+```console
+$ ml Rstudio
+$ rstudio
 ```
 
 ### Batch Execution
@@ -78,14 +78,14 @@ The package parallel provides support for parallel computation, including by for
 
 The package is activated this way:
 
-```bash
-    $ R
+```console
+$ R
     > library(parallel)
 ```
 
 More information and examples may be obtained directly by reading the documentation available in R
 
-```bash
+```console
     > ?parallel
     > library(help = "parallel")
     > vignette("parallel")
@@ -104,7 +104,7 @@ The forking is the most simple to use. Forking family of functions provide paral
 
 Forking example:
 
-```bash
+```r
     library(parallel)
 
     #integrand function
@@ -138,8 +138,8 @@ Forking example:
 
 The above example is the classic parallel example for calculating the number π. Note the **detectCores()** and **mclapply()** functions. Execute the example as:
 
-```bash
-    $ R --slave --no-save --no-restore -f pi3p.R
+```console
+$ R --slave --no-save --no-restore -f pi3p.R
 ```
 
 Every evaluation of the integrand function runs in parallel on a different process.
@@ -155,9 +155,9 @@ Read more on Rmpi at <http://cran.r-project.org/web/packages/Rmpi/>, reference m
 
 When using package Rmpi, both openmpi and R modules must be loaded
 
-```bash
-    $ module load openmpi
-    $ module load R
+```console
+$ ml OpenMPI
+$ ml R
 ```
 
 Rmpi may be used in three basic ways. The static approach is identical to executing any other MPI program. In addition, there is Rslaves dynamic MPI approach and the mpi.apply approach. In the following section, we will use the number π integration example, to illustrate all these concepts.
@@ -168,7 +168,7 @@ Static Rmpi programs are executed via mpiexec, as any other MPI programs. Number
 
 Static Rmpi example:
 
-```cpp
+```r
     library(Rmpi)
 
     #integrand function
@@ -216,8 +216,8 @@ The above is the static MPI example for calculating the number π. Note the **li
 
 Execute the example as:
 
-```bash
-    $ mpiexec R --slave --no-save --no-restore -f pi3.R
+```console
+$ mpiexec R --slave --no-save --no-restore -f pi3.R
 ```
 
 ### Dynamic Rmpi
@@ -226,7 +226,7 @@ Dynamic Rmpi programs are executed by calling the R directly. openmpi module mus
 
 Dynamic Rmpi example:
 
-```cpp
+```r
     #integrand function
     f <- function(i,h) {
     x <- h*(i-0.5)
@@ -288,8 +288,8 @@ The above example is the dynamic MPI example for calculating the number π. Both
 
 Execute the example as:
 
-```bash
-    $ R --slave --no-save --no-restore -f pi3Rslaves.R
+```console
+$ R --slave --no-save --no-restore -f pi3Rslaves.R
 ```
 
 ### mpi.apply Rmpi
@@ -303,7 +303,7 @@ Execution is identical to other dynamic Rmpi programs.
 
 mpi.apply Rmpi example:
 
-```bash
+```r
     #integrand function
     f <- function(i,h) {
     x <- h*(i-0.5)
@@ -355,8 +355,8 @@ The above is the mpi.apply MPI example for calculating the number π. Only the s
 
 Execute the example as:
 
-```bash
-    $ R --slave --no-save --no-restore -f pi3parSapply.R
+```console
+$ R --slave --no-save --no-restore -f pi3parSapply.R
 ```
 
 ## Combining Parallel and Rmpi
diff --git a/docs.it4i/anselm/software/numerical-libraries/fftw.md b/docs.it4i/anselm/software/numerical-libraries/fftw.md
index 038e1223a44cde79a37f2f7fe59fab9f7e5a8e8e..7345a811672a725f3916d601d4164e377580b3ab 100644
--- a/docs.it4i/anselm/software/numerical-libraries/fftw.md
+++ b/docs.it4i/anselm/software/numerical-libraries/fftw.md
@@ -17,8 +17,8 @@ Two versions, **3.3.3** and **2.1.5** of FFTW are available on Anselm, each comp
 | FFTW2 gcc2.1.5 | OpenMPI         | fftw2-mpi/2.1.5-gcc | -lfftw_mpi                          |
 | FFTW2 gcc2.1.5 | IntelMPI        | fftw2-mpi/2.1.5-gcc | -lfftw_mpi                          |
 
-```bash
-    $ module load fftw3
+```console
+$ ml fftw3 **or** ml FFTW
 ```
 
 The module sets up environment variables, required for linking and running FFTW enabled applications. Make sure that the choice of FFTW module is consistent with your choice of MPI library. Mixing MPI of different implementations may have unpredictable results.
@@ -62,11 +62,10 @@ The module sets up environment variables, required for linking and running FFTW
 
 Load modules and compile:
 
-```bash
-    $ module load impi intel
-    $ module load fftw3-mpi
-
-    $ mpicc testfftw3mpi.c -o testfftw3mpi.x -Wl,-rpath=$LIBRARY_PATH -lfftw3_mpi
+```console
+$ ml intel
+$ ml fftw3-mpi
+$ mpicc testfftw3mpi.c -o testfftw3mpi.x -Wl,-rpath=$LIBRARY_PATH -lfftw3_mpi
 ```
 
 Run the example as [Intel MPI program](../mpi/running-mpich2/).
diff --git a/docs.it4i/anselm/software/numerical-libraries/gsl.md b/docs.it4i/anselm/software/numerical-libraries/gsl.md
index 6b5308df3dabbbfe12a8763a955562e311eff35a..3299492ddbe6270c70a1ee1fbc4228b4e3ca5c15 100644
--- a/docs.it4i/anselm/software/numerical-libraries/gsl.md
+++ b/docs.it4i/anselm/software/numerical-libraries/gsl.md
@@ -51,8 +51,8 @@ The GSL 1.16 is available on Anselm, compiled for GNU and Intel compiler. These
 | gsl/1.16-gcc          | gcc 4.8.6 |
 | gsl/1.16-icc(default) | icc       |
 
-```bash
-     $ module load gsl
+```console
+$ ml gsl
 ```
 
 The module sets up environment variables, required for linking and running GSL enabled applications. This particular command loads the default module, which is gsl/1.16-icc
@@ -63,19 +63,19 @@ Load an appropriate gsl module. Link using **-lgsl** switch to link your code ag
 
 ### Compiling and Linking With Intel Compilers
 
-```bash
-    $ module load intel
-    $ module load gsl
-    $ icc myprog.c -o myprog.x -Wl,-rpath=$LIBRARY_PATH -mkl -lgsl
+```console
+$ ml intel
+$ ml gsl
+$ icc myprog.c -o myprog.x -Wl,-rpath=$LIBRARY_PATH -mkl -lgsl
 ```
 
 ### Compiling and Linking With GNU Compilers
 
-```bash
-    $ module load gcc
-    $ module load mkl
-    $ module load gsl/1.16-gcc
-    $ gcc myprog.c -o myprog.x -Wl,-rpath=$LIBRARY_PATH -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core -lgomp -lgsl
+```console
+$ ml gcc
+$ ml imkl **or** ml mkl
+$ ml gsl/1.16-gcc
+$ gcc myprog.c -o myprog.x -Wl,-rpath=$LIBRARY_PATH -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core -lgomp -lgsl
 ```
 
 ## Example
@@ -136,9 +136,10 @@ Following is an example of discrete wavelet transform implemented by GSL:
 
 Load modules and compile:
 
-```bash
-    $ module load intel gsl
-    icc dwt.c -o dwt.x -Wl,-rpath=$LIBRARY_PATH -mkl -lgsl
+```console
+$ ml intel
+$ ml gsl
+$ icc dwt.c -o dwt.x -Wl,-rpath=$LIBRARY_PATH -mkl -lgsl
 ```
 
 In this example, we compile the dwt.c code using the Intel compiler and link it to the MKL and GSL library, note the -mkl and -lgsl options. The library search path is compiled in, so that no modules are necessary to run the code.
diff --git a/docs.it4i/anselm/software/numerical-libraries/hdf5.md b/docs.it4i/anselm/software/numerical-libraries/hdf5.md
index d9abd72c405ab3ff867203fbe7c9408e9e7c5d7c..13f626264cab05dd93d091b0752d1a4a8df2dcf5 100644
--- a/docs.it4i/anselm/software/numerical-libraries/hdf5.md
+++ b/docs.it4i/anselm/software/numerical-libraries/hdf5.md
@@ -16,8 +16,9 @@ Versions **1.8.11** and **1.8.13** of HDF5 library are available on Anselm, comp
 | HDF5 gcc parallel MPI | pthread, OpenMPI 1.6.5, gcc 4.8.1 | hdf5-parallel/1.8.13-gcc   | $HDF5_INC $HDF5_SHLIB | Not supported           | $HDF5_INC $HDF5_F90_LIB |
 | HDF5 gcc parallel MPI | pthread, OpenMPI 1.8.1, gcc 4.9.0 | hdf5-parallel/1.8.13-gcc49 | $HDF5_INC $HDF5_SHLIB | Not supported           | $HDF5_INC $HDF5_F90_LIB |
 
-```bash
-    $ module load hdf5-parallel
+```console
+
+$ ml hdf5-parallel
 ```
 
 The module sets up environment variables, required for linking and running HDF5 enabled applications. Make sure that the choice of HDF5 module is consistent with your choice of MPI library. Mixing MPI of different implementations may have unpredictable results.
@@ -77,11 +78,10 @@ The module sets up environment variables, required for linking and running HDF5
 
 Load modules and compile:
 
-```bash
-    $ module load intel impi
-    $ module load hdf5-parallel
-
-    $ mpicc hdf5test.c -o hdf5test.x -Wl,-rpath=$LIBRARY_PATH $HDF5_INC $HDF5_SHLIB
+```console
+$ ml intel
+$ ml hdf5-parallel
+$ mpicc hdf5test.c -o hdf5test.x -Wl,-rpath=$LIBRARY_PATH $HDF5_INC $HDF5_SHLIB
 ```
 
 Run the example as [Intel MPI program](../mpi/running-mpich2/).
diff --git a/docs.it4i/anselm/software/numerical-libraries/intel-numerical-libraries.md b/docs.it4i/anselm/software/numerical-libraries/intel-numerical-libraries.md
index 8a79b9961d7f158bb369dc65f6ea6e21896b09ac..5f3834ffa84ee0b1fb73d01dfa0aa1a2106566b0 100644
--- a/docs.it4i/anselm/software/numerical-libraries/intel-numerical-libraries.md
+++ b/docs.it4i/anselm/software/numerical-libraries/intel-numerical-libraries.md
@@ -6,8 +6,8 @@ Intel libraries for high performance in numerical computing
 
 Intel Math Kernel Library (Intel MKL) is a library of math kernel subroutines, extensively threaded and optimized for maximum performance. Intel MKL unites and provides these basic components: BLAS, LAPACK, ScaLapack, PARDISO, FFT, VML, VSL, Data fitting, Feast Eigensolver and many more.
 
-```bash
-    $ module load mkl
+```console
+$ ml mkl **or** ml imkl
 ```
 
 Read more at the [Intel MKL](../intel-suite/intel-mkl/) page.
@@ -16,8 +16,8 @@ Read more at the [Intel MKL](../intel-suite/intel-mkl/) page.
 
 Intel Integrated Performance Primitives, version 7.1.1, compiled for AVX is available, via module ipp. The IPP is a library of highly optimized algorithmic building blocks for media and data applications. This includes signal, image and frame processing algorithms, such as FFT, FIR, Convolution, Optical Flow, Hough transform, Sum, MinMax and many more.
 
-```bash
-    $ module load ipp
+```console
+$ ml ipp
 ```
 
 Read more at the [Intel IPP](../intel-suite/intel-integrated-performance-primitives/) page.
@@ -26,8 +26,8 @@ Read more at the [Intel IPP](../intel-suite/intel-integrated-performance-primiti
 
 Intel Threading Building Blocks (Intel TBB) is a library that supports scalable parallel programming using standard ISO C++ code. It does not require special languages or compilers. It is designed to promote scalable data parallel programming. Additionally, it fully supports nested parallelism, so you can build larger parallel components from smaller parallel components. To use the library, you specify tasks, not threads, and let the library map tasks onto threads in an efficient manner.
 
-```bash
-    $ module load tbb
+```console
+$ ml tbb
 ```
 
 Read more at the [Intel TBB](../intel-suite/intel-tbb/) page.
diff --git a/docs.it4i/anselm/software/numerical-libraries/magma-for-intel-xeon-phi.md b/docs.it4i/anselm/software/numerical-libraries/magma-for-intel-xeon-phi.md
index 8ce0b79e0ce63aff1cfea48f72e009ad111a79a1..64c443796b11a378345e9aa93da94af791cf5e5a 100644
--- a/docs.it4i/anselm/software/numerical-libraries/magma-for-intel-xeon-phi.md
+++ b/docs.it4i/anselm/software/numerical-libraries/magma-for-intel-xeon-phi.md
@@ -6,8 +6,8 @@ Next generation dense algebra library for heterogeneous systems with accelerator
 
 To be able to compile and link code with MAGMA library user has to load following module:
 
-```bash
-    $ module load magma/1.3.0-mic
+```console
+$ ml magma/1.3.0-mic
 ```
 
 To make compilation more user friendly module also sets these two environment variables:
@@ -20,10 +20,9 @@ To make compilation more user friendly module also sets these two environment va
 
 Compilation example:
 
-```bash
-    $ icc -mkl -O3 -DHAVE_MIC -DADD_ -Wall $MAGMA_INC -c testing_dgetrf_mic.cpp -o testing_dgetrf_mic.o
-
-    $ icc -mkl -O3 -DHAVE_MIC -DADD_ -Wall -fPIC -Xlinker -zmuldefs -Wall -DNOCHANGE -DHOST testing_dgetrf_mic.o  -o testing_dgetrf_mic $MAGMA_LIBS
+```console
+$ icc -mkl -O3 -DHAVE_MIC -DADD_ -Wall $MAGMA_INC -c testing_dgetrf_mic.cpp -o testing_dgetrf_mic.o
+$ icc -mkl -O3 -DHAVE_MIC -DADD_ -Wall -fPIC -Xlinker -zmuldefs -Wall -DNOCHANGE -DHOST testing_dgetrf_mic.o  -o testing_dgetrf_mic $MAGMA_LIBS
 ```
 
 ### Running MAGMA Code
@@ -44,12 +43,10 @@ MAGMA implementation for Intel MIC requires a MAGMA server running on accelerato
 
 To test if the MAGMA server runs properly we can run one of examples that are part of the MAGMA installation:
 
-```bash
-    [user@cn204 ~]$ $MAGMAROOT/testing/testing_dgetrf_mic
-
-    [user@cn204 ~]$ export OMP_NUM_THREADS=16
-
-    [lriha@cn204 ~]$ $MAGMAROOT/testing/testing_dgetrf_mic
+```console
+[user@cn204 ~]$ $MAGMAROOT/testing/testing_dgetrf_mic
+[user@cn204 ~]$ export OMP_NUM_THREADS=16
+[lriha@cn204 ~]$ $MAGMAROOT/testing/testing_dgetrf_mic
     Usage: /apps/libs/magma-mic/magmamic-1.3.0/testing/testing_dgetrf_mic [options] [-h|--help]
 
       M     N     CPU GFlop/s (sec)   MAGMA GFlop/s (sec)   ||PA-LU||/(||A||*N)
diff --git a/docs.it4i/anselm/software/numerical-libraries/petsc.md b/docs.it4i/anselm/software/numerical-libraries/petsc.md
index 528d13ddbcaffdc9f8b0a80bee379b05602317d7..214e4074ae075aec5ce70bfb3705bab3e7600b50 100644
--- a/docs.it4i/anselm/software/numerical-libraries/petsc.md
+++ b/docs.it4i/anselm/software/numerical-libraries/petsc.md
@@ -18,9 +18,9 @@ PETSc (Portable, Extensible Toolkit for Scientific Computation) is a suite of bu
 
 You can start using PETSc on Anselm by loading the PETSc module. Module names obey this pattern:
 
-```bash
-    # module load petsc/version-compiler-mpi-blas-variant, e.g.
-      module load petsc/3.4.4-icc-impi-mkl-opt
+```console
+$ # ml petsc/version-compiler-mpi-blas-variant, e.g.
+$ ml petsc/3.4.4-icc-impi-mkl-opt
 ```
 
 where `variant` is replaced by one of `{dbg, opt, threads-dbg, threads-opt}`. The `opt` variant is compiled without debugging information (no `-g` option) and with aggressive compiler optimizations (`-O3 -xAVX`). This variant is suitable for performance measurements and production runs. In all other cases use the debug (`dbg`) variant, because it contains debugging information, performs validations and self-checks, and provides a clear stack trace and message in case of an error. The other two variants `threads-dbg` and `threads-opt` are `dbg` and `opt`, respectively, built with [OpenMP and pthreads threading support](https://www.mcs.anl.gov/petsc/miscellaneous/petscthreads.html).
diff --git a/docs.it4i/anselm/software/numerical-libraries/trilinos.md b/docs.it4i/anselm/software/numerical-libraries/trilinos.md
index 42f8bc0dc4ca5318cca883193e5fc61eb207b9b1..36688e989a9b83b657707d988472109144e02226 100644
--- a/docs.it4i/anselm/software/numerical-libraries/trilinos.md
+++ b/docs.it4i/anselm/software/numerical-libraries/trilinos.md
@@ -28,22 +28,22 @@ Currently, Trilinos in version 11.2.3 compiled with Intel Compiler is installed
 
 First, load the appropriate module:
 
-```bash
-    $ module load trilinos
+```console
+$ ml trilinos
 ```
 
 For the compilation of CMake-aware project, Trilinos provides the FIND_PACKAGE( Trilinos ) capability, which makes it easy to build against Trilinos, including linking against the correct list of libraries. For details, see <http://trilinos.sandia.gov/Finding_Trilinos.txt>
 
 For compiling using simple makefiles, Trilinos provides Makefile.export system, which allows users to include important Trilinos variables directly into their makefiles. This can be done simply by inserting the following line into the makefile:
 
-```bash
-    include Makefile.export.Trilinos
+```makefile
+include Makefile.export.Trilinos
 ```
 
 or
 
-```bash
-    include Makefile.export.<package>
+```makefile
+include Makefile.export.<package>
 ```
 
 if you are interested only in a specific Trilinos package. This will give you access to the variables such as Trilinos_CXX_COMPILER, Trilinos_INCLUDE_DIRS, Trilinos_LIBRARY_DIRS etc. For the detailed description and example makefile see <http://trilinos.sandia.gov/Export_Makefile.txt>.
diff --git a/docs.it4i/anselm/software/nvidia-cuda.md b/docs.it4i/anselm/software/nvidia-cuda.md
index 392811efa72e5275307c29c34a13b462c688827e..6b06d9384302e0e023f807dcb2eb983a11b3b73a 100644
--- a/docs.it4i/anselm/software/nvidia-cuda.md
+++ b/docs.it4i/anselm/software/nvidia-cuda.md
@@ -6,48 +6,49 @@ Guide to NVIDIA CUDA Programming and GPU Usage
 
 The default programming model for GPU accelerators on Anselm is Nvidia CUDA. To set up the environment for CUDA use
 
-```bash
-    $ module load cuda
+```console
+$ ml av cuda
+$ ml cuda **or** ml CUDA
 ```
 
 If the user code is hybrid and uses both CUDA and MPI, the MPI environment has to be set up as well. One way to do this is to use the PrgEnv-gnu module, which sets up correct combination of GNU compiler and MPI library.
 
-```bash
-    $ module load PrgEnv-gnu
+```console
+$ ml PrgEnv-gnu
 ```
 
 CUDA code can be compiled directly on login1 or login2 nodes. User does not have to use compute nodes with GPU accelerator for compilation. To compile a CUDA source code, use nvcc compiler.
 
-```bash
-    $ nvcc --version
+```console
+$ nvcc --version
 ```
 
 CUDA Toolkit comes with large number of examples, that can be helpful to start with. To compile and test these examples user should copy them to its home directory
 
-```bash
-    $ cd ~
-    $ mkdir cuda-samples
-    $ cp -R /apps/nvidia/cuda/6.5.14/samples/* ~/cuda-samples/
+```console
+$ cd ~
+$ mkdir cuda-samples
+$ cp -R /apps/nvidia/cuda/6.5.14/samples/* ~/cuda-samples/
 ```
 
 To compile an examples, change directory to the particular example (here the example used is deviceQuery) and run "make" to start the compilation
 
-```bash
-    $ cd ~/cuda-samples/1_Utilities/deviceQuery
-    $ make
+```console
+$ cd ~/cuda-samples/1_Utilities/deviceQuery
+$ make
 ```
 
 To run the code user can use PBS interactive session to get access to a node from qnvidia queue (note: use your project name with parameter -A in the qsub command) and execute the binary file
 
-```bash
-    $ qsub -I -q qnvidia -A OPEN-0-0
-    $ module load cuda
-    $ ~/cuda-samples/1_Utilities/deviceQuery/deviceQuery
+```console
+$ qsub -I -q qnvidia -A OPEN-0-0
+$ ml cuda
+$ ~/cuda-samples/1_Utilities/deviceQuery/deviceQuery
 ```
 
 Expected output of the deviceQuery example executed on a node with Tesla K20m is
 
-```bash
+```console
     CUDA Device Query (Runtime API) version (CUDART static linking)
 
     Detected 1 CUDA Capable device(s)
@@ -90,8 +91,8 @@ Expected output of the deviceQuery example executed on a node with Tesla K20m is
 
 In this section we provide a basic CUDA based vector addition code example. You can directly copy and paste the code to test it.
 
-```bash
-    $ vim test.cu
+```console
+$ vim test.cu
 
     #define N (2048*2048)
     #define THREADS_PER_BLOCK 512
@@ -180,16 +181,16 @@ In this section we provide a basic CUDA based vector addition code example. You
 
 This code can be compiled using following command
 
-```bash
-    $ nvcc test.cu -o test_cuda
+```console
+$ nvcc test.cu -o test_cuda
 ```
 
 To run the code use interactive PBS session to get access to one of the GPU accelerated nodes
 
-```bash
-    $ qsub -I -q qnvidia -A OPEN-0-0
-    $ module load cuda
-    $ ./test.cuda
+```console
+$ qsub -I -q qnvidia -A OPEN-0-0
+$ ml cuda
+$ ./test.cuda
 ```
 
 ## CUDA Libraries
@@ -287,21 +288,22 @@ SAXPY function multiplies the vector x by the scalar alpha and adds it to the ve
 
 To compile the code using NVCC compiler a "-lcublas" compiler flag has to be specified:
 
-```bash
-    $ module load cuda
-    $ nvcc -lcublas test_cublas.cu -o test_cublas_nvcc
+```console
+$ ml cuda
+$ nvcc -lcublas test_cublas.cu -o test_cublas_nvcc
 ```
 
 To compile the same code with GCC:
 
-```bash
-    $ module load cuda
-    $ gcc -std=c99 test_cublas.c -o test_cublas_icc -lcublas -lcudart
+```console
+$ ml cuda
+$ gcc -std=c99 test_cublas.c -o test_cublas_icc -lcublas -lcudart
 ```
 
 To compile the same code with Intel compiler:
 
-```bash
-    $ module load cuda intel
-    $ icc -std=c99 test_cublas.c -o test_cublas_icc -lcublas -lcudart
+```console
+$ ml cuda
+$ ml intel
+$ icc -std=c99 test_cublas.c -o test_cublas_icc -lcublas -lcudart
 ```
diff --git a/docs.it4i/anselm/software/omics-master/overview.md b/docs.it4i/anselm/software/omics-master/overview.md
index 8d3eb3d3ea5368b1b0d09cec9ec8ca7006fbf1c4..d09a0030cf06246720287c6d0ffad4bfd11825a6 100644
--- a/docs.it4i/anselm/software/omics-master/overview.md
+++ b/docs.it4i/anselm/software/omics-master/overview.md
@@ -175,16 +175,16 @@ resources. We successfully solved the problem of storing data released in BioPAX
 
 First of all, we should load ngsPipeline module:
 
-```bash
-    $ module load ngsPipeline
+```console
+$ ml ngsPipeline
 ```
 
 This command will load python/2.7.5 module and all the required modules (hpg-aligner, gatk, etc)
 
 If we launch ngsPipeline with ‘-h’, we will get the usage help:
 
-```bash
-    $ ngsPipeline -h
+```console
+$ ngsPipeline -h
     Usage: ngsPipeline.py [-h] -i INPUT -o OUTPUT -p PED --project PROJECT --queue
       QUEUE [--stages-path STAGES_PATH] [--email EMAIL]
      [--prefix PREFIX] [-s START] [-e END] --log
@@ -211,7 +211,7 @@ If we launch ngsPipeline with ‘-h’, we will get the usage help:
 
 Let us see a brief description of the arguments:
 
-```bash
+```console
       -h --help. Show the help.
 
       -i, --input. The input data directory. This directory must to have a special structure. We have to create one folder per sample (with the same name). These folders will host the fastq files. These fastq files must have the following pattern “sampleName” + “_” + “1 or 2” + “.fq”. 1 for the first pair (in paired-end sequences), and 2 for the
@@ -242,7 +242,7 @@ This is an example usage of NGSpipeline:
 
 We have a folder with the following structure in
 
-```bash
+```console
 /apps/bio/omics/1.0/sample_data/ >:
 
     /apps/bio/omics/1.0/sample_data
@@ -258,7 +258,7 @@ We have a folder with the following structure in
 
 The ped file ( file.ped) contains the following info:
 
-```bash
+```console
     #family_ID sample_ID parental_ID maternal_ID sex phenotype
     FAM sample_A 0 0 1 1
     FAM sample_B 0 0 2 2
@@ -266,24 +266,24 @@ The ped file ( file.ped) contains the following info:
 
 Now, lets load the NGSPipeline module and copy the sample data to a [scratch directory](../../storage/storage/):
 
-```bash
-    $ module load ngsPipeline
-    $ mkdir -p /scratch/$USER/omics/results
-    $ cp -r /apps/bio/omics/1.0/sample_data /scratch/$USER/omics/
+```console
+$ ml ngsPipeline
+$ mkdir -p /scratch/$USER/omics/results
+$ cp -r /apps/bio/omics/1.0/sample_data /scratch/$USER/omics/
 ```
 
 Now, we can launch the pipeline (replace OPEN-0-0 with your Project ID):
 
-```bash
-    $ ngsPipeline -i /scratch/$USER/omics/sample_data/data -o /scratch/$USER/omics/results -p /scratch/$USER/omics/sample_data/data/file.ped --project OPEN-0-0 --queue qprod
+```console
+$ ngsPipeline -i /scratch/$USER/omics/sample_data/data -o /scratch/$USER/omics/results -p /scratch/$USER/omics/sample_data/data/file.ped --project OPEN-0-0 --queue qprod
 ```
 
 This command submits the processing [jobs to the queue](../../job-submission-and-execution/).
 
 If we want to re-launch the pipeline from stage 4 until stage 20 we should use the next command:
 
-```bash
-    $ ngsPipeline -i /scratch/$USER/omics/sample_data/data -o /scratch/$USER/omics/results -p /scratch/$USER/omics/sample_data/data/file.ped -s 4 -e 20 --project OPEN-0-0 --queue qprod
+```console
+$ ngsPipeline -i /scratch/$USER/omics/sample_data/data -o /scratch/$USER/omics/results -p /scratch/$USER/omics/sample_data/data/file.ped -s 4 -e 20 --project OPEN-0-0 --queue qprod
 ```
 
 ## Details on the Pipeline
diff --git a/docs.it4i/anselm/software/openfoam.md b/docs.it4i/anselm/software/openfoam.md
index a2c98e3f2d84e11b0e73b3b6c7d9c083422101bb..865f054d326d17591cf623d0ed9d492d342e01ed 100644
--- a/docs.it4i/anselm/software/openfoam.md
+++ b/docs.it4i/anselm/software/openfoam.md
@@ -31,13 +31,13 @@ openfoam\<VERSION\>-\<COMPILER\>\<openmpiVERSION\>-\<PRECISION\>
 
 To check available modules use
 
-```bash
-    $ module avail
+```console
+$ ml av
 ```
 
 In /opt/modules/modulefiles/engineering you can see installed engineering softwares:
 
-```bash
+```console
     ------------------------------------ /opt/modules/modulefiles/engineering -------------------------------------------------------------
     ansys/14.5.x               matlab/R2013a-COM                                openfoam/2.2.1-icc-impi4.1.1.036-DP
     comsol/43b-COM             matlab/R2013a-EDU                                openfoam/2.2.1-icc-openmpi1.6.5-DP
@@ -51,10 +51,9 @@ For information how to use modules please [look here](../environment-and-modules
 
 To create OpenFOAM environment on ANSELM give the commands:
 
-```bash
-    $ module load openfoam/2.2.1-icc-openmpi1.6.5-DP
-
-    $ source $FOAM_BASHRC
+```console
+$ ml openfoam/2.2.1-icc-openmpi1.6.5-DP
+$ source $FOAM_BASHRC
 ```
 
 !!! note
@@ -62,28 +61,28 @@ To create OpenFOAM environment on ANSELM give the commands:
 
 Create a project directory within the $HOME/OpenFOAM directory named \<USER\>-\<OFversion\> and create a directory named run within it, e.g. by typing:
 
-```bash
-    $ mkdir -p $FOAM_RUN
+```console
+$ mkdir -p $FOAM_RUN
 ```
 
 Project directory is now available by typing:
 
-```bash
-    $ cd /home/<USER>/OpenFOAM/<USER>-<OFversion>/run
+```console
+$ cd /home/<USER>/OpenFOAM/<USER>-<OFversion>/run
 ```
 
 \<OFversion\> - for example \<2.2.1\>
 
 or
 
-```bash
-    $ cd $FOAM_RUN
+```console
+$ cd $FOAM_RUN
 ```
 
 Copy the tutorial examples directory in the OpenFOAM distribution to the run directory:
 
-```bash
-    $ cp -r $FOAM_TUTORIALS $FOAM_RUN
+```console
+$ cp -r $FOAM_TUTORIALS $FOAM_RUN
 ```
 
 Now you can run the first case for example incompressible laminar flow in a cavity.
@@ -108,8 +107,8 @@ Create a Bash script test.sh
 
 Job submission
 
-```bash
-    $ qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=16,walltime=03:00:00 test.sh
+```console
+$ qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=16,walltime=03:00:00 test.sh
 ```
 
 For information about job submission please [look here](../job-submission-and-execution/).
@@ -139,8 +138,8 @@ First we must run serial application bockMesh and decomposePar for preparation o
 
 Job submission
 
-```bash
-    $ qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=16,walltime=03:00:00 test.sh
+```console
+$ qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=16,walltime=03:00:00 test.sh
 ```
 
 This job create simple block mesh and domain decomposition. Check your decomposition, and submit parallel computation:
@@ -174,38 +173,38 @@ nproc – number of subdomains
 
 Job submission
 
-```bash
-    $ qsub testParallel.pbs
+```console
+$ qsub testParallel.pbs
 ```
 
 ## Compile Your Own Solver
 
 Initialize OpenFOAM environment before compiling your solver
 
-```bash
-    $ module load openfoam/2.2.1-icc-openmpi1.6.5-DP
-    $ source $FOAM_BASHRC
-    $ cd $FOAM_RUN/
+```console
+$ ml openfoam/2.2.1-icc-openmpi1.6.5-DP
+$ source $FOAM_BASHRC
+$ cd $FOAM_RUN/
 ```
 
 Create directory applications/solvers in user directory
 
-```bash
-    $ mkdir -p applications/solvers
-    $ cd applications/solvers
+```console
+$ mkdir -p applications/solvers
+$ cd applications/solvers
 ```
 
 Copy icoFoam solver’s source files
 
-```bash
-    $ cp -r $FOAM_SOLVERS/incompressible/icoFoam/ My_icoFoam
-    $ cd My_icoFoam
+```console
+$ cp -r $FOAM_SOLVERS/incompressible/icoFoam/ My_icoFoam
+$ cd My_icoFoam
 ```
 
 Rename icoFoam.C to My_icoFOAM.C
 
-```bash
-    $ mv icoFoam.C My_icoFoam.C
+```console
+$ mv icoFoam.C My_icoFoam.C
 ```
 
 Edit _files_ file in _Make_ directory:
@@ -224,6 +223,6 @@ and change to:
 
 In directory My_icoFoam give the compilation command:
 
-```bash
-    $ wmake
+```console
+$ wmake
 ```
diff --git a/docs.it4i/anselm/software/paraview.md b/docs.it4i/anselm/software/paraview.md
index 7007369800f88b5c672640ee8c32952ca73d4df7..830ce72a26c0e6a22683534e12444392040c1a58 100644
--- a/docs.it4i/anselm/software/paraview.md
+++ b/docs.it4i/anselm/software/paraview.md
@@ -12,32 +12,35 @@ Homepage : <http://www.paraview.org/>
 
 ## Installed Version
 
-Currently, version 4.0.1 compiled with GCC 4.8.1 against Bull MPI library and OSMesa 10.0 is installed on Anselm.
+Currently, version 5.1.2 compiled with intel/2017a against intel MPI library and OSMesa 12.0.2 is installed on Anselm.
 
 ## Usage
 
-On Anselm, ParaView is to be used in client-server mode. A parallel ParaView server is launched on compute nodes by the user, and client is launched on your desktop PC to control and view the visualization. Download ParaView client application for your OS here: <http://paraview.org/paraview/resources/software.php>. Important : **your version must match the version number installed on Anselm** ! (currently v4.0.1)
+On Anselm, ParaView is to be used in client-server mode. A parallel ParaView server is launched on compute nodes by the user, and client is launched on your desktop PC to control and view the visualization. Download ParaView client application for your OS here: <http://paraview.org/paraview/resources/software.php>.
+
+!!! warning
+    Your version must match the version number installed on Anselm.
 
 ### Launching Server
 
 To launch the server, you must first allocate compute nodes, for example
 
-```bash
-    $ qsub -I -q qprod -A OPEN-0-0 -l select=2
+```console
+$ qsub -I -q qprod -A OPEN-0-0 -l select=2
 ```
 
 to launch an interactive session on 2 nodes. Refer to [Resource Allocation and Job Execution](../job-submission-and-execution/) for details.
 
 After the interactive session is opened, load the ParaView module :
 
-```bash
-    $ module add paraview
+```console
+$ ml ParaView/5.1.2-intel-2017a-mpi
 ```
 
 Now launch the parallel server, with number of nodes times 16 processes:
 
-```bash
-    $ mpirun -np 32 pvserver --use-offscreen-rendering
+```console
+$ mpirun -np 32 pvserver --use-offscreen-rendering
     Waiting for client...
     Connection URL: cs://cn77:11111
     Accepting connection(s): cn77:11111
@@ -49,23 +52,26 @@ Note the that the server is listening on compute node cn77 in this case, we shal
 
 Because a direct connection is not allowed to compute nodes on Anselm, you must establish a SSH tunnel to connect to the server. Choose a port number on your PC to be forwarded to ParaView server, for example 12345. If your PC is running Linux, use this command to establish a SSH tunnel:
 
-```bash
-    ssh -TN -L 12345:cn77:11111 username@anselm.it4i.cz
+```console
+$ ssh -TN -L 12345:cn77:11111 username@anselm.it4i.cz
 ```
 
-replace username with your login and cn77 with the name of compute node your ParaView server is running on (see previous step). If you use PuTTY on Windows, load Anselm connection configuration, t>hen go to Connection-> SSH>->Tunnels to set up the port forwarding. Click Remote radio button. Insert 12345 to Source port textbox. Insert cn77:11111. Click Add button, then Open.
+replace username with your login and cn77 with the name of compute node your ParaView server is running on (see previous step). 
+
+If you use PuTTY on Windows, load Anselm connection configuration, then go to *Connection* -> *SSH* -> *Tunnels* to set up the port forwarding.
+
+Fill the Source port and Destination fields. **Do not forget to click the Add button.**
+
+![](../../img/paraview_ssh_tunnel.png "SSH Tunnel in PuTTY")
 
-Now launch ParaView client installed on your desktop PC. Select File->Connect..., click Add Server. Fill in the following :
+Now launch ParaView client installed on your desktop PC. Select *File* -> *Connect*... and fill in the following:
 
-Name : Anselm tunnel
-Server Type : Client/Server
-Host : localhost
-Port : 12345
+![](../../img/paraview_connect.png "ParaView - Connect to server")
 
-Click Configure, Save, the configuration is now saved for later use. Now click Connect to connect to the ParaView server. In your terminal where you have interactive session with ParaView server launched, you should see:
+The configuration is now saved for later use. Now click Connect to connect to the ParaView server. In your terminal where you have interactive session with ParaView server launched, you should see:
 
-```bash
-    Client connected.
+```console
+Client connected.
 ```
 
 You can now use Parallel ParaView.
diff --git a/docs.it4i/anselm/software/virtualization.md b/docs.it4i/anselm/software/virtualization.md
index a5c7c95aa5f2c1df601606ecc42ed2c8398fb249..109a771b0c5307471a0131e61298eae9e242467f 100644
--- a/docs.it4i/anselm/software/virtualization.md
+++ b/docs.it4i/anselm/software/virtualization.md
@@ -154,7 +154,7 @@ Create job script according recommended
 
 Example job for Windows virtual machine:
 
-```bash
+```bash
     #/bin/sh
 
     JOB_DIR=/scratch/$USER/win/${PBS_JOBID}
@@ -192,7 +192,7 @@ Job script links application data (win), input data (data) and run script (run.b
 
 Example run script (run.bat) for Windows virtual machine:
 
-```bash
+```doscon
     z:
     cd winappl
     call application.bat z:data z:output
@@ -210,40 +210,37 @@ Virtualization is enabled only on compute nodes, virtualization does not work on
 
 Load QEMU environment module:
 
-```bash
-    $ module add qemu
+```console
+$ module add qemu
 ```
 
 Get help
 
-```bash
-    $ man qemu
+```console
+$ man qemu
 ```
 
 Run virtual machine (simple)
 
-```bash
-    $ qemu-system-x86_64 -hda linux.img -enable-kvm -cpu host -smp 16 -m 32768 -vga std -vnc :0
-
-    $ qemu-system-x86_64 -hda win.img   -enable-kvm -cpu host -smp 16 -m 32768 -vga std -localtime -usb -usbdevice tablet -vnc :0
+```console
+$ qemu-system-x86_64 -hda linux.img -enable-kvm -cpu host -smp 16 -m 32768 -vga std -vnc :0
+$ qemu-system-x86_64 -hda win.img   -enable-kvm -cpu host -smp 16 -m 32768 -vga std -localtime -usb -usbdevice tablet -vnc :0
 ```
 
 You can access virtual machine by VNC viewer (option -vnc) connecting to IP address of compute node. For VNC you must use VPN network.
 
 Install virtual machine from ISO file
 
-```bash
-    $ qemu-system-x86_64 -hda linux.img -enable-kvm -cpu host -smp 16 -m 32768 -vga std -cdrom linux-install.iso -boot d -vnc :0
-
-    $ qemu-system-x86_64 -hda win.img   -enable-kvm -cpu host -smp 16 -m 32768 -vga std -localtime -usb -usbdevice tablet -cdrom win-install.iso -boot d -vnc :0
+```console
+$ qemu-system-x86_64 -hda linux.img -enable-kvm -cpu host -smp 16 -m 32768 -vga std -cdrom linux-install.iso -boot d -vnc :0
+$ qemu-system-x86_64 -hda win.img   -enable-kvm -cpu host -smp 16 -m 32768 -vga std -localtime -usb -usbdevice tablet -cdrom win-install.iso -boot d -vnc :0
 ```
 
 Run virtual machine using optimized devices, user network back-end with sharing and port forwarding, in snapshot mode
 
-```bash
-    $ qemu-system-x86_64 -drive file=linux.img,media=disk,if=virtio -enable-kvm -cpu host -smp 16 -m 32768 -vga std -device virtio-net-pci,netdev=net0 -netdev user,id=net0,smb=/scratch/$USER/tmp,hostfwd=tcp::2222-:22 -vnc :0 -snapshot
-
-    $ qemu-system-x86_64 -drive file=win.img,media=disk,if=virtio -enable-kvm -cpu host -smp 16 -m 32768 -vga std -localtime -usb -usbdevice tablet -device virtio-net-pci,netdev=net0 -netdev user,id=net0,smb=/scratch/$USER/tmp,hostfwd=tcp::3389-:3389 -vnc :0 -snapshot
+```console
+$ qemu-system-x86_64 -drive file=linux.img,media=disk,if=virtio -enable-kvm -cpu host -smp 16 -m 32768 -vga std -device virtio-net-pci,netdev=net0 -netdev user,id=net0,smb=/scratch/$USER/tmp,hostfwd=tcp::2222-:22 -vnc :0 -snapshot
+$ qemu-system-x86_64 -drive file=win.img,media=disk,if=virtio -enable-kvm -cpu host -smp 16 -m 32768 -vga std -localtime -usb -usbdevice tablet -device virtio-net-pci,netdev=net0 -netdev user,id=net0,smb=/scratch/$USER/tmp,hostfwd=tcp::3389-:3389 -vnc :0 -snapshot
 ```
 
 Thanks to port forwarding you can access virtual machine via SSH (Linux) or RDP (Windows) connecting to IP address of compute node (and port 2222 for SSH). You must use VPN network).
@@ -259,22 +256,22 @@ In default configuration IP network 10.0.2.0/24 is used, host has IP address 10.
 
 Simple network setup
 
-```bash
-    $ qemu-system-x86_64 ... -net nic -net user
+```console
+$ qemu-system-x86_64 ... -net nic -net user
 ```
 
 (It is default when no -net options are given.)
 
 Simple network setup with sharing and port forwarding (obsolete but simpler syntax, lower performance)
 
-```bash
-    $ qemu-system-x86_64 ... -net nic -net user,smb=/scratch/$USER/tmp,hostfwd=tcp::3389-:3389
+```console
+$ qemu-system-x86_64 ... -net nic -net user,smb=/scratch/$USER/tmp,hostfwd=tcp::3389-:3389
 ```
 
 Optimized network setup with sharing and port forwarding
 
-```bash
-    $ qemu-system-x86_64 ... -device virtio-net-pci,netdev=net0 -netdev user,id=net0,smb=/scratch/$USER/tmp,hostfwd=tcp::2222-:22
+```console
+$ qemu-system-x86_64 ... -device virtio-net-pci,netdev=net0 -netdev user,id=net0,smb=/scratch/$USER/tmp,hostfwd=tcp::2222-:22
 ```
 
 ### Advanced Networking
@@ -285,40 +282,40 @@ Sometime your virtual machine needs access to internet (install software, update
 
 Load VDE enabled QEMU environment module (unload standard QEMU module first if necessary).
 
-```bash
-    $ module add qemu/2.1.2-vde2
+```console
+$ module add qemu/2.1.2-vde2
 ```
 
 Create virtual network switch.
 
-```bash
-    $ vde_switch -sock /tmp/sw0 -mgmt /tmp/sw0.mgmt -daemon
+```console
+$ vde_switch -sock /tmp/sw0 -mgmt /tmp/sw0.mgmt -daemon
 ```
 
 Run SLIRP daemon over SSH tunnel on login node and connect it to virtual network switch.
 
-```bash
-    $ dpipe vde_plug /tmp/sw0 = ssh login1 $VDE2_DIR/bin/slirpvde -s - --dhcp &
+```console
+$ dpipe vde_plug /tmp/sw0 = ssh login1 $VDE2_DIR/bin/slirpvde -s - --dhcp &
 ```
 
 Run qemu using vde network back-end, connect to created virtual switch.
 
 Basic setup (obsolete syntax)
 
-```bash
-    $ qemu-system-x86_64 ... -net nic -net vde,sock=/tmp/sw0
+```console
+$ qemu-system-x86_64 ... -net nic -net vde,sock=/tmp/sw0
 ```
 
 Setup using virtio device (obsolete syntax)
 
-```bash
-    $ qemu-system-x86_64 ... -net nic,model=virtio -net vde,sock=/tmp/sw0
+```console
+$ qemu-system-x86_64 ... -net nic,model=virtio -net vde,sock=/tmp/sw0
 ```
 
 Optimized setup
 
-```bash
-    $ qemu-system-x86_64 ... -device virtio-net-pci,netdev=net0 -netdev vde,id=net0,sock=/tmp/sw0
+```console
+$ qemu-system-x86_64 ... -device virtio-net-pci,netdev=net0 -netdev vde,id=net0,sock=/tmp/sw0
 ```
 
 #### TAP Interconnect
@@ -329,9 +326,8 @@ Cluster Anselm provides TAP device tap0 for your job. TAP interconnect does not
 
 Run qemu with TAP network back-end:
 
-```bash
-    $ qemu-system-x86_64 ... -device virtio-net-pci,netdev=net1
-                           -netdev tap,id=net1,ifname=tap0,script=no,downscript=no
+```console
+$ qemu-system-x86_64 ... -device virtio-net-pci,netdev=net1 -netdev tap,id=net1,ifname=tap0,script=no,downscript=no
 ```
 
 Interface tap0 has IP address 192.168.1.1 and network mask 255.255.255.0 (/24). In virtual machine use IP address from range 192.168.1.2-192.168.1.254. For your convenience some ports on tap0 interface are redirected to higher numbered ports, so you as non-privileged user can provide services on these ports.
@@ -344,15 +340,17 @@ Redirected ports:
 
 You can configure IP address of virtual machine statically or dynamically. For dynamic addressing provide your DHCP server on port 3067 of tap0 interface, you can also provide your DNS server on port 3053 of tap0 interface for example:
 
-```bash
-    $ dnsmasq --interface tap0 --bind-interfaces -p 3053 --dhcp-alternate-port=3067,68 --dhcp-range=192.168.1.15,192.168.1.32 --dhcp-leasefile=/tmp/dhcp.leasefile
+```console
+$ dnsmasq --interface tap0 --bind-interfaces -p 3053 --dhcp-alternate-port=3067,68 --dhcp-range=192.168.1.15,192.168.1.32 --dhcp-leasefile=/tmp/dhcp.leasefile
 ```
 
 You can also provide your SMB services (on ports 3139, 3445) to obtain high performance data sharing.
 
 Example smb.conf (not optimized)
 
-```bash
+```console
+$ cat smb.conf
+
     [global]
     socket address=192.168.1.1
     smb ports = 3445 3139
@@ -387,8 +385,8 @@ Example smb.conf (not optimized)
 
 Run SMB services
 
-```bash
-    smbd -s /tmp/qemu-smb/smb.conf
+```console
+$ smbd -s /tmp/qemu-smb/smb.conf
 ```
 
 Virtual machine can of course have more than one network interface controller, virtual machine can use more than one network back-end. So, you can combine for example use network back-end and TAP interconnect.
@@ -397,15 +395,15 @@ Virtual machine can of course have more than one network interface controller, v
 
 In snapshot mode the image is not written; changes are written to a temporary file (and discarded after the virtual machine exits). **It is the strongly recommended mode for running your jobs.** Set the TMPDIR environment variable to a local scratch directory for placement of temporary files.
 
-```bash
-    $ export TMPDIR=/lscratch/${PBS_JOBID}
-    $ qemu-system-x86_64 ... -snapshot
+```console
+$ export TMPDIR=/lscratch/${PBS_JOBID}
+$ qemu-system-x86_64 ... -snapshot
 ```
 
 ### Windows Guests
 
 For Windows guests we recommend these options, life will be easier:
 
-```bash
-    $ qemu-system-x86_64 ... -localtime -usb -usbdevice tablet
+```console
+$ qemu-system-x86_64 ... -localtime -usb -usbdevice tablet
 ```
diff --git a/docs.it4i/anselm/storage.md b/docs.it4i/anselm/storage.md
index ad082c0d8486efa67428bae1a327527990fc64f2..2bc141c24f0b2b805e362d58ad1aab4f1da2956c 100644
--- a/docs.it4i/anselm/storage.md
+++ b/docs.it4i/anselm/storage.md
@@ -31,14 +31,14 @@ There is default stripe configuration for Anselm Lustre filesystems. However, us
 
 Use the lfs getstripe for getting the stripe parameters. Use the lfs setstripe command for setting the stripe parameters to get optimal I/O performance The correct stripe setting depends on your needs and file access patterns.
 
-```bash
+```console
 $ lfs getstripe dir|filename
 $ lfs setstripe -s stripe_size -c stripe_count -o stripe_offset dir|filename
 ```
 
 Example:
 
-```bash
+```console
 $ lfs getstripe /scratch/username/
 /scratch/username/
 stripe_count:   1 stripe_size:    1048576 stripe_offset:  -1
@@ -53,7 +53,7 @@ In this example, we view current stripe setting of the /scratch/username/ direct
 
 Use lfs check OSTs to see the number and status of active OSTs for each filesystem on Anselm. Learn more by reading the man page
 
-```bash
+```console
 $ lfs check osts
 $ man lfs
 ```
@@ -98,7 +98,7 @@ The architecture of Lustre on Anselm is composed of two metadata servers (MDS) a
   * 2 groups of 5 disks in RAID5
   * 2 hot-spare disks
 
-\###HOME
+### HOME
 
 The HOME filesystem is mounted in directory /home. Users home directories /home/username reside on this filesystem. Accessible capacity is 320TB, shared among all users. Individual users are restricted by filesystem usage quotas, set to 250GB per user. If 250GB should prove as insufficient for particular user, please contact [support](https://support.it4i.cz/rt), the quota may be lifted upon request.
 
@@ -127,15 +127,16 @@ Default stripe size is 1MB, stripe count is 1. There are 22 OSTs dedicated for t
 | Default stripe count | 1      |
 | Number of OSTs       | 22     |
 
-\###SCRATCH
+### SCRATCH
 
 The SCRATCH filesystem is mounted in directory /scratch. Users may freely create subdirectories and files on the filesystem. Accessible capacity is 146TB, shared among all users. Individual users are restricted by filesystem usage quotas, set to 100TB per user. The purpose of this quota is to prevent runaway programs from filling the entire filesystem and deny service to other users. If 100TB should prove as insufficient for particular user, please contact [support](https://support.it4i.cz/rt), the quota may be lifted upon request.
 
 !!! note
     The Scratch filesystem is intended for temporary scratch data generated during the calculation as well as for high performance access to input and output files. All I/O intensive jobs must use the SCRATCH filesystem as their working directory.
 
-    >Users are advised to save the necessary data from the SCRATCH filesystem to HOME filesystem after the calculations and clean up the scratch files.
+    Users are advised to save the necessary data from the SCRATCH filesystem to HOME filesystem after the calculations and clean up the scratch files.
 
+!!! warning
     Files on the SCRATCH filesystem that are **not accessed for more than 90 days** will be automatically **deleted**.
 
 The SCRATCH filesystem is realized as Lustre parallel filesystem and is available from all login and computational nodes. Default stripe size is 1MB, stripe count is 1. There are 10 OSTs dedicated for the SCRATCH filesystem.
@@ -157,13 +158,13 @@ The SCRATCH filesystem is realized as Lustre parallel filesystem and is availabl
 
 User quotas on the file systems can be checked and reviewed using following command:
 
-```bash
+```console
 $ lfs quota dir
 ```
 
 Example for Lustre HOME directory:
 
-```bash
+```console
 $ lfs quota /home
 Disk quotas for user user001 (uid 1234):
     Filesystem kbytes   quota   limit   grace   files   quota   limit   grace
@@ -177,7 +178,7 @@ In this example, we view current quota size limit of 250GB and 300MB currently u
 
 Example for Lustre SCRATCH directory:
 
-```bash
+```console
 $ lfs quota /scratch
 Disk quotas for user user001 (uid 1234):
      Filesystem kbytes   quota   limit   grace   files   quota   limit   grace
@@ -191,13 +192,13 @@ In this example, we view current quota size limit of 100TB and 8KB currently use
 
 To have a better understanding of where the space is exactly used, you can use following command to find out.
 
-```bash
+```console
 $ du -hs dir
 ```
 
 Example for your HOME directory:
 
-```bash
+```console
 $ cd /home
 $ du -hs * .[a-zA-z0-9]* | grep -E "[0-9]*G|[0-9]*M" | sort -hr
 258M     cuda-samples
@@ -211,11 +212,11 @@ This will list all directories which are having MegaBytes or GigaBytes of consum
 
 To have a better understanding of previous commands, you can read manpages.
 
-```bash
+```console
 $ man lfs
 ```
 
-```bash
+```console
 $ man du
 ```
 
@@ -225,7 +226,7 @@ Extended ACLs provide another security mechanism beside the standard POSIX ACLs
 
 ACLs on a Lustre file system work exactly like ACLs on any Linux file system. They are manipulated with the standard tools in the standard manner. Below, we create a directory and allow a specific user access.
 
-```bash
+```console
 [vop999@login1.anselm ~]$ umask 027
 [vop999@login1.anselm ~]$ mkdir test
 [vop999@login1.anselm ~]$ ls -ld test
@@ -340,7 +341,7 @@ The procedure to obtain the CESNET access is quick and trouble-free.
 ### Understanding CESNET Storage
 
 !!! note
-    It is very important to understand the CESNET storage before uploading data. Please read <https://du.cesnet.cz/en/navody/home-migrace-plzen/start> first.
+    It is very important to understand the CESNET storage before uploading data. Please read [the CESNET storage documentation](https://du.cesnet.cz/en/navody/home-migrace-plzen/start) first.
 
 Once registered for CESNET Storage, you may [access the storage](https://du.cesnet.cz/en/navody/faq/start) in number of ways. We recommend the SSHFS and RSYNC methods.
 
@@ -353,40 +354,40 @@ The SSHFS provides a very convenient way to access the CESNET Storage. The stora
 
 First, create the mount point
 
-```bash
-    $ mkdir cesnet
+```console
+$ mkdir cesnet
 ```
 
 Mount the storage. Note that you can choose among the ssh.du1.cesnet.cz (Plzen), ssh.du2.cesnet.cz (Jihlava), ssh.du3.cesnet.cz (Brno) Mount tier1_home **(only 5120M !)**:
 
-```bash
-    $ sshfs username@ssh.du1.cesnet.cz:. cesnet/
+```console
+$ sshfs username@ssh.du1.cesnet.cz:. cesnet/
 ```
 
 For easy future access from Anselm, install your public key
 
-```bash
-    $ cp .ssh/id_rsa.pub cesnet/.ssh/authorized_keys
+```console
+$ cp .ssh/id_rsa.pub cesnet/.ssh/authorized_keys
 ```
 
 Mount tier1_cache_tape for the Storage VO:
 
-```bash
-    $ sshfs username@ssh.du1.cesnet.cz:/cache_tape/VO_storage/home/username cesnet/
+```console
+$ sshfs username@ssh.du1.cesnet.cz:/cache_tape/VO_storage/home/username cesnet/
 ```
 
 View the archive, copy the files and directories in and out
 
-```bash
-    $ ls cesnet/
-    $ cp -a mydir cesnet/.
-    $ cp cesnet/myfile .
+```console
+$ ls cesnet/
+$ cp -a mydir cesnet/.
+$ cp cesnet/myfile .
 ```
 
 Once done, please remember to unmount the storage
 
-```bash
-    $ fusermount -u cesnet
+```console
+$ fusermount -u cesnet
 ```
 
 ### Rsync Access
@@ -398,20 +399,20 @@ Rsync is a fast and extraordinarily versatile file copying tool. It is famous fo
 
 Rsync finds files that need to be transferred using a "quick check" algorithm (by default) that looks for files that have changed in size or in last-modified time.  Any changes in the other preserved attributes (as requested by options) are made on the destination file directly when the quick check indicates that the file's data does not need to be updated.
 
-More about Rsync at <https://du.cesnet.cz/en/navody/rsync/start#pro_bezne_uzivatele>
+[More about Rsync](https://du.cesnet.cz/en/navody/rsync/start#pro_bezne_uzivatele)
 
 Transfer large files to/from CESNET storage, assuming membership in the Storage VO
 
-```bash
-    $ rsync --progress datafile username@ssh.du1.cesnet.cz:VO_storage-cache_tape/.
-    $ rsync --progress username@ssh.du1.cesnet.cz:VO_storage-cache_tape/datafile .
+```console
+$ rsync --progress datafile username@ssh.du1.cesnet.cz:VO_storage-cache_tape/.
+$ rsync --progress username@ssh.du1.cesnet.cz:VO_storage-cache_tape/datafile .
 ```
 
 Transfer large directories to/from CESNET storage, assuming membership in the Storage VO
 
-```bash
-    $ rsync --progress -av datafolder username@ssh.du1.cesnet.cz:VO_storage-cache_tape/.
-    $ rsync --progress -av username@ssh.du1.cesnet.cz:VO_storage-cache_tape/datafolder .
+```console
+$ rsync --progress -av datafolder username@ssh.du1.cesnet.cz:VO_storage-cache_tape/.
+$ rsync --progress -av username@ssh.du1.cesnet.cz:VO_storage-cache_tape/datafolder .
 ```
 
 Transfer rates of about 28 MB/s can be expected.
diff --git a/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md
index f064b2e6a89dc4b2c8290a0b552eac82ca973941..b2fa2f58dd0a04e2e5ace8e3035dc5b95fc4a1b5 100644
--- a/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md
+++ b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md
@@ -2,6 +2,8 @@
 
 The **Virtual Network Computing** (**VNC**) is a graphical [desktop sharing](http://en.wikipedia.org/wiki/Desktop_sharing "Desktop sharing") system that uses the [Remote Frame Buffer protocol (RFB)](http://en.wikipedia.org/wiki/RFB_protocol "RFB protocol") to remotely control another [computer](http://en.wikipedia.org/wiki/Computer "Computer"). It transmits the [keyboard](http://en.wikipedia.org/wiki/Computer_keyboard "Computer keyboard") and [mouse](http://en.wikipedia.org/wiki/Computer_mouse "Computer mouse") events from one computer to another, relaying the graphical [screen](http://en.wikipedia.org/wiki/Computer_screen "Computer screen") updates back in the other direction, over a [network](http://en.wikipedia.org/wiki/Computer_network "Computer network").
 
+VNC-based connections are faster (require less network bandwidth) than [X11](x-window-system) applications forwarded directly through ssh.
+
 The recommended clients are [TightVNC](http://www.tightvnc.com) or [TigerVNC](http://sourceforge.net/apps/mediawiki/tigervnc/index.php?title=Main_Page) (free, open source, available for almost any platform).
 
 ## Create VNC Password
@@ -9,7 +11,7 @@ The recommended clients are [TightVNC](http://www.tightvnc.com) or [TigerVNC](ht
 !!! note
     Local VNC password should be set before the first login. Do use a strong password.
 
-```bash
+```console
 [username@login2 ~]$ vncpasswd
 Password:
 Verify:
@@ -20,20 +22,20 @@ Verify:
 !!! note
     To access VNC a local vncserver must be started first and also a tunnel using SSH port forwarding must be established.
 
-[See below](vnc.md#linux-example-of-creating-a-tunnel) for the details on SSH tunnels. In this example we use port 61.
+[See below](#linuxmac-os-example-of-creating-a-tunnel) for the details on SSH tunnels. In this example we use display number 61.
 
-You can find ports which are already occupied. Here you can see that ports " /usr/bin/Xvnc :79" and " /usr/bin/Xvnc :60" are occupied.
+You can find display numbers which are already occupied on login2. Here you can see that displays " /usr/bin/Xvnc :79" and " /usr/bin/Xvnc :60" are occupied.
 
-```bash
+```console
 [username@login2 ~]$ ps aux | grep Xvnc
-username    5971 0.0 0.0 201072 92564 ?        SN   Sep22   4:19 /usr/bin/Xvnc :79 -desktop login2:79 (username) -auth /home/gre196/.Xauthority -geometry 1024x768 -rfbwait 30000 -rfbauth /home/username/.vnc/passwd -rfbport 5979 -fp catalogue:/etc/X11/fontpath.d -pn
-username    10296 0.0 0.0 131772 21076 pts/29   SN   13:01   0:01 /usr/bin/Xvnc :60 -desktop login2:61 (username) -auth /home/username/.Xauthority -geometry 1600x900 -depth 16 -rfbwait 30000 -rfbauth /home/jir13/.vnc/passwd -rfbport 5960 -fp catalogue:/etc/X11/fontpath.d -pn
+username    5971 0.0 0.0 201072 92564 ?        SN   Sep22   4:19 /usr/bin/Xvnc :79 -desktop login2:79 (username) -auth /home/vop999/.Xauthority -geometry 1024x768 -rfbwait 30000 -rfbauth /home/username/.vnc/passwd -rfbport 5979 -fp catalogue:/etc/X11/fontpath.d -pn
+username    10296 0.0 0.0 131772 21076 pts/29   SN   13:01   0:01 /usr/bin/Xvnc :60 -desktop login2:61 (username) -auth /home/vop999/.Xauthority -geometry 1600x900 -depth 16 -rfbwait 30000 -rfbauth /home/vop999/.vnc/passwd -rfbport 5960 -fp catalogue:/etc/X11/fontpath.d -pn
 .....
 ```
 
 Choose a free display number, e.g. 61, and start your VNC server:
 
-```bash
+```console
 [username@login2 ~]$ vncserver :61 -geometry 1600x900 -depth 16
 
 New 'login2:1 (username)' desktop is login2:1
@@ -42,9 +44,9 @@ Starting applications specified in /home/username/.vnc/xstartup
 Log file is /home/username/.vnc/login2:1.log
 ```
 
-Check if VNC server is started on the port (in this example 61):
+Check if VNC server is started (in this example display number is 61):
 
-```bash
+```console
 [username@login2 .vnc]$ vncserver -list
 
 TigerVNC server sessions:
@@ -55,13 +57,15 @@ X DISPLAY #     PROCESS ID
 
 Another command:
 
-```bash
+```console
 [username@login2 .vnc]$  ps aux | grep Xvnc
 
-username    10296 0.0 0.0 131772 21076 pts/29   SN   13:01   0:01 /usr/bin/Xvnc :61 -desktop login2:61 (username) -auth /home/jir13/.Xauthority -geometry 1600x900 -depth 16 -rfbwait 30000 -rfbauth /home/username/.vnc/passwd -rfbport 5961 -fp catalogue:/etc/X11/fontpath.d -pn
+username    10296 0.0 0.0 131772 21076 pts/29   SN   13:01   0:01 /usr/bin/Xvnc :61 -desktop login2:61 (username) -auth /home/vop999/.Xauthority -geometry 1600x900 -depth 16 -rfbwait 30000 -rfbauth /home/username/.vnc/passwd -rfbport 5961 -fp catalogue:/etc/X11/fontpath.d -pn
 ```
+!!! note
+    The vncserver runs on port 5900 + display number. In this example the display number is 61, so the VNC server listens on port 5961.
 
-To access the VNC server you have to create a tunnel between the login node using TCP **port 5961** and your machine using a free TCP port (for simplicity the very same, in this case).
+To access the VNC server you have to create a tunnel between the login node using TCP **port 5961** and your machine using a free TCP port (for simplicity the very same, in this case). See examples for [Linux/Mac OS](#linuxmac-os-example-of-creating-a-tunnel) and [Windows](#windows-example-of-creating-a-tunnel).
 
 !!! note
     The tunnel must point to the same login node where you launched the VNC server, eg. login2. If you use just cluster-name.it4i.cz, the tunnel might point to a different node due to DNS round robin.
@@ -70,13 +74,13 @@ To access the VNC server you have to create a tunnel between the login node usin
 
 At your machine, create the tunnel:
 
-```bash
+```console
 local $  ssh -TN -f username@login2.cluster-name.it4i.cz -L 5961:localhost:5961
 ```
 
 Issue the following command to check the tunnel is established (please note the PID 2022 in the last column, you'll need it for closing the tunnel):
 
-```bash
+```console
 local $ netstat -natp | grep 5961
 (Not all processes could be identified, non-owned process info
  will not be shown, you would have to be root to see it all.)
@@ -86,14 +90,14 @@ tcp6       0      0 ::1:5961                :::*                    LISTEN
 
 Or on Mac OS use this command:
 
-```bash
+```console
 local-mac $ lsof -n -i4TCP:5961 | grep LISTEN
 ssh 75890 sta545 7u IPv4 0xfb062b5c15a56a3b 0t0 TCP 127.0.0.1:5961 (LISTEN)
 ```
 
 Connect with the VNC client:
 
-```bash
+```console
 local $ vncviewer 127.0.0.1:5961
 ```
 
@@ -101,7 +105,7 @@ In this example, we connect to VNC server on port 5961, via the ssh tunnel. The
 
 You have to destroy the SSH tunnel which is still running at the background after you finish the work. Use the following command (PID 2022 in this case, see the netstat command above):
 
-```bash
+```console
 kill 2022
 ```
 
@@ -113,7 +117,7 @@ Start vncserver using command vncserver described above.
 
 Search for the localhost and port number (in this case 127.0.0.1:5961).
 
-```bahs
+```console
 [username@login2 .vnc]$ netstat -tanp | grep Xvnc
 (Not all processes could be identified, non-owned process info
  will not be shown, you would have to be root to see it all.)
@@ -160,7 +164,7 @@ Uncheck both options below the slider:
 
 If the screen gets locked you have to kill the screensaver. Do not to forget to disable the screensaver then.
 
-```bash
+```console
 [username@login2 .vnc]$ ps aux | grep screen
 username     1503 0.0 0.0 103244   892 pts/4    S+   14:37   0:00 grep screen
 username     24316 0.0 0.0 270564 3528 ?        Ss   14:12   0:00 gnome-screensaver
@@ -172,7 +176,7 @@ username     24316 0.0 0.0 270564 3528 ?        Ss   14:12   0:00 gnome-screensa
 
 You should kill your VNC server using command:
 
-```bash
+```console
 [username@login2 .vnc]$  vncserver  -kill :61
 Killing Xvnc process ID 7074
 Xvnc process ID 7074 already killed
@@ -180,7 +184,7 @@ Xvnc process ID 7074 already killed
 
 Or this way:
 
-```bash
+```console
 [username@login2 .vnc]$  pkill vnc
 ```
 
@@ -194,19 +198,19 @@ Open a Terminal (Applications -> System Tools -> Terminal). Run all the next com
 
 Allow incoming X11 graphics from the compute nodes at the login node:
 
-```bash
+```console
 $ xhost +
 ```
 
 Get an interactive session on a compute node (for more detailed info [look here](../../../anselm/job-submission-and-execution/)). Use the **-v DISPLAY** option to propagate the DISPLAY on the compute node. In this example, we want a complete node (24 cores in this example) from the production queue:
 
-```bash
+```console
 $ qsub -I -v DISPLAY=$(uname -n):$(echo $DISPLAY | cut -d ':' -f 2) -A PROJECT_ID -q qprod -l select=1:ncpus=24
 ```
 
 Test that the DISPLAY redirection into your VNC session works, by running a X11 application (e. g. XTerm) on the assigned compute node:
 
-```bash
+```console
 $ xterm
 ```
 
diff --git a/docs.it4i/general/accessing-the-clusters/graphical-user-interface/x-window-system.md b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/x-window-system.md
index b9c6951295a6b4d96fceb53c6d383464bee6d5c1..961123f511f779edc6e508aec5e6461f5506f06d 100644
--- a/docs.it4i/general/accessing-the-clusters/graphical-user-interface/x-window-system.md
+++ b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/x-window-system.md
@@ -9,7 +9,7 @@ The X Window system is a principal way to get GUI access to the clusters. The **
 
 In order to display the graphical user interface (GUI) of various software tools, you need to enable X display forwarding. On Linux and Mac, log in using the -X option to the ssh client:
 
-```bash
+```console
  local $ ssh -X username@cluster-name.it4i.cz
 ```
 
@@ -19,13 +19,13 @@ On Windows use the PuTTY client to enable X11 forwarding. In PuTTY menu, go to C
 
 To verify the forwarding, type
 
-```bash
+```console
 $ echo $DISPLAY
 ```
 
 if you receive something like
 
-```bash
+```console
 localhost:10.0
 ```
 
@@ -44,8 +44,8 @@ Mac OS users need to install [XQuartz server](https://www.xquartz.org).
 There are variety of X servers available for Windows environment. The commercial Xwin32 is very stable and rich featured. The Cygwin environment provides fully featured open-source XWin X server. For simplicity, we recommend open-source X server by the [Xming project](http://sourceforge.net/projects/xming/). For stability and full features we recommend the
 [XWin](http://x.cygwin.com/) X server by Cygwin
 
-| How to use Xwin                                                                                                                                                                                                         | How to use Xming                                                                                     |
-| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
+| How to use Xwin | How to use Xming |
+|--- | --- |
 | [Install Cygwin](http://x.cygwin.com/) Find and execute XWin.exe to start the X server on Windows desktop computer.[If no able to forward X11 using PuTTY to CygwinX](#if-no-able-to-forward-x11-using-putty-to-cygwinx) | Use Xlaunch to configure the Xming. Run Xming to start the X server on Windows desktop computer. |
 
 Read more on [http://www.math.umn.edu/systems_guide/putty_xwin32.shtml](http://www.math.umn.edu/systems_guide/putty_xwin32.shtml)
@@ -57,12 +57,12 @@ Read more on [http://www.math.umn.edu/systems_guide/putty_xwin32.html](http://ww
 
 Then launch the application as usual. Use the & to run the application in background.
 
-```bash
-$ module load intel (idb and gvim not installed yet)
+```console
+$ ml intel (idb and gvim not installed yet)
 $ gvim &
 ```
 
-```bash
+```console
 $ xterm
 ```
 
@@ -72,7 +72,7 @@ In this example, we activate the intel programing environment tools, then start
 
 Allocate the compute nodes using -X option on the qsub command
 
-```bash
+```console
 $ qsub -q qexp -l select=2:ncpus=24 -X -I
 ```
 
@@ -80,7 +80,7 @@ In this example, we allocate 2 nodes via qexp queue, interactively. We request X
 
 **Better performance** is obtained by logging on the allocated compute node via ssh, using the -X option.
 
-```bash
+```console
 $ ssh -X r24u35n680
 ```
 
@@ -95,13 +95,13 @@ The Gnome 2.28 GUI environment is available on the clusters. We recommend to use
 To run the remote Gnome session in a window on Linux/OS X computer, you need to install Xephyr. Ubuntu package is
 xserver-xephyr, on OS X it is part of [XQuartz](http://xquartz.macosforge.org/landing/). First, launch Xephyr on local machine:
 
-```bash
+```console
 local $ Xephyr -ac -screen 1024x768 -br -reset -terminate :1 &
 ```
 
 This will open a new X window with size 1024 x 768 at DISPLAY :1. Next, ssh to the cluster with DISPLAY environment variable set and launch gnome-session
 
-```bash
+```console
 local $ DISPLAY=:1.0 ssh -XC yourname@cluster-name.it4i.cz -i ~/.ssh/path_to_your_key
 ... cluster-name MOTD...
 yourname@login1.cluster-namen.it4i.cz $ gnome-session &
@@ -109,7 +109,7 @@ yourname@login1.cluster-namen.it4i.cz $ gnome-session &
 
 On older systems where Xephyr is not available, you may also try Xnest instead of Xephyr. Another option is to launch a new X server in a separate console, via:
 
-```bash
+```console
 xinit /usr/bin/ssh -XT -i .ssh/path_to_your_key yourname@cluster-namen.it4i.cz gnome-session -- :1 vt12
 ```
 
@@ -122,7 +122,7 @@ Use Xlaunch to start the Xming server or run the XWin.exe. Select the "One windo
 
 Log in to the cluster, using PuTTY. On the cluster, run the gnome-session command.
 
-```bash
+```console
 $ gnome-session &
 ```
 
@@ -132,7 +132,7 @@ Use System-Log Out to close the gnome-session
 
 ### if No Able to Forward X11 Using PuTTY to CygwinX
 
-```bash
+```console
 [usename@login1.anselm ~]$ gnome-session &
 [1] 23691
 [usename@login1.anselm ~]$ PuTTY X11 proxy: unable to connect to forwarded X server: Network error: Connection refused
diff --git a/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md b/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md
index a2a4d429fc06d4943a0ab89df247f410ccdc4bd2..5a952ea24c738ad59acf7b94bed9fe23602b83e9 100644
--- a/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md
+++ b/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md
@@ -4,9 +4,9 @@
 
 After logging in, you can see .ssh/ directory with SSH keys and authorized_keys file:
 
-```bash
-    $ cd /home/username/
-    $ ls -la .ssh/
+```console
+$ cd /home/username/
+$ ls -la .ssh/
     total 24
     drwx------ 2 username username 4096 May 13 15:12 .
     drwxr-x---22 username username 4096 May 13 07:22 ..
@@ -21,18 +21,18 @@ After logging in, you can see .ssh/ directory with SSH keys and authorized_keys
 
 ## Access Privileges on .ssh Folder
 
-* .ssh directory: 700 (drwx------)
-* Authorized_keys, known_hosts and public key (.pub file): 644 (-rw-r--r--)
-* Private key (id_rsa/id_rsa.ppk): 600 (-rw-------)
-
-```bash
-    cd /home/username/
-    chmod 700 .ssh/
-    chmod 644 .ssh/authorized_keys
-    chmod 644 .ssh/id_rsa.pub
-    chmod 644 .ssh/known_hosts
-    chmod 600 .ssh/id_rsa
-    chmod 600 .ssh/id_rsa.ppk
+* .ssh directory: `700 (drwx------)`
+* Authorized_keys, known_hosts and public key (.pub file): `644 (-rw-r--r--)`
+* Private key (id_rsa/id_rsa.ppk): `600 (-rw-------)`
+
+```console
+$ cd /home/username/
+$ chmod 700 .ssh/
+$ chmod 644 .ssh/authorized_keys
+$ chmod 644 .ssh/id_rsa.pub
+$ chmod 644 .ssh/known_hosts
+$ chmod 600 .ssh/id_rsa
+$ chmod 600 .ssh/id_rsa.ppk
 ```
 
 ## Private Key
@@ -40,11 +40,11 @@ After logging in, you can see .ssh/ directory with SSH keys and authorized_keys
 !!! note
     The path to a private key is usually /home/username/.ssh/
 
-Private key file in "id_rsa" or `*.ppk` format is used to authenticate with the servers. Private key is present locally on local side and used for example in SSH agent Pageant (for Windows users). The private key should always be kept in a safe place.
+Private key file in `id_rsa` or `*.ppk` format is used to authenticate with the servers. Private key is present locally on local side and used for example in SSH agent Pageant (for Windows users). The private key should always be kept in a safe place.
 
 An example of private key format:
 
-```bash
+```console
     -----BEGIN RSA PRIVATE KEY-----
     MIIEpAIBAAKCAQEAqbo7jokygnBpG2wYa5NB45ns6+UKTNLMLHF0BO3zmRtKEElE
     aGqXfbYwvXlcuRb2d9/Y5dVpCZHV0kbY3NhtVOcEIe+1ROaiU9BEsUAhMNEvgiLV
@@ -76,11 +76,11 @@ An example of private key format:
 
 ## Public Key
 
-Public key file in "\*.pub" format is used to verify a digital signature. Public key is present on the remote side and allows access to the owner of the matching private key.
+Public key file in `*.pub` format is used to verify a digital signature. Public key is present on the remote side and allows access to the owner of the matching private key.
 
 An example of public key format:
 
-```bash
+```console
 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpujuOiTKCcGkbbBhrk0Hjmezr5QpM0swscXQE7fOZG0oQSURoapd9tjC9eVy5FvZ339jl1WkJkdXSRtjc2G1U5wQh77VE5qJT0ESxQCEw0S+CItWBKqXhC9E7gFY+UyP5YBZcOneh6gGHyCVfK6H215vzKr3x+/WvWl5gZGtbf+zhX6o4RJDRdjZPutYJhEsg/qtMxcCtMjfm/dZTnXeafuebV8nug3RCBUflvRb1XUrJuiX28gsd4xfG/P6L/mNMR8s4kmJEZhlhxpj8Th0iIc+XciVtXuGWQrbddcVRLxAmvkYAPGnVVOQeNj69pqAR/GXaFAhvjYkseEowQao1 username@organization.example.com
 ```
 
@@ -88,8 +88,8 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpujuOiTKCcGkbbBhrk0Hjmezr5QpM0swscXQE7fOZ
 
 First, generate a new keypair of your public and private key:
 
-```bash
-    local $ ssh-keygen -C 'username@organization.example.com' -f additional_key
+```console
+local $ ssh-keygen -C 'username@organization.example.com' -f additional_key
 ```
 
 !!! note
@@ -99,12 +99,12 @@ You can insert additional public key into authorized_keys file for authenticatio
 
 Example:
 
-```bash
-    $ cat additional_key.pub > ~/.ssh/authorized_keys
+```console
+$ cat additional_key.pub > ~/.ssh/authorized_keys
 ```
 
 In this example, we add an additional public key, stored in the file additional_key.pub, into the authorized_keys. Next time we log in, we will be able to use the private additional_key key to log in.
 
 ## How to Remove Your Own Key
 
-Removing your key from authorized_keys can be done simply by deleting the corresponding public key which can be identified by a comment at the end of line (eg. _username@organization.example.com_).
+Removing your key from authorized_keys can be done simply by deleting the corresponding public key which can be identified by a comment at the end of line (eg. `username@organization.example.com`).
diff --git a/docs.it4i/general/obtaining-login-credentials/certificates-faq.md b/docs.it4i/general/obtaining-login-credentials/certificates-faq.md
index bf0b5c5acc85d611237908cfecf5f8e73b07afd5..671db427ea0f0b3794ec44cb099c31ccde418fbd 100644
--- a/docs.it4i/general/obtaining-login-credentials/certificates-faq.md
+++ b/docs.it4i/general/obtaining-login-credentials/certificates-faq.md
@@ -21,7 +21,7 @@ However, users need only manage User and CA certificates. Note that your user ce
 
 ## Q: How Do I Get a User Certificate That Can Be Used With IT4Innovations?
 
-To get a certificate, you must make a request to your local, IGTF approved, Certificate Authority (CA). Usually you then must visit, in person, your nearest Registration Authority (RA) to verify your affiliation and identity (photo identification is required). Usually, you will then be emailed details on how to retrieve your certificate, although procedures can vary between CAs. If you are in Europe, you can locate [your trusted CA](www.eugridpma.org/members/worldmap).
+To get a certificate, you must make a request to your local, IGTF approved, Certificate Authority (CA). Usually you then must visit, in person, your nearest Registration Authority (RA) to verify your affiliation and identity (photo identification is required). Usually, you will then be emailed details on how to retrieve your certificate, although procedures can vary between CAs. If you are in Europe, you can locate [your trusted CA](https://www.eugridpma.org/members/worldmap/).
 
 In some countries certificates can also be retrieved using the TERENA Certificate Service, see the FAQ below for the link.
 
@@ -57,7 +57,7 @@ It is worth noting that gsissh-term and DART automatically updates their CA cert
 
 Lastly, if you need the CA certificates for a personal Globus 5 installation, then you can install the CA certificates from a MyProxy server with the following command.
 
-```bash
+```console
     myproxy-get-trustroots -s myproxy-prace.lrz.de
 ```
 
@@ -77,14 +77,14 @@ The following examples are for Unix/Linux operating systems only.
 
 To convert from PEM to p12, enter the following command:
 
-```bash
+```console
     openssl pkcs12 -export -in usercert.pem -inkey userkey.pem -out
     username.p12
 ```
 
 To convert from p12 to PEM, type the following _four_ commands:
 
-```bash
+```console
     openssl pkcs12 -in username.p12 -out usercert.pem -clcerts -nokeys
     openssl pkcs12 -in username.p12 -out userkey.pem -nocerts
     chmod 444 usercert.pem
@@ -93,14 +93,14 @@ To convert from p12 to PEM, type the following _four_ commands:
 
 To check your Distinguished Name (DN), enter the following command:
 
-```bash
+```console
     openssl x509 -in usercert.pem -noout -subject -nameopt
     RFC2253
 ```
 
 To check your certificate (e.g., DN, validity, issuer, public key algorithm, etc.), enter the following command:
 
-```bash
+```console
     openssl x509 -in usercert.pem -text -noout
 ```
 
@@ -110,7 +110,7 @@ To download openssl if not pre-installed, [please visit](https://www.openssl.org
 
 IT4innovations recommends the java based keytool utility to create and manage keystores, which themselves are stores of keys and certificates. For example if you want to convert your pkcs12 formatted key pair into a java keystore you can use the following command.
 
-```bash
+```console
     keytool -importkeystore -srckeystore $my_p12_cert -destkeystore
     $my_keystore -srcstoretype pkcs12 -deststoretype jks -alias
     $my_nickname -destalias $my_nickname
@@ -120,7 +120,7 @@ where $my_p12_cert is the name of your p12 (pkcs12) certificate, $my_keystore is
 
 You also can import CA certificates into your java keystore with the tool, e.g.:
 
-```bash
+```console
     keytool -import -trustcacerts -alias $mydomain -file $mydomain.crt -keystore $my_keystore
 ```
 
diff --git a/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md b/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md
index 7fb2cb4ef8b4fae3b024efa250eeb53ad6b312fa..b5202bb65bbd85cce248d61da6d9a4c10d0a9a29 100644
--- a/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md
+++ b/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md
@@ -40,7 +40,7 @@ In order to authorize a Collaborator to utilize the allocated resources, the PI
 
 Example (except the subject line which must be in English, you may use Czech or Slovak language for communication with us):
 
-```bash
+```console
     Subject: Authorization to IT4Innovations
 
     Dear support,
@@ -64,7 +64,7 @@ Once authorized by PI, every person (PI or Collaborator) wishing to access the c
 1. Full name and affiliation
 1. Statement that you have read and accepted the [Acceptable use policy document](http://www.it4i.cz/acceptable-use-policy.pdf) (AUP).
 1. Attach the AUP file.
-1. Your preferred username, max 8 characters long. The preferred username must associate your surname and name or be otherwise derived from it. Only alphanumeric sequences, dash and underscore signs are allowed.
+1. Your preferred username, max 12 characters long. The preferred username must associate your surname and name or be otherwise derived from it. Only alphanumeric sequences, dash and underscore signs are allowed.
 1. In case you choose [Alternative way to personal certificate](#alternative-way-to-personal-certificate), a **scan of photo ID** (personal ID or passport or driver license) is required
 
 !!! warning
@@ -72,7 +72,7 @@ Once authorized by PI, every person (PI or Collaborator) wishing to access the c
 
 Example (except the subject line which must be in English, you may use Czech or Slovak language for communication with us):
 
-```bash
+```console
     Subject: Access to IT4Innovations
 
     Dear support,
@@ -100,7 +100,7 @@ The clusters are accessed by the [private key](../accessing-the-clusters/shell-a
 
 On Linux, use
 
-```bash
+```console
 local $ ssh-keygen -f id_rsa -p
 ```
 
@@ -118,6 +118,9 @@ Certificate generation process for academic purposes, utilizing the CESNET certi
 
 * [How to generate a personal TCS certificate in Mozilla Firefox web browser (in Czech)](http://idoc.vsb.cz/xwiki/wiki/infra/view/uzivatel/moz-cert-gen)
 
+!!! note
+    The certificate file can be installed into your email client. Web-based email interfaces cannot be used for secure communication; an external application, such as Thunderbird or Outlook, must be used. This way, your new credentials will be visible only in applications that have access to your certificate.
+
 If you are not able to obtain certificate from any of the respected certification authorities, please follow the Alternative Way bellow.
 
 A FAQ about certificates can be found here: [Certificates FAQ](certificates-faq/).
@@ -138,11 +141,11 @@ Follow these steps **only** if you can not obtain your certificate in a standard
 * In Firefox navigate to _Options > Advanced > Certificates > View Certificates_.
 * Choose the _Your Certificates_ tab and find the fresh certificate with today's date.
 * Select it and hit the _Backup..._ button
-* Standard save dialog should appear, where you can choose tha name of your certificate file for your easy identification in the future.
-* You will be prompted to choose a passphrase for yor new certificate. This passphrase will be needed for installation into your favourite email client.
+* Standard save dialog should appear, where you can choose a name for the certificate file for easy identification in the future.
+* You will be prompted to choose a passphrase for your new certificate. This passphrase will be needed for installation into your favourite email client.
 
 !!! note
-    Certificate file now can be installed into your email client. Web-based email interfaces cannot be used for secure communication, externall application, such as Thunderbird or Outlook must be used (instructions bellow). This way, your new credentials will be visible only in applications, that have access to your certificate.
+    The certificate file can now be installed into your email client. Web-based email interfaces cannot be used for secure communication; an external application, such as Thunderbird or Outlook, must be used (instructions below). This way, your new credentials will be visible only in applications that have access to your certificate.
 
 ## Installation of the Certificate Into Your Mail Client
 
diff --git a/docs.it4i/img/paraview_connect.png b/docs.it4i/img/paraview_connect.png
new file mode 100644
index 0000000000000000000000000000000000000000..4f05b6a7be747b08d867b594e8e4fe31331e3a91
Binary files /dev/null and b/docs.it4i/img/paraview_connect.png differ
diff --git a/docs.it4i/img/paraview_connect_salomon.png b/docs.it4i/img/paraview_connect_salomon.png
new file mode 100644
index 0000000000000000000000000000000000000000..2e6b6f4e0749ced81826502ea37f3a75834e351a
Binary files /dev/null and b/docs.it4i/img/paraview_connect_salomon.png differ
diff --git a/docs.it4i/img/paraview_ssh_tunnel.png b/docs.it4i/img/paraview_ssh_tunnel.png
new file mode 100644
index 0000000000000000000000000000000000000000..c4e75f0bcb8abc00e1b5d9f42edd4f0e53d8d416
Binary files /dev/null and b/docs.it4i/img/paraview_ssh_tunnel.png differ
diff --git a/docs.it4i/img/paraview_ssh_tunnel_salomon.png b/docs.it4i/img/paraview_ssh_tunnel_salomon.png
new file mode 100644
index 0000000000000000000000000000000000000000..b6dc810ed754e19c38380b01e57c2dd886e79b78
Binary files /dev/null and b/docs.it4i/img/paraview_ssh_tunnel_salomon.png differ
diff --git a/docs.it4i/index.md b/docs.it4i/index.md
index 7e97161c12a16c0a8bec4540a77760cebf122063..b7a7bb2a724c74b121a8d0381d65881078b182fa 100644
--- a/docs.it4i/index.md
+++ b/docs.it4i/index.md
@@ -47,13 +47,13 @@ In this documentation, you will find a number of pages containing examples. We u
 
 Cluster command prompt
 
-```bash
+```console
 $
 ```
 
 Your local linux host command prompt
 
-```bash
+```console
 local $
 ```
 
diff --git a/docs.it4i/modules-anselm.md b/docs.it4i/modules-anselm.md
index d2e04c5900981acf1344800a46a80a3252c77588..7e805edcc6ca32b867a9a4b17f13bddf67521fcd 100644
--- a/docs.it4i/modules-anselm.md
+++ b/docs.it4i/modules-anselm.md
@@ -43,6 +43,15 @@
 | [QuantumESPRESSO](http://www.pwscf.org/) | Quantum ESPRESSO is an integrated suite of computer codes for electronic-structure calculations and materials modeling at the nanoscale. It is based on density-functional theory, plane waves, and pseudopotentials (both norm-conserving and ultrasoft). |
 | [xdrfile](http://www.gromacs.org/Developer_Zone/Programming_Guide/XTC_Library) | XTC library |
 
+## Compiler
+
+| Module | Description |
+| ------ | ----------- |
+| GCC | &nbsp; |
+| GCCcore | &nbsp; |
+| icc | &nbsp; |
+| ifort | &nbsp; |
+
 ## Compilers
 
 | Module | Description |
@@ -109,7 +118,6 @@
 | Module | Description |
 | ------ | ----------- |
 | adams | &nbsp; |
-| ansys | &nbsp; |
 | beopest | &nbsp; |
 | blender | &nbsp; |
 | Code_Saturne | &nbsp; |
@@ -152,6 +160,7 @@
 | [Java](http://java.com/) | Java Platform, Standard Edition (Java SE) lets you develop and deploy Java applications on desktops and servers. |
 | [libgdiplus](https://github.com/mono/libgdiplus) | An Open Source implementation of the GDI+ API. |
 | [Lua](http://www.lua.org/) | Lua is a powerful, fast, lightweight, embeddable scripting language. Lua combines simple procedural syntax with powerful data description constructs based on associative arrays and extensible semantics. Lua is dynamically typed, runs by interpreting bytecode for a register-based virtual machine, and has automatic memory management with incremental garbage collection, making it ideal for configuration, scripting, and rapid prototyping. |
+| Mono | &nbsp; |
 | [NASM](http://www.nasm.us/) | NASM: General-purpose x86 assembler |
 | Perl | &nbsp; |
 | [Python](http://python.org/) | Python is a programming language that lets you work more quickly and integrate your systems more effectively. |
@@ -163,6 +172,7 @@
 | Module | Description |
 | ------ | ----------- |
 | [libdrm](http://dri.freedesktop.org) | Direct Rendering Manager runtime library. |
+| libevent | &nbsp; |
 | [libffi](http://sourceware.org/libffi/) | The libffi library provides a portable, high level programming interface to various calling conventions. This allows a programmer to call any function specified by a call interface description at run-time. |
 | [libfontenc](http://www.freedesktop.org/wiki/Software/xlibs/) | X11 font encoding library |
 | [libjpeg-turbo](http://sourceforge.net/projects/libjpeg-turbo/) | libjpeg-turbo is a fork of the original IJG libjpeg which uses SIMD to accelerate baseline JPEG compression and decompression. libjpeg is a library that implements JPEG image encoding, decoding and transcoding. |
@@ -171,7 +181,7 @@
 | [libpthread-stubs](http://xcb.freedesktop.org/) | The X protocol C-language Binding (XCB) is a replacement for Xlib featuring a small footprint, latency hiding, direct access to the protocol, improved threading support, and extensibility. |
 | [libreadline](http://cnswww.cns.cwru.edu/php/chet/readline/rltop.html) | The GNU Readline library provides a set of functions for use by applications that allow users to edit command lines as they are typed in. Both Emacs and vi editing modes are available. The Readline library includes additional functions to maintain a list of previously-entered command lines, to recall and perhaps reedit those lines, and perform csh-like history expansion on previous commands. |
 | [LibTIFF](http://www.remotesensing.org/libtiff/) | tiff: Library and tools for reading and writing TIFF data files |
-| [libtool](http://www.gnu.org/software/libtool) | GNU libtool is a generic library support script. Libtool hides the complexity of using shared libraries behind a consistent, portable interface. |
+| libtool | &nbsp; |
 | [libunistring](http://www.gnu.org/software/libunistring/) | This library provides functions for manipulating Unicode strings and for manipulating C strings according to the Unicode standard. |
 | [libxcb](http://xcb.freedesktop.org/) | The X protocol C-language Binding (XCB) is a replacement for Xlib featuring a small footprint, latency hiding, direct access to the protocol, improved threading support, and extensibility. |
 | [libxml2](http://xmlsoft.org/) | Libxml2 is the XML C parser and toolchain developed for the Gnome project (but usable outside of the Gnome platform). |
@@ -232,7 +242,7 @@
 | Module | Description |
 | ------ | ----------- |
 | bullxmpi | &nbsp; |
-| [impi](http://software.intel.com/en-us/intel-mpi-library/) | The Intel(R) MPI Library for Linux* OS is a multi-fabric message passing library based on ANL MPICH2 and OSU MVAPICH2. The Intel MPI Library for Linux OS implements the Message Passing Interface, version 2 (MPI-2) specification. |
+| impi | &nbsp; |
 | lam | &nbsp; |
 | [MPICH](http://www.mpich.org/) | MPICH v3.x is an open source high-performance MPI 3.0 implementation. It does not support InfiniBand (use MVAPICH2 with InfiniBand devices). |
 | mvapich2 | &nbsp; |
@@ -246,6 +256,7 @@
 | [Armadillo](http://arma.sourceforge.net/) | Armadillo is an open-source C++ linear algebra library (matrix maths) aiming towards a good balance between speed and ease of use. Integer, floating point and complex numbers are supported, as well as a subset of trigonometric and statistics functions. |
 | [arpack-ng](http://forge.scilab.org/index.php/p/arpack-ng/) | ARPACK is a collection of Fortran77 subroutines designed to solve large scale eigenvalue problems. |
 | [ATLAS](http://math-atlas.sourceforge.net) | ATLAS (Automatically Tuned Linear Algebra Software) is the application of the AEOS (Automated Empirical Optimization of Software) paradigm, with the present emphasis on the Basic Linear Algebra Subprograms (BLAS), a widely used, performance-critical, linear algebra kernel library. |
+| cuDNN | &nbsp; |
 | [FFTW](http://www.fftw.org) | FFTW is a C subroutine library for computing the discrete Fourier transform (DFT) in one or more dimensions, of arbitrary input size, and of both real and complex data. |
 | [GSL](http://www.gnu.org/software/gsl/) | The GNU Scientific Library (GSL) is a numerical library for C and C++ programmers. The library provides a wide range of mathematical routines such as random number generators, special functions and least-squares fitting. |
 | [imkl](http://software.intel.com/en-us/intel-mkl/) | Intel Math Kernel Library is a library of highly optimized, extensively threaded math routines for science, engineering, and financial applications that require maximum performance. Core math functions include BLAS, LAPACK, ScaLAPACK, Sparse Solvers, Fast Fourier Transforms, Vector Math, and more. |
@@ -311,7 +322,7 @@
 
 | Module | Description |
 | ------ | ----------- |
-| [CUDA](https://developer.nvidia.com/cuda-toolkit) | CUDA (formerly Compute Unified Device Architecture) is a parallel computing platform and programming model created by NVIDIA and implemented by the graphics processing units (GPUs) that they produce. CUDA gives developers access to the virtual instruction set and memory of the parallel computational elements in CUDA GPUs. |
+| CUDA | &nbsp; |
 | [hwloc](http://www.open-mpi.org/projects/hwloc/) | The Portable Hardware Locality (hwloc) software package provides a portable abstraction (across OS, versions, architectures, ...) of the hierarchical topology of modern architectures, including NUMA memory nodes, sockets, shared caches, cores and simultaneous multithreading. It also gathers various system attributes such as cache and memory information as well as the locality of I/O devices such as network interfaces, InfiniBand HCAs or GPUs. It primarily aims at helping applications with gathering information about modern computing hardware so as to exploit it accordingly and efficiently. |
 | [libpciaccess](http://cgit.freedesktop.org/xorg/lib/libpciaccess/) | Generic PCI access library. |
 
@@ -333,6 +344,7 @@
 | Module | Description |
 | ------ | ----------- |
 | advisor_xe | &nbsp; |
+| ANSYS | &nbsp; |
 | [APR](http://apr.apache.org/) | Apache Portable Runtime (APR) libraries. |
 | [APR-util](http://apr.apache.org/) | Apache Portable Runtime (APR) util libraries. |
 | [Bash](http://www.gnu.org/software/bash) | Bash is an sh-compatible command language interpreter that executes commands read from the standard input or from a file. Bash also incorporates useful features from the Korn and C shells (ksh and csh). |
@@ -349,6 +361,7 @@
 | gnuplot | &nbsp; |
 | grace | &nbsp; |
 | [gzip](http://www.gnu.org/software/gzip/) | gzip (GNU zip) is a popular data compression program as a replacement for compress |
+| help2man | &nbsp; |
 | inspector_xe | &nbsp; |
 | intelpcm | &nbsp; |
 | ipm | &nbsp; |
@@ -381,18 +394,20 @@
 | scite | &nbsp; |
 | scorep | &nbsp; |
 | [Serf](http://serf.apache.org/) | The serf library is a high performance C-based HTTP client library built upon the Apache Portable Runtime (APR) library |
+| Singularity | &nbsp; |
 | [Subversion](http://subversion.apache.org/) | Subversion is an open source version control system. |
 | [Szip](http://www.hdfgroup.org/doc_resource/SZIP/) | Szip compression software, providing lossless compression of scientific data |
 | tcl | &nbsp; |
 | [tcsh](http://www.tcsh.org) | Tcsh is an enhanced, but completely compatible version of the Berkeley UNIX C shell (csh). It is a command language interpreter usable both as an interactive login shell and a shell script command processor. It includes a command-line editor, programmable word completion, spelling correction, a history mechanism, job control and a C-like syntax. |
 | tk | &nbsp; |
+| tmux | &nbsp; |
 | totalview | &nbsp; |
 | turbovnc | &nbsp; |
 | [util-linux](http://www.kernel.org/pub/linux/utils/util-linux) | Set of Linux utilities |
 | valgrind | &nbsp; |
 | vampir | &nbsp; |
 | virtualgl | &nbsp; |
-| [VTune](http://software.intel.com/en-us/intel-vtune-amplifier-xe) | Intel VTune Amplifier XE 2016 is the premier performance profiler for C, C++, C#, Fortran, Assembly and Java. |
+| VTune | &nbsp; |
 | vtune_xe | &nbsp; |
 | [XZ](http://tukaani.org/xz/) | xz: XZ utilities |
 
diff --git a/docs.it4i/modules-matrix.json b/docs.it4i/modules-matrix.json
new file mode 100644
index 0000000000000000000000000000000000000000..4398269c467576f2fa8ec70441b95657284e3b1f
--- /dev/null
+++ b/docs.it4i/modules-matrix.json
@@ -0,0 +1 @@
+{"total": 415, "projects": {"MIKE": "default", "SIP": "4.17-Python-2.7.9", "HDF5": "1.10.0-patch1-intel-2016.01-mic", "p4vasp": "0.3.29-GNU-4.9.3-2.25", "Automake": "1.15-GNU-5.1.0-2.25", "netcdf": "4.3.0", "bullxde": "2.0", "APR-util": "1.5.4-foss-2015g", "ScaLAPACK": "2.0.2-OpenBLAS-0.2.14-LAPACK-3.5.0", "BerkeleyUPC": "2.16.2-gompi-2015b", "BWA": "0.7.5a-foss-2015g", "openmpi": "1.8.1-icc", "matlab": "R2014a-EDU", "sympy": "0.7.6-intel-2016.01-Python-2.7.9", "kbproto": "1.0.7-intel-2016a", "lsprepost": "4.2", "prace": "20160107-intel-2016.01", "mpt": "2.12", "Bison": "3.0.4-GCC-4.9.3", "totalview": "8.13", "Wine": "1.7.29-GNU-5.1.0-2.25", "opari2": "1.1.2-icc", "MAP": "5.0.1", "libyaml": "0.1.6-intel-2015b", "mercurial": "2.9.1", "beopest": "13.3", "perfsuite": "1a5.3", "PSBLAS-ext": "1.0-4-GCC-4.9.3-2.25", "OSPRay": "0.9.1", "S4MPLE": "1.0.0", "libxslt": "1.1.28-intel-2015b", "hwloc": "1.11.5-GCC-6.3.0-2.27", "libunistring": "0.9.3-intel-2015b", "QGIS": "2.12.3-foss-2015g", "ngsPipeline": "1.0.0", "boost": "1.56-icc-impi", "matplotlib": "1.4.3-intel-2015b-Python-2.7.9", "openfoam": "2.2.2-icc-openmpi1.8.1-DP", "Szip": "2.1-intel-2017a", "PROJ_4": "4.9.2-foss-2015g", "phono3py": "1.11.7.8-intel-2015b-Python-2.7.11", "CMake": "3.7.2-intel-2017a", "COMSOL": "51-EDU", "hdf5": "1.8.13", "gimkl": "2.11.5", "xineramaproto": "1.2.1-intel-2015b", "xextproto": "7.3.0-intel-2016a", "GLM": "0.9.7.2-intel-2017a", "SWIG": "3.0.7-Python-2.7.9", "tmux": "2.3", "ipm": "0.983-icc-impi", "SUMO": "0.27.1-foss-2015g", "ipp": "15.3.187", "hdf5-parallel": "1.8.13-gcc49", "PrgEnv-intel": "15.0.3", "libxcb": "1.11-Python-2.7.9", "MPI_NET": "1.2.0-intel-2016.01", "QEMU": "2.1.2-GCC-4.4.7-system-VDE2", "cp2k-mpi": "2.5.1-gcc", "OTF2": "2.0-intel-2015b-mic", "VirtualGL": "2.4.1", "Armadillo": "7.500.0-foss-2016a-Python-3.5.2", "netcdf-fortran": "4.2", "perfcatcher": "1.0", "tk": "8.5.15", "itac": "9.1.2.024", "LAPACKE": "3.5.0-LAPACK-3.5.0", "PrgEnv-gnu": "4.8.1", "libICE": 
"1.0.9-intel-2015b", "Rstudio": "0.97", "VisIt": "2.10.0", "virtualgl": "2.4", "Scipion": "1.0.1-Java-1.8.0_112-OpenMPI-1.10.2-GCC-5.3.0-2.26", "grace": "5.1.25-intel-2015b", "ANSYS": "18.0", "ATLAS": "3.10.1-GCC-4.9.3-2.25-LAPACK-3.4.2", "Scalasca": "2.3.1-intel-2015b", "BCFtools": "1.3-foss-2015g", "gcc": "5.4.0", "lxml": "3.4.4-intel-2015b-Python-2.7.9", "lsdyna": "7.x.x", "PGI": "16.10-GNU-4.9.3-2.25", "advisor_xe": "2015.1.10.380555", "CUDA": "8.0.44-intel-2017.00", "gatk": "2.6-4", "Spark": "1.5.2", "ifort": "2017.1.132-GCC-6.3.0-2.27", "lam": "7.1.4-icc", "PyYAML": "3.11-intel-2015b-Python-2.7.9", "tcsh": "6.19.00", "gperf": "3.0.4-intel-2016a", "METIS": "5.1.0-intel-2017.00", "Digimat": "5.0.1-EDU", "pigz": "2.3.3-GCC-6.2.0-2.27", "Autotools": "20150215-GNU-5.1.0-2.25", "parallel": "20150322-GNU-5.1.0-2.25", "bowtie2": "2.2.3", "QuantumESPRESSO": "5.4.0-intel-2017.00", "CP2K": "2.6.0-intel-2015b", "MATIO": "1.5.2-intel-2017a", "wine": "1.7.29", "libX11": "1.6.3-intel-2016a", "HyperWorks": "13.0", "hpg-aligner": "1.0.0", "PCRE": "8.39-intel-2017.00", "modflow-2005": "1.11.00", "EasyBuild": "3.1.0", "adios": "1.8.0", "GLOBUS": "globus", "picard": "2.1.0", "turbovnc": "1.2.3", "settarg": "7.2.2", "JOE": "4.2", "libSM": "1.2.2-intel-2015b", "pixman": "0.32.6-intel-2015b", "flex": "2.6.3-GCCcore-6.3.0", "libgdiplus": "3.12-GNU-5.1.0-2.25", "python": "3.4.2", "namd": "2.8", "APR": "1.5.2-foss-2015g", "aislinn": "20160105-Python-2.7.9-gompi-2015e", "inspector_xe": "2015.1.2.379161", "h5py": "2.4.0-ictce-7.3.5-Python-2.7.9-serial", "cURL": "7.51.0-intel-2017.00", "SIONlib": "1.6.1-tools", "bupc": "2.16.2", "PAPI": "5.4.3-pic", "PerfReports": "5.0.1", "cairo": "1.12.18-foss-2015b", "Harminv": "1.4-intel-2015b", "Perl": "5.24.0-GCC-4.9.3-2.25-bare", "Lua": "5.1.4-8", "fftw2-mpi": "2.1.5-icc", "mxml": "2.9", "Maven": "3.3.9", "GATK": "3.5-Java-1.7.0_79", "Trimmomatic": "0.35-Java-1.7.0_79", "GCCcore": "6.3.0", "GCC": "6.3.0-2.27", "xcb-proto": "1.11-Python-2.7.9", 
"hypermesh": "12.0.110", "imkl": "2017.1.132-iimpi-2017a", "Meep": "1.3-intel-2015b", "eudev": "3.1.5-intel-2016a", "Vampir": "9.0.0", "FastQC": "0.11.3", "PROJ": "4.9.2-intel-2017.00", "NASM": "2.11.08-intel-2017.00", "mvapich2": "1.9-icc", "iompi": "2017.01", "OpenCV": "3.0.0-intel-2015b", "ParaView": "5.0.0-binary", "ISL": "0.15-GNU-4.9.3-2.25", "intelpcm": "2.6", "Libint": "1.1.4-intel-2015b", "libreadline": "6.3-intel-2017a", "SpatiaLite": "4.3.0a-foss-2015g", "Clang": "3.7.0-GNU-5.1.0-2.25", "ParMETIS": "4.0.3-intel-2017a", "Mesa": "11.2.1-foss-2016a", "fftw3": "3.3.3-icc", "slepc": "3.7.2-icc16-impi5-mkl-opt", "MPFR": "3.1.5-intel-2017.00", "OpenCL-builder": "2015", "OpenCL-runtime": "15.1", "relion": "1.3", "XZ": "5.2.2-intel-2017.00", "libunwind": "1.1-GCC-5.4.0-2.26", "libevent": "2.1.8", "fftw2": "2.1.5-icc", "dytran": "2013.0.1", "ffmpeg": "2.4-intel-2015b", "M4": "1.4.18-GCCcore-6.3.0", "FFTW": "3.3.6-gompi-2017a", "PyQt": "4.11.4-foss-2015g-Python-2.7.9", "NWChem": "6.5.revision26243-intel-2015b-2014-09-10-Python-2.7.8", "hyperworks": "13.0", "ant": "1.9.3-Java-1.7.0_79", "Forge": "7.0", "arpack-ng": "3.4.0-intel-2017.00", "comsol": "50-EDU", "GEOS": "3.5.0-foss-2015g", "Singularity": "2.2-GCC-6.3.0-2.27", "VTune": "2016_update1", "digimat": "5.0.1", "LLVM": "3.9.0-intel-2017.00", "Qt": "4.8.6-foss-2015g", "fixesproto": "5.0-intel-2016a", "Molpro": "2010.1-patch-57-intel2015b", "libXdamage": "1.1.4-intel-2016a", "bullxmpi": "bullxmpi_1.2.4.1", "scalasca2": "2.0-icc-impi", "openssh-x509": "6.2p2", "mpi.net": "1.0.0-mono-3.12.1", "gimpi": "2.11.5", "R": "3.2.3-intel-2016.01", "Racket": "6.1.1-GNU-5.1.0-2.25", "SCOTCH": "6.0.4-intel-2017a", "fastqc": "0.11.2", "trilinos": "11.2.3-icc", "netcdf-parallel": "4.3.0", "chicken": "4.8.0.6", "OpenBLAS": "0.2.19-GCC-6.3.0-2.27-LAPACK-3.7.0", "blender": "2.71", "PCRE2": "10.22-intel-2017.00", "fontconfig": "2.11.94-intel-2017.00", "Octave": "4.0.1-gimkl-2.11.5", "DCW": "1.1.2", "Qwt": "6.1.2-foss-2015g", "Bash": 
"4.3", "freetype": "2.6.3-intel-2016a", "cube": "4.2.3-icc", "Valgrind": "3.11.0-intel-2015b", "iimpi": "2017a", "dhi-mike": "default", "tbb": "15.3.187", "guile": "1.8.8-intel-2015b", "PSBLAS": "3.3.4-3-GCC-4.9.3-2.25", "libXfont": "1.5.1-Python-2.7.9", "szip": "2.1", "memoryscape": "3.4", "vampir": "8.2", "libpciaccess": "0.13.4-intel-2016a", "JasPer": "1.900.1-intel-2015b", "racket": "6.0.1", "foss": "2017a", "Boost": "1.61.0-foss-2016a-serial", "FIAT": "1.6.0-intel-2016.01-Python-2.7.9", "PRACE": "prace", "gompi": "2017a", "lux": "1.3.1", "LibTIFF": "4.0.3-intel-2015b", "netCDF-Fortran": "4.4.0-intel-2016.01", "libpng": "1.6.16-intel-2015b", "SuiteSparse": "4.5.3-intel-2017a-ParMETIS-4.0.3", "FOX": "1.6.51-foss-2015g", "DDT": "5.0.1", "libctl": "3.2.2-intel-2015b", "mono": "3.12.1", "valgrind": "3.9.0-impi", "PLUMED": "2.3b-foss-2016a", "SnuCL": "1.3.3-gompi-2015e", "Tcl": "8.6.5-intel-2017a", "libXft": "2.3.2-intel-2015b", "binutils": "2.27-GCCcore-6.3.0", "GPI-2": "1.1.1-gompi-2015e-MPI", "xdrfile": "1.1.4-intel-2015b", "libGLU": "9.0.0-foss-2015g", "otf2": "1.4-icc", "util-linux": "2.28-intel-2016a", "lmod": "7.2.2", "MVAPICH2": "2.1-GNU-5.1.0-2.25", "byacc": "20150711-intel-2015b", "java": "1.7", "marc": "2013.1", "elmer": "7.0-r6695-opt", "HTSlib": "1.3-foss-2015g", "MATLAB": "2015b-EDU", "gupc": "4.8.0.3", "abinit": "7.10.1-icc-impi", "numpy": "1.9.1-intel-2015b-Python-2.7.9", "modflow-nwt": "1.0.9-aquaveo", "Adams": "2013.2", "ncurses": "6.0-intel-2017a", "MUMPS": "5.0.2-intel-2017a-parmetis", "Score-P": "3.0-intel-2015b", "ruby": "2.0.0-p247", "Subversion": "1.8.16-foss-2015g", "NAMD": "2.9-mpi", "zlib": "1.2.11-GCCcore-6.3.0", "xtrans": "1.3.5-intel-2016a", "snpEff": "3.6", "ABINIT": "7.10.1-intel-2015b", "libMesh": "0.9.5-intel-2016.01", "motif": "2.3.4-intel-2015b-libX11-1.6.2", "GNU": "5.1.0-2.25-intel-2015b", "almost": "2.1.0-intel-2015b", "libxml2": "2.9.3-intel-2017.00", "expat": "2.1.0-intel-2017.00", "Code_Saturne": "3.0.5", "opencl-rt": 
"4.5.0.8", "samtools": "0.1.19", "ictce": "8.3.5", "Python": "3.5.2-intel-2017.00", "make": "3.82-intel-2015b", "Mono": "4.2.2.10-intel-2016.01", "FreeFem++": "3.45-intel-2015b", "SAMtools": "1.3-foss-2015g", "SQLite": "3.13.0-intel-2017a", "HPL": "2.1-intel-2015b", "OpenDX": "4.4.4-foss-2015g", "Autoconf": "2.69-GNU-5.1.0-2.25", "RStudio": "0.98.1103", "globus": "globus", "fontsproto": "2.1.3-intel-2016a", "SDE": "7.41.0", "gzip": "1.6-intel-2015b", "gsl": "1.16-icc", "tcl": "8.5.15", "TotalView": "8.15.4-6-linux-x86-64", "MPICH": "3.2-GCC-5.3.1-snapshot-20160419-2.25", "GSL": "2.1-intel-2015b", "libXfixes": "5.0.1-intel-2016a", "OpenFOAM": "3.0.0-intel-2016.01", "SCons": "2.3.6-Python-2.7.9", "iccifort": "2017.1.132-GCC-6.3.0-2.27", "plasma": "2.6.0", "phonopy": "1.11.6.7-intel-2015b-Python-2.7.11", "libXdmcp": "1.1.2-intel-2016a", "Mercurial": "3.7.3-foss-2015g-Python-2.7.9", "xproto": "7.0.28-intel-2016a", "FLTK": "1.3.2-intel-2015b", "hpg-variant": "1.0.0", "libdrm": "2.4.68-intel-2016a", "intel": "2017a", "nwchem": "6.3-rev2-patch1-venus", "adams": "2013.2", "makedepend": "1.0.5-intel-2016a", "numactl": "2.0.11-GCC-6.3.0-2.27", "vtune_xe": "2015.3.0.403110", "Discovery_Studio": "4.0", "wien2k": "14.2", "help2man": "1.47.4-GCCcore-6.3.0", "xbitmaps": "1.1.1-intel-2015b", "Inspector": "2016_update1", "spGPU": "master-GCC-4.9.3-2.25", "JUnit": "4.11-Java-1.7.0_79", "nastran": "2013.1.1", "pkg-config": "0.29-intel-2016a", "Java": "1.8.0_112", "Marc": "2013.1.0", "magma": "1.3.0-mic", "libXrender": "0.9.8-intel-2015b", "inputproto": "2.3-intel-2015b", "libfontenc": "1.1.3-intel-2016a", "GDAL": "2.1.0-GNU-5.1.0-2.25-intel-2015b", "Cube": "4.3.4-intel-2015b", "icc": "2017.1.132-GCC-6.3.0-2.27", "qemu": "2.1.2-vde2", "fftw3-mpi": "3.3.3-icc", "Ruby": "2.3.1", "libXau": "1.0.8-intel-2016a", "Doxygen": "1.8.11-intel-2017a", "gpi2": "1.1.1", "dataspaces": "1.4.0", "RELION": "1.3-intel-2015b", "libXinerama": "1.1.3-intel-2015b", "Amber": "14", "MPC": 
"1.0.2-intel-2017.00", "LAMMPS": "28Jun14-intel-2015b", "libjpeg-turbo": "1.4.2-intel-2017.00", "perfboost": "1.0", "python-meep": "1.4.2-intel-2015b-Python-2.7.9-Meep-1.3", "petsc": "3.7.3-icc16-impi5-mkl-opt", "ScientificPython": "2.9.4-intel-2016.01-Python-2.7.9", "cuda": "7.5", "scorep": "1.2.3-icc-impi", "OPARI2": "2.0", "MLD2P4": "2.0-rc4-GCC-4.9.3-2.25", "maxwell": "3.0", "VampirServer": "9.0.0-intel-2015b", "spatialindex": "1.8.5-foss-2015g", "vde2": "2.3.2", "paraview": "4.0.1-gcc481-bullxmpi1.2.4.1-osmesa10.0", "Siesta": "4.1-b2-intel-2017.00", "VASP": "5.4.1-intel-2017.00-24Jun15", "git": "2.11.0-GNU-4.9.3-2.25", "lammps": "28Jun14", "mkl": "15.3.187", "xorg-macros": "1.19.0-intel-2016a", "likwid": "4.1.2-intel", "OpenCoarrays": "1.4.0-GCC-5.3.1-snapshot-20160419-2.25", "GROMACS": "5.1.2-intel-2016a-hybrid", "libXt": "1.1.5-foss-2015g", "tensorflow": "0.12.0", "libXext": "1.3.3-intel-2016a", "GMT": "5.2.1-foss-2015g", "molpro": "2010.1-p45-intel", "fds": "6.svn", "pest": "13.0", "PerformanceReports": "6.0.6", "netcdf-cxx": "4.2", "impi": "2017-BETA.ENG", "Lmod": "7.2.2", "libmatheval": "1.1.11-intel-2015b", "GLib": "2.40.0-GCC-4.4.7-system", "QCA": "2.1.0-foss-2015g", "scite": "3.4.3", "Tk": "8.6.5-intel-2017a", "hpg-fastq": "1.0.0", "SnpEff": "4.1_G", "libpthread-stubs": "0.3-intel-2016a", "bzip2": "1.0.6-intel-2017a", "cmake": "2.8.11-mic", "gnuplot": "4.6.5", "gettext": "0.19.6-intel-2017.00", "VDE2": "2.3.2-GCC-4.4.7-system", "Advisor": "2017", "glproto": "1.4.17-intel-2016a", "ORCA": "3_0_3-linux_x86-64", "llvm": "3.6.0", "papi": "5.4.0-mic", "Serf": "1.3.8-foss-2015g", "libxc": "2.2.1-intel-2015b", "libtool": "2.4.6-GCC-6.3.0-2.27", "libffi": "3.2.1-GCC-4.4.7-system", "libmesh": "0.9.3-petsc-3.4.4-icc-impi-mkl-opt", "opencl-sdk": "4.6.0.92", "GMP": "6.1.1-intel-2017.00", "PETSc": "3.6.3-intel-2015b-Python-2.7.11", "OpenMPI": "2.0.2-GCC-6.3.0-2.27", "netCDF": "4.4.1-intel-2017a", "Hypre": "2.10.1-intel-2015b", "renderproto": "0.11-intel-2015b", 
"oscar-modules": "1.0.3"}}
diff --git a/docs.it4i/modules-matrix.md b/docs.it4i/modules-matrix.md
index fd1c8b86f58262e7c21826dcf7e8d403a59a5682..c0a08f8e0bd6b3d022c1b91d6ab12f76b69ba9f6 100644
--- a/docs.it4i/modules-matrix.md
+++ b/docs.it4i/modules-matrix.md
@@ -1,337 +1,344 @@
 !!! Hint "Cluster Acronyms"
-    \* A - Anselm
-    \* S - Salomon
-    \* U - uv1 at Salomon
-| Module | Versions | Clusters |
+    A - Anselm • S - Salomon • U - uv1 at Salomon
+
+| Module </br><input id="searchInput" placeholder="🔍 Filter" style="width: 8rem; border-radius: 0.2rem; color: black; padding-left: .2rem;"> | Versions | Clusters |
 | ------ | -------- | -------- |
-| abinit | 7.10.1-gcc-openmpi</br>7.10.1-icc-impi</br>7.6.2 | `--A`</br>`--A`</br>`--A` |
+| abinit | 7.6.2</br>7.10.1-gcc-openmpi</br>7.10.1-icc-impi | `--A`</br>`--A`</br>`--A` |
 | ABINIT | 7.10.1-foss-2015b</br>7.10.1-intel-2015b | `US-`</br>`US-` |
 | adams | 2013.2 | `--A` |
 | Adams | 2013.2 | `-S-` |
 | adios | 1.8.0 | `--A` |
 | Advisor | 2016_update2</br>2017 | `-S-`</br>`-S-` |
-| advisor_xe | 2015.1.10.380555</br>2013.5 | `--A`</br>`--A` |
+| advisor_xe | 2013.5</br>2015.1.10.380555 | `--A`</br>`--A` |
 | aislinn | 20160105-Python-2.7.9-gompi-2015e | `-S-` |
-| almost | 2.1.0-intel-2015b</br>2.1.0-foss-2015g</br>2.1.0-foss-2016a</br>2.1.0-foss-2015b | `-S-`</br>`-SA`</br>`--A`</br>`-S-` |
+| almost | 2.1.0-foss-2015b</br>2.1.0-foss-2015g</br>2.1.0-foss-2016a</br>2.1.0-intel-2015b | `-S-`</br>`-SA`</br>`-SA`</br>`-S-` |
 | Amber | 14 | `-S-` |
-| ANSYS | 17.0</br>16.1 | `US-`</br>`US-` |
-| ansys | 14.5.x</br>15.0.x</br>16.0.x | `--A`</br>`--A`</br>`--A` |
+| ANSYS | 14.5.x</br>15.0.x</br>16.0.x</br>16.1</br>17.0</br>18.0 | `--A`</br>`--A`</br>`--A`</br>`US-`</br>`US-`</br>`-SA` |
 | ant | 1.9.3-Java-1.7.0_79 | `-S-` |
-| APR | 1.5.2-foss-2015g</br>1.5.2 | `-SA`</br>`-SA` |
+| APR | 1.5.2</br>1.5.2-foss-2015g | `-SA`</br>`-SA` |
 | APR-util | 1.5.4</br>1.5.4-foss-2015g | `-SA`</br>`-SA` |
 | Armadillo | 7.500.0-foss-2016a-Python-3.5.2 | `-SA` |
-| arpack-ng | 3.3.0-foss-2016a</br>3.3.0-intel-2017.00</br>3.4.0-intel-2017.00</br>3.3.0-intel-2015b | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
+| arpack-ng | 3.3.0-foss-2016a</br>3.3.0-intel-2015b</br>3.3.0-intel-2017.00</br>3.4.0-intel-2017.00 | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
 | ATLAS | 3.10.1-GCC-4.9.3-2.25-LAPACK-3.4.2 | `--A` |
-| Autoconf | 2.69-foss-2015g</br>2.69-intel-2016a</br>2.69</br>2.69-GCC-4.9.3-2.25</br>2.69-foss-2016a</br>2.69-GNU-5.1.0-2.25</br>2.69-intel-2017.00</br>2.69-GNU-4.9.3-2.25</br>2.69-intel-2016.01</br>2.69-intel-2015b | `-S-`</br>`-S-`</br>`USA`</br>`-S-`</br>`USA`</br>`USA`</br>`USA`</br>`USA`</br>`-S-`</br>`-SA` |
-| Automake | 1.15</br>1.15-GNU-5.1.0-2.25</br>1.15-intel-2017.00</br>1.15-foss-2016a</br>1.15-GCC-4.9.3-2.25</br>1.15-GNU-4.9.3-2.25</br>1.15-intel-2016.01</br>1.15-foss-2015g</br>1.15-intel-2015b</br>1.15-intel-2016a | `USA`</br>`USA`</br>`USA`</br>`USA`</br>`-S-`</br>`USA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-` |
-| Autotools | 20150215-GNU-5.1.0-2.25</br>20150215</br>20150215-intel-2015b</br>20150215-intel-2016a</br>20150215-foss-2016a</br>20150215-intel-2016.01</br>20150215-intel-2017.00</br>20150215-GNU-4.9.3-2.25</br>20150215-GCC-4.9.3-2.25 | `USA`</br>`USA`</br>`-SA`</br>`-S-`</br>`USA`</br>`-S-`</br>`USA`</br>`USA`</br>`-S-` |
+| Autoconf | 2.69</br>2.69-foss-2015g</br>2.69-foss-2016a</br>2.69-intel-2015b</br>2.69-intel-2016.01</br>2.69-intel-2016a</br>2.69-intel-2017.00</br>2.69-intel-2017a</br>2.69-GCC-4.9.3-2.25</br>2.69-GCC-6.3.0-2.27</br>2.69-GNU-4.9.3-2.25</br>2.69-GNU-5.1.0-2.25 | `USA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`USA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`USA`</br>`USA` |
+| Automake | 1.15</br>1.15-foss-2015g</br>1.15-foss-2016a</br>1.15-intel-2015b</br>1.15-intel-2016.01</br>1.15-intel-2016a</br>1.15-intel-2017.00</br>1.15-intel-2017a</br>1.15-GCC-4.9.3-2.25</br>1.15-GCC-6.3.0-2.27</br>1.15-GNU-4.9.3-2.25</br>1.15-GNU-5.1.0-2.25 | `USA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`USA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`USA`</br>`USA` |
+| Autotools | 20150215</br>20150215-foss-2016a</br>20150215-intel-2015b</br>20150215-intel-2016.01</br>20150215-intel-2016a</br>20150215-intel-2017.00</br>20150215-intel-2017a</br>20150215-GCC-4.9.3-2.25</br>20150215-GCC-6.3.0-2.27</br>20150215-GNU-4.9.3-2.25</br>20150215-GNU-5.1.0-2.25 | `USA`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`USA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`USA`</br>`USA` |
 | Bash | 4.2-intel-2015b</br>4.3 | `US-`</br>`-SA` |
 | BCFtools | 1.3-foss-2015g | `-S-` |
-| beopest | 12.2</br>13.3</br>12.0.1 | `--A`</br>`--A`</br>`--A` |
+| beopest | 12.0.1</br>12.2</br>13.3 | `--A`</br>`--A`</br>`--A` |
 | BerkeleyUPC | 2.16.2-gompi-2015b | `-S-` |
-| binutils | 2.25-GCC-4.9.3 | `--A` |
-| Bison | 3.0.4-intel-2015b</br>3.0.4-GCC-4.9.3 | `--A`</br>`--A` |
+| binutils | 2.25-GCC-4.9.3</br>2.27-GCCcore-6.3.0 | `--A`</br>`-SA` |
+| Bison | 3.0.4-intel-2015b</br>3.0.4-GCCcore-6.3.0</br>3.0.4-GCC-4.9.3 | `--A`</br>`-SA`</br>`--A` |
 | blender | 2.71 | `--A` |
 | boost | 1.56-gcc-openmpi</br>1.56-icc-impi | `--A`</br>`--A` |
-| Boost | 1.60.0-foss-2015g-Python-2.7.9</br>1.58.0-Python-2.7.9</br>1.58.0-ictce-7.3.5-Python-2.7.9</br>1.59.0-intel-2015b</br>1.59.0-intel-2015b-Python-2.7.11</br>1.59.0-intel-2016.01</br>1.60.0-intel-2015b-Python-2.7.11</br>1.61.0-foss-2016a-serial</br>1.58.0-foss-2015g-Python-2.7.9</br>1.58.0-intel-2015b-Python-2.7.9</br>1.60.0-intel-2016a</br>1.61.0-foss-2016a</br>1.58.0-intel-2016.01-Python-2.7.9</br>1.58.0-gompi-2015e-Python-2.7.9 | `USA`</br>`-S-`</br>`-S-`</br>`--A`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-SA`</br>`US-`</br>`--A`</br>`-S-`</br>`-S-`</br>`-S-` |
+| Boost | 1.58.0-foss-2015g-Python-2.7.9</br>1.58.0-gompi-2015e-Python-2.7.9</br>1.58.0-ictce-7.3.5-Python-2.7.9</br>1.58.0-intel-2015b-Python-2.7.9</br>1.58.0-intel-2016.01-Python-2.7.9</br>1.58.0-Python-2.7.9</br>1.59.0-intel-2015b</br>1.59.0-intel-2015b-Python-2.7.11</br>1.59.0-intel-2016.01</br>1.60.0-foss-2015g-Python-2.7.9</br>1.60.0-intel-2015b-Python-2.7.11</br>1.60.0-intel-2016a</br>1.61.0-foss-2016a</br>1.61.0-foss-2016a-serial</br>1.63.0-intel-2017a-Python-2.7.11 | `-SA`</br>`-S-`</br>`-S-`</br>`US-`</br>`-S-`</br>`-S-`</br>`--A`</br>`-S-`</br>`-S-`</br>`USA`</br>`-S-`</br>`--A`</br>`-S-`</br>`-SA`</br>`-S-` |
 | bowtie2 | 2.2.3 | `--A` |
 | bullxde | 2.0 | `-S-` |
 | bullxmpi | bullxmpi-1.2.4.3</br>bullxmpi_1.2.4.1 | `--A`</br>`--A` |
 | bupc | 2.16.2 | `--A` |
 | BWA | 0.7.5a-foss-2015g | `-S-` |
-| byacc | 20120526-intel-2015b</br>20120526-foss-2015b</br>20120526-foss-2016a</br>20120526</br>20120526-foss-2015g</br>20150711-intel-2015b | `-SA`</br>`-S-`</br>`--A`</br>`-SA`</br>`-SA`</br>`--A` |
-| cairo | 1.12.18-foss-2015b</br>1.12.18 | `-S-`</br>`-SA` |
+| byacc | 20120526</br>20120526-foss-2015b</br>20120526-foss-2015g</br>20120526-foss-2016a</br>20120526-intel-2015b</br>20150711-intel-2015b | `-SA`</br>`-S-`</br>`-SA`</br>`-SA`</br>`-SA`</br>`--A` |
+| bzip2 | 1.0.6-intel-2017a | `-S-` |
+| cairo | 1.12.18</br>1.12.18-foss-2015b | `-SA`</br>`-S-` |
 | chicken | 4.8.0.6 | `--A` |
 | Clang | 3.7.0-GNU-5.1.0-2.25 | `-S-` |
-| CMake | 3.4.1-intel-2015b</br>3.3.1-foss-2016a</br>3.6.2-intel-2017.00</br>3.0.0-foss-2015g</br>3.3.2-intel-2016.01</br>3.5.2-intel-2017.00</br>3.5.2-GCC-4.9.3-2.25</br>3.4.1-foss-2016a</br>3.3.1-GCC-5.3.0-2.25</br>3.3.1-foss-2015g</br>3.5.1-intel-2016a</br>3.3.1-intel-2016.01</br>3.3.1-GNU-5.1.0-2.25</br>3.3.1-GCC-4.9.3-2.25</br>3.3.1-GNU-4.9.3-2.25</br>3.3.2-GNU-4.9.3-2.25</br>3.4.1-GCCcore-4.9.3</br>3.3.1-GCC-5.3.1-snapshot-20160419-2.25</br>3.4.3-intel-2016a</br>3.4.1-foss-2015b</br>3.5.2</br>3.0.0-intel-2016.01</br>3.0.0-ictce-7.3.5</br>3.5.2-intel-2016a</br>3.6.2</br>3.5.2-foss-2016a</br>3.0.0-intel-2015b | `-SA`</br>`--A`</br>`U--`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`--A`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`--A`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`US-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`--A`</br>`USA`</br>`USA`</br>`US-` |
-| cmake | 2.8.11-mic</br>2.8.11 | `--A`</br>`--A` |
+| CMake | 3.0.0-foss-2015g</br>3.0.0-ictce-7.3.5</br>3.0.0-intel-2015b</br>3.0.0-intel-2016.01</br>3.3.1-foss-2015g</br>3.3.1-foss-2016a</br>3.3.1-intel-2016.01</br>3.3.1-GCC-4.9.3-2.25</br>3.3.1-GCC-5.3.0-2.25</br>3.3.1-GCC-5.3.1-snapshot-20160419-2.25</br>3.3.1-GNU-4.9.3-2.25</br>3.3.1-GNU-5.1.0-2.25</br>3.3.2-intel-2016.01</br>3.3.2-GNU-4.9.3-2.25</br>3.4.1-foss-2015b</br>3.4.1-foss-2016a</br>3.4.1-intel-2015b</br>3.4.1-GCCcore-4.9.3</br>3.4.3-intel-2016a</br>3.5.1-intel-2016a</br>3.5.2</br>3.5.2-foss-2016a</br>3.5.2-intel-2016a</br>3.5.2-intel-2017.00</br>3.5.2-GCC-4.9.3-2.25</br>3.6.2</br>3.6.2-intel-2017.00</br>3.7.2-intel-2017a | `-S-`</br>`-S-`</br>`US-`</br>`-S-`</br>`-SA`</br>`--A`</br>`-S-`</br>`--A`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`US-`</br>`--A`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`USA`</br>`--A`</br>`-S-`</br>`-S-`</br>`U-A`</br>`U--`</br>`-S-` |
+| cmake | 2.8.11</br>2.8.11-mic | `--A`</br>`--A` |
 | Code_Saturne | 3.0.5 | `--A` |
 | COMSOL | 51-COM</br>51-EDU | `-S-`</br>`-S-` |
-| comsol | 44-EDU</br>50-EDU</br>43b-EDU</br>43b-COM</br>50-COM</br>44-COM | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
+| comsol | 43b-COM</br>43b-EDU</br>44-COM</br>44-EDU</br>50-COM</br>50-EDU | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
 | CP2K | 2.6.0-intel-2015b | `-S-` |
 | cp2k-mpi | 2.5.1-gcc | `--A` |
-| cube | 4.2.3-icc</br>4.2.3-gcc | `--A`</br>`--A` |
+| cube | 4.2.3-gcc</br>4.2.3-icc | `--A`</br>`--A` |
 | Cube | 4.3.4-intel-2015b | `-S-` |
-| CUDA | 7.5.18 | `--A` |
-| cuda | 6.5.14</br>6.0.37</br>7.5 | `--A`</br>`--A`</br>`--A` |
-| cURL | 7.47.0-intel-2017.00</br>7.51.0</br>7.51.0-intel-2017.00</br>7.37.1</br>7.45.0-foss-2015b</br>7.37.1-intel-2016.01</br>7.37.1-foss-2015g</br>7.37.1-intel-2015b | `-S-`</br>`--A`</br>`US-`</br>`USA`</br>`US-`</br>`-S-`</br>`-SA`</br>`US-` |
+| CUDA | 7.5.18</br>8.0.44</br>8.0.44-intel-2017.00 | `--A`</br>`--A`</br>`--A` |
+| cuda | 6.0.37</br>6.5.14</br>7.5 | `--A`</br>`--A`</br>`--A` |
+| cuDNN | 5.1-CUDA-8.0.44 | `--A` |
+| cURL | 7.37.1</br>7.37.1-foss-2015g</br>7.37.1-intel-2015b</br>7.37.1-intel-2016.01</br>7.45.0-foss-2015b</br>7.47.0-intel-2017.00</br>7.49.1-intel-2017a</br>7.51.0</br>7.51.0-intel-2017.00 | `USA`</br>`-SA`</br>`US-`</br>`-S-`</br>`US-`</br>`-S-`</br>`-S-`</br>`--A`</br>`US-` |
 | dataspaces | 1.4.0 | `--A` |
 | DCW | 1.1.2 | `-S-` |
-| DDT | 5.0.1</br>4.2 | `-S-`</br>`-S-` |
-| dhi-mike | default</br>2014</br>2016</br>2016-SP2 | `--A`</br>`--A`</br>`--A`</br>`--A` |
+| DDT | 4.2</br>5.0.1 | `-S-`</br>`-S-` |
+| dhi-mike | 2014</br>2016</br>2016-SP2</br>default | `--A`</br>`--A`</br>`--A`</br>`--A` |
 | digimat | 5.0.1 | `--A` |
 | Digimat | 5.0.1-COM</br>5.0.1-EDU | `-S-`</br>`-S-` |
 | Discovery_Studio | 4.0 | `--A` |
-| Doxygen | 1.8.7-foss-2015g</br>1.8.7-intel-2016.01</br>1.8.11-intel-2017.00</br>1.8.10-foss-2015b</br>1.8.10-intel-2017.00</br>1.8.7-intel-2015b</br>1.8.11 | `-S-`</br>`-S-`</br>`-S-`</br>`US-`</br>`-S-`</br>`US-`</br>`-SA` |
+| Doxygen | 1.8.7-foss-2015g</br>1.8.7-intel-2015b</br>1.8.7-intel-2016.01</br>1.8.10-foss-2015b</br>1.8.10-intel-2017.00</br>1.8.11</br>1.8.11-intel-2017.00</br>1.8.11-intel-2017a | `-S-`</br>`US-`</br>`-S-`</br>`US-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-` |
 | dytran | 2013.0.1 | `--A` |
-| EasyBuild | 2.9.0</br>3.0.2</br>3.0.0</br>3.0.1</br>2.8.1</br>2.8.0 | `-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`--A`</br>`-SA` |
+| EasyBuild | 2.8.0</br>2.8.1</br>2.9.0</br>3.0.0</br>3.0.1</br>3.0.2</br>3.1.0 | `-SA`</br>`--A`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA` |
 | elmer | 7.0-r6695-dbg</br>7.0-r6695-opt | `--A`</br>`--A` |
 | eudev | 3.1.5-foss-2016a</br>3.1.5-intel-2016a | `-S-`</br>`-S-` |
-| expat | 2.1.0-intel-2015b</br>2.1.0</br>2.1.0-foss-2015g</br>2.1.0-intel-2017.00</br>2.1.0-foss-2015b | `-S-`</br>`USA`</br>`-SA`</br>`US-`</br>`US-` |
+| expat | 2.1.0</br>2.1.0-foss-2015b</br>2.1.0-foss-2015g</br>2.1.0-intel-2015b</br>2.1.0-intel-2017.00 | `USA`</br>`US-`</br>`-SA`</br>`-S-`</br>`US-` |
 | FastQC | 0.11.3 | `US-` |
 | fastqc | 0.11.2 | `--A` |
-| fds | 5.5.3-omp</br>5.5.3</br>6.svn | `--A`</br>`--A`</br>`--A` |
-| ffmpeg | 2.4-foss-2015g</br>2.4-intel-2015b</br>2.4 | `-S-`</br>`-S-`</br>`-SA` |
-| FFTW | 3.3.4-gompi-2015b</br>3.3.5-intel-2017.00</br>3.3.4-intel-2015b</br>3.3.4-gompi-2015g</br>3.3.4-gompi-2015e</br>3.3.5-intel-2016.01</br>2.1.5-iimpi-7.3.5-GNU-5.1.0-2.25</br>2.1.5-gompi-2015b</br>3.3.5-intel-2016a</br>3.3.5-gompi-2016a</br>3.3.5-foss-2016a</br>3.3.4-intel-2016.01</br>3.3.4-gompi-2016.04</br>3.3.4-gompi-2016a | `US-`</br>`USA`</br>`USA`</br>`USA`</br>`US-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`US-`</br>`-S-`</br>`-SA`</br>`USA` |
+| fds | 5.5.3</br>5.5.3-omp</br>6.svn | `--A`</br>`--A`</br>`--A` |
+| ffmpeg | 2.4</br>2.4-foss-2015g</br>2.4-intel-2015b | `-SA`</br>`-S-`</br>`-S-` |
+| FFTW | 2.1.5-gompi-2015b</br>2.1.5-iimpi-7.3.5-GNU-5.1.0-2.25</br>3.3.4-gompi-2015b</br>3.3.4-gompi-2015e</br>3.3.4-gompi-2015g</br>3.3.4-gompi-2016.04</br>3.3.4-gompi-2016a</br>3.3.4-intel-2015b</br>3.3.4-intel-2016.01</br>3.3.5-foss-2016a</br>3.3.5-gompi-2016a</br>3.3.5-intel-2016.01</br>3.3.5-intel-2016a</br>3.3.5-intel-2017.00</br>3.3.6-gompi-2017a | `-S-`</br>`-S-`</br>`US-`</br>`US-`</br>`USA`</br>`-SA`</br>`USA`</br>`USA`</br>`-S-`</br>`US-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`USA`</br>`-SA` |
 | fftw2 | 2.1.5-gcc</br>2.1.5-icc | `--A`</br>`--A` |
 | fftw2-mpi | 2.1.5-gcc</br>2.1.5-icc | `--A`</br>`--A` |
 | fftw3 | 3.3.3-gcc</br>3.3.3-icc | `--A`</br>`--A` |
 | fftw3-mpi | 3.3.3-gcc</br>3.3.3-icc | `--A`</br>`--A` |
-| FIAT | 1.6.0-intel-2016.01-Python-2.7.9</br>1.6.0-intel-2015b-Python-2.7.9</br>1.6.0-intel-2015b-Python-2.7.11 | `-S-`</br>`-S-`</br>`-S-` |
-| fixesproto | 5.0</br>5.0-foss-2015g</br>5.0-intel-2016a</br>5.0-foss-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
-| flex | 2.5.39-GCC-4.9.3</br>2.5.39-foss-2016a | `--A`</br>`--A` |
+| FIAT | 1.6.0-intel-2015b-Python-2.7.9</br>1.6.0-intel-2015b-Python-2.7.11</br>1.6.0-intel-2016.01-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-` |
+| fixesproto | 5.0</br>5.0-foss-2015g</br>5.0-foss-2016a</br>5.0-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
+| flex | 2.5.39-foss-2016a</br>2.5.39-GCC-4.9.3</br>2.6.0-intel-2017a</br>2.6.3-GCCcore-6.3.0 | `-SA`</br>`--A`</br>`-S-`</br>`--A` |
 | FLTK | 1.3.2</br>1.3.2-intel-2015b | `-SA`</br>`-S-` |
-| fontconfig | 2.11.1-intel-2015b</br>2.11.94-intel-2017.00</br>2.11.1-foss-2015b</br>2.11.1 | `-S-`</br>`-S-`</br>`-S-`</br>`-SA` |
-| fontsproto | 2.1.3</br>2.1.3-intel-2016a</br>2.1.3-foss-2015g</br>2.1.3-foss-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
-| Forge | 6.0.6</br>6.1.2</br>5.1-43967</br>6.0.5</br>5.7 | `-SA`</br>`-SA`</br>`-SA`</br>`-SA`</br>`--A` |
-| foss | 2016.04</br>2016a</br>2015b</br>2015e</br>2015g | `-SA`</br>`USA`</br>`US-`</br>`US-`</br>`USA` |
+| fontconfig | 2.11.1</br>2.11.1-foss-2015b</br>2.11.1-intel-2015b</br>2.11.94-intel-2017.00 | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
+| fontsproto | 2.1.3</br>2.1.3-foss-2015g</br>2.1.3-foss-2016a</br>2.1.3-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
+| Forge | 5.1-43967</br>5.7</br>6.0.5</br>6.0.6</br>6.1.2</br>7.0 | `-SA`</br>`--A`</br>`-SA`</br>`-SA`</br>`-SA`</br>`-S-` |
+| foss | 2015b</br>2015e</br>2015g</br>2016.04</br>2016a</br>2017a | `US-`</br>`US-`</br>`USA`</br>`-SA`</br>`USA`</br>`-SA` |
 | FOX | 1.6.51-foss-2015g | `-S-` |
 | FreeFem++ | 3.45-intel-2015b | `-S-` |
-| freetype | 2.6.2-intel-2016a</br>2.5.3-foss-2015g</br>2.5.3-foss-2015b</br>2.5.5-intel-2015b</br>2.6.3-intel-2016a</br>2.6.2-intel-2017.00</br>2.5.3-intel-2015b</br>2.6.3-foss-2016a</br>2.5.3 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA` |
+| freetype | 2.5.3</br>2.5.3-foss-2015b</br>2.5.3-foss-2015g</br>2.5.3-intel-2015b</br>2.5.5-intel-2015b</br>2.6.2-intel-2016a</br>2.6.2-intel-2017.00</br>2.6.3-foss-2016a</br>2.6.3-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
 | gatk | 2.6-4 | `--A` |
 | GATK | 2.6-5-Java-1.7.0_79</br>3.5-Java-1.7.0_79 | `US-`</br>`-S-` |
-| gcc | 4.8.1</br>5.4.0</br>4.9.0 | `--A`</br>`--A`</br>`--A` |
-| GCC | 6.2.0-2.27</br>4.9.2-binutils-2.25</br>4.4.7-system</br>5.4.0-2.26</br>5.3.1-snapshot-20160419-2.25</br>5.3.0-binutils-2.25</br>5.2.0</br>5.3.0-2.26</br>4.9.3-2.25</br>5.3.0-2.25</br>4.7.4</br>4.8.3</br>5.1.0-binutils-2.25</br>4.9.3-binutils-2.25</br>4.9.2</br>4.9.3 | `US-`</br>`-S-`</br>`US-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-SA`</br>`USA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`USA`</br>`USA`</br>`-S-`</br>`USA` |
-| GCCcore | 5.3.1-snapshot-20160419</br>5.3.0</br>5.4.0</br>6.2.0</br>4.9.3 | `-S-`</br>`-SA`</br>`USA`</br>`US-`</br>`USA` |
-| GDAL | 2.1.0-intel-2015b</br>2.1.0-GNU-5.1.0-2.25-intel-2015b</br>2.1.0-foss-2015g</br>1.9.2-foss-2015g</br>2.1.0-GNU-5.1.0-2.25</br>2.0.1-foss-2015b</br>2.0.2-intel-2017.00 | `-S-`</br>`-S-`</br>`-SA`</br>`-SA`</br>`-S-`</br>`US-`</br>`-S-` |
+| gcc | 4.8.1</br>4.9.0</br>5.4.0 | `--A`</br>`--A`</br>`--A` |
+| GCC | 4.4.7-system</br>4.7.4</br>4.8.3</br>4.9.2</br>4.9.2-binutils-2.25</br>4.9.3</br>4.9.3-2.25</br>4.9.3-binutils-2.25</br>5.1.0-binutils-2.25</br>5.2.0</br>5.3.0-2.25</br>5.3.0-2.26</br>5.3.0-binutils-2.25</br>5.3.1-snapshot-20160419-2.25</br>5.4.0-2.26</br>6.2.0-2.27</br>6.3.0-2.27 | `US-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`USA`</br>`USA`</br>`USA`</br>`USA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`US-`</br>`-SA` |
+| GCCcore | 4.9.3</br>5.3.0</br>5.3.1-snapshot-20160419</br>5.4.0</br>6.2.0</br>6.3.0 | `USA`</br>`-SA`</br>`-S-`</br>`USA`</br>`US-`</br>`-SA` |
+| GDAL | 1.9.2-foss-2015g</br>2.0.1-foss-2015b</br>2.0.2-intel-2017.00</br>2.1.0-foss-2015g</br>2.1.0-intel-2015b</br>2.1.0-GNU-5.1.0-2.25</br>2.1.0-GNU-5.1.0-2.25-intel-2015b | `-SA`</br>`US-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
 | GEOS | 3.5.0-foss-2015g | `-S-` |
-| gettext | 0.19.4</br>0.19.4-GCC-4.4.7-system</br>0.19.6-intel-2017.00 | `--A`</br>`U--`</br>`--A` |
+| gettext | 0.19.4</br>0.19.4-GCC-4.4.7-system</br>0.19.6-intel-2017.00</br>0.19.8 | `--A`</br>`U--`</br>`--A`</br>`-SA` |
 | gimkl | 2.11.5 | `--A` |
 | gimpi | 2.11.5 | `--A` |
-| git | 2.9.2</br>2.9.0</br>2.8.0-GNU-4.9.3-2.25</br>2.11.0-intel-2017.00</br>2.11.0-GNU-4.9.3-2.25</br>2.11.0</br>2.8.0-intel-2017.00 | `-S-`</br>`-S-`</br>`-SA`</br>`U--`</br>`--A`</br>`-S-`</br>`US-` |
-| GLib | 2.40.0-GCC-4.4.7-system</br>2.40.0</br>2.40.0-foss-2015b</br>2.40.0-intel-2016.01</br>2.40.0-foss-2015g</br>2.40.0-intel-2015b | `U--`</br>`-SA`</br>`U--`</br>`-S-`</br>`-S-`</br>`US-` |
+| git | 2.8.0-intel-2017.00</br>2.8.0-GNU-4.9.3-2.25</br>2.9.0</br>2.9.2</br>2.11.0</br>2.11.0-intel-2017.00</br>2.11.0-GNU-4.9.3-2.25</br>2.11.1-GNU-4.9.3-2.25 | `US-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`U--`</br>`--A`</br>`--A` |
+| GLib | 2.40.0</br>2.40.0-foss-2015b</br>2.40.0-foss-2015g</br>2.40.0-intel-2015b</br>2.40.0-intel-2016.01</br>2.40.0-GCC-4.4.7-system | `-SA`</br>`U--`</br>`-S-`</br>`US-`</br>`-S-`</br>`U--` |
+| GLM | 0.9.7.2-intel-2017a | `-S-` |
 | GLOBUS | globus | `--A` |
 | globus | globus | `-S-` |
-| glproto | 1.4.16-foss-2015g</br>1.4.17-intel-2016a</br>1.4.17-foss-2016a | `-S-`</br>`-S-`</br>`-S-` |
-| GMP | 6.1.0-intel-2017.00</br>6.1.0-intel-2016a</br>5.0.5</br>6.0.0a-GNU-5.1.0-2.25</br>6.0.0a-GNU-4.9.3-2.25</br>6.1.0-GCC-4.9.3-2.25</br>6.0.0a-intel-2015b</br>6.1.0-foss-2016a</br>6.1.1-intel-2017.00</br>5.0.5-foss-2015g</br>5.0.5-intel-2015b</br>6.0.0a</br>5.0.5-foss-2015b</br>6.0.0a-foss-2015b</br>6.1.0-intel-2015b</br>6.1.0-intel-2016.01 | `USA`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`--A`</br>`USA`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-` |
+| glproto | 1.4.16-foss-2015g</br>1.4.17-foss-2016a</br>1.4.17-intel-2016a | `-S-`</br>`-S-`</br>`-S-` |
+| GMP | 5.0.5</br>5.0.5-foss-2015b</br>5.0.5-foss-2015g</br>5.0.5-intel-2015b</br>6.0.0a</br>6.0.0a-foss-2015b</br>6.0.0a-intel-2015b</br>6.0.0a-GNU-4.9.3-2.25</br>6.0.0a-GNU-5.1.0-2.25</br>6.1.0-foss-2016a</br>6.1.0-intel-2015b</br>6.1.0-intel-2016.01</br>6.1.0-intel-2016a</br>6.1.0-intel-2017.00</br>6.1.0-GCC-4.9.3-2.25</br>6.1.1-intel-2017.00</br>6.1.1-intel-2017a | `-SA`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-SA`</br>`-S-`</br>`--A`</br>`-S-`</br>`-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`USA`</br>`-S-`</br>`-S-`</br>`-S-` |
 | GMT | 5.2.1-foss-2015g | `-S-` |
 | GNU | 4.9.3-2.25</br>5.1.0-2.25</br>5.1.0-2.25-intel-2015b | `USA`</br>`USA`</br>`-S-` |
 | gnuplot | 4.6.5 | `--A` |
-| gompi | 2016.04</br>2016a</br>2015b</br>2015e</br>2015g | `-SA`</br>`USA`</br>`US-`</br>`US-`</br>`USA` |
+| gompi | 2015b</br>2015e</br>2015g</br>2016.04</br>2016a</br>2017a | `US-`</br>`US-`</br>`USA`</br>`-SA`</br>`USA`</br>`-SA` |
 | gperf | 3.0.4-foss-2015g</br>3.0.4-foss-2016a</br>3.0.4-intel-2016a | `-S-`</br>`-S-`</br>`-S-` |
-| GPI-2 | 1.1.1-gompi-2015e-MPI</br>1.1.1-gompi-2015e | `-S-`</br>`-S-` |
-| gpi2 | 1.1.1</br>1.1.0</br>1.0.2 | `--A`</br>`--A`</br>`--A` |
+| GPI-2 | 1.1.1-gompi-2015e</br>1.1.1-gompi-2015e-MPI | `-S-`</br>`-S-` |
+| gpi2 | 1.0.2</br>1.1.0</br>1.1.1 | `--A`</br>`--A`</br>`--A` |
 | grace | 5.1.23</br>5.1.25-intel-2015b | `--A`</br>`-S-` |
-| GROMACS | 5.1.2-intel-2016a-hybrid</br>5.0.4-ictce-7.3.5-hybrid-single</br>4.6.7-foss-2015g-hybrid-single-PLUMED</br>5.0.4-foss-2015e-hybrid-single-PLUMED</br>5.1.2-intel-2015b-hybrid-single-CUDA-7.5-PLUMED-2.2.1-test</br>5.1.2-intel-2015b-hybrid-single-cuda</br>5.1.2-foss-2015g-hybrid-single-PLUMED</br>5.1.2-intel-2015b-hybrid-single-CUDA-7.5-PLUMED-2.2.1</br>5.0.4-foss-2015g-hybrid-single</br>5.0.4-foss-2015g-hybrid-single-PLUMED | `--A`</br>`-S-`</br>`-S-`</br>`-S-`</br>`--A`</br>`--A`</br>`-S-`</br>`--A`</br>`-S-`</br>`-S-` |
-| gsl | 1.16-icc</br>1.16-gcc | `--A`</br>`--A` |
-| GSL | 1.16-intel-2016.01</br>2.1-intel-2015b</br>1.16-intel-2015b | `--A`</br>`--A`</br>`-SA` |
-| guile | 1.8.8-foss-2015b</br>1.8.8-foss-2016a</br>1.8.8</br>1.8.8-intel-2015b</br>1.8.8-foss-2015g | `-S-`</br>`--A`</br>`-SA`</br>`-SA`</br>`-S-` |
+| GROMACS | 4.6.7-foss-2015g-hybrid-single-PLUMED</br>5.0.4-foss-2015e-hybrid-single-PLUMED</br>5.0.4-foss-2015g-hybrid-single</br>5.0.4-foss-2015g-hybrid-single-PLUMED</br>5.0.4-ictce-7.3.5-hybrid-single</br>5.1.2-foss-2015g-hybrid-single-PLUMED</br>5.1.2-intel-2015b-hybrid-single-cuda</br>5.1.2-intel-2015b-hybrid-single-CUDA-7.5-PLUMED-2.2.1</br>5.1.2-intel-2015b-hybrid-single-CUDA-7.5-PLUMED-2.2.1-test</br>5.1.2-intel-2016a-hybrid</br>5.1.4-foss-2016a-hybrid-single-PLUMED | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`--A`</br>`--A`</br>`--A`</br>`--A`</br>`-S-` |
+| gsl | 1.16-gcc</br>1.16-icc | `--A`</br>`--A` |
+| GSL | 1.16-intel-2015b</br>1.16-intel-2016.01</br>2.1-intel-2015b | `-SA`</br>`--A`</br>`--A` |
+| guile | 1.8.8</br>1.8.8-foss-2015b</br>1.8.8-foss-2015g</br>1.8.8-foss-2016a</br>1.8.8-intel-2015b | `-SA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-SA` |
 | gupc | 4.8.0.3 | `--A` |
-| gzip | 1.6-intel-2015b</br>1.6</br>1.6-foss-2016a</br>1.6-foss-2015g | `-SA`</br>`-SA`</br>`--A`</br>`USA` |
+| gzip | 1.6</br>1.6-foss-2015g</br>1.6-foss-2016a</br>1.6-intel-2015b | `-SA`</br>`USA`</br>`-SA`</br>`-SA` |
 | h5py | 2.4.0-ictce-7.3.5-Python-2.7.9-serial | `-S-` |
 | Harminv | 1.4-intel-2015b | `-S-` |
-| HDF5 | 1.8.16-intel-2016.01</br>1.8.13-intel-2016.01</br>1.8.16-foss-2015g</br>1.8.13-foss-2015g</br>1.8.13-intel-2015b</br>1.8.16-intel-2017.00</br>1.8.14-ictce-7.3.5-serial</br>1.8.13-intel-2015b-no-mpi</br>1.8.16-foss-2016a</br>1.10.0-patch1-intel-2016.01-mic</br>1.8.16-intel-2015b</br>1.8.15-patch1-foss-2015b</br>1.8.16-intel-2015b-threadsafe | `--A`</br>`-S-`</br>`-S-`</br>`-S-`</br>`US-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`USA`</br>`-S-`</br>`USA`</br>`US-`</br>`-S-` |
-| hdf5 | 1.8.13</br>1.8.11 | `--A`</br>`--A` |
-| hdf5-parallel | 1.8.13-gcc49</br>1.8.13-gcc</br>1.8.13</br>1.8.11-gcc</br>1.8.11 | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
+| HDF5 | 1.8.13-foss-2015g</br>1.8.13-intel-2015b</br>1.8.13-intel-2015b-no-mpi</br>1.8.13-intel-2016.01</br>1.8.14-ictce-7.3.5-serial</br>1.8.15-patch1-foss-2015b</br>1.8.16-foss-2015g</br>1.8.16-foss-2016a</br>1.8.16-intel-2015b</br>1.8.16-intel-2015b-threadsafe</br>1.8.16-intel-2016.01</br>1.8.16-intel-2017.00</br>1.8.17-intel-2017a</br>1.10.0-patch1-intel-2016.01-mic | `-S-`</br>`US-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`US-`</br>`-S-`</br>`USA`</br>`USA`</br>`-S-`</br>`--A`</br>`-S-`</br>`-S-`</br>`-S-` |
+| hdf5 | 1.8.11</br>1.8.13 | `--A`</br>`--A` |
+| hdf5-parallel | 1.8.11</br>1.8.11-gcc</br>1.8.13</br>1.8.13-gcc</br>1.8.13-gcc49 | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
+| help2man | 1.47.4-GCCcore-6.3.0 | `-SA` |
 | hpg-aligner | 1.0.0 | `--A` |
 | hpg-fastq | 1.0.0 | `--A` |
 | hpg-variant | 1.0.0 | `--A` |
-| HPL | 2.1-intel-2015b</br>2.1-foss-2015b | `-S-`</br>`-S-` |
+| HPL | 2.1-foss-2015b</br>2.1-intel-2015b | `-S-`</br>`-S-` |
 | HTSlib | 1.3-foss-2015g | `-S-` |
-| hwloc | 1.11.0</br>1.5-GCC-4.4.7-system</br>1.11.1-iccifort-2015.3.187-GNU-4.9.3-2.25</br>1.11.2-GCC-4.9.3-2.25</br>1.11.0-GNU-5.1.0-2.25</br>1.11.3-GCC-5.3.0-2.26</br>1.11.0-GNU-4.9.3-2.25 | `-SA`</br>`-S-`</br>`-SA`</br>`USA`</br>`USA`</br>`-SA`</br>`USA` |
+| hwloc | 1.5-GCC-4.4.7-system</br>1.11.0</br>1.11.0-GNU-4.9.3-2.25</br>1.11.0-GNU-5.1.0-2.25</br>1.11.1-iccifort-2015.3.187-GNU-4.9.3-2.25</br>1.11.2-GCC-4.9.3-2.25</br>1.11.3-GCC-5.3.0-2.26</br>1.11.4-iccifort-2017.1.132-GCC-5.4.0-2.26</br>1.11.4-GCC-6.2.0-2.27</br>1.11.5-GCC-6.3.0-2.27 | `-S-`</br>`-SA`</br>`USA`</br>`USA`</br>`-SA`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-SA` |
 | hypermesh | 12.0.110 | `--A` |
 | HyperWorks | 13.0 | `-S-` |
 | hyperworks | 13.0 | `--A` |
-| Hypre | 2.10.0b-intel-2015b</br>2.10.0b-intel-2016.01</br>2.10.1-intel-2015b | `-S-`</br>`-S-`</br>`-S-` |
-| icc | 2013.5.192-GCC-4.8.3</br>2013.5.192</br>2015.3.187-GNU-4.9.3-2.25</br>2016.3.210-GCC-5.3.0-2.26</br>2016.1.150-GCC-4.9.3</br>2015.3.187-GNU-5.1.0-2.25</br>2017.0.098-GCC-5.4.0-2.26</br>2016.1.150-GCC-4.9.3-2.25</br>2016.0.109-GCC-4.9.3</br>2016.1.150</br>2015.3.187 | `-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`USA`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
-| iccifort | 2013.5.192-GCC-4.8.3</br>2013.5.192</br>2015.3.187-GNU-4.9.3-2.25</br>2016.3.210-GCC-5.3.0-2.26</br>2016.1.150-GCC-4.9.3</br>2015.3.187-GNU-5.1.0-2.25</br>2017.0.098-GCC-5.4.0-2.26</br>2016.1.150-GCC-4.9.3-2.25</br>2016.0.109-GCC-4.9.3</br>2016.1.150</br>2015.3.187 | `-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`USA`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
-| ictce | 7.3.5</br>5.5.0</br>8.3.5 | `-S-`</br>`-S-`</br>`-S-` |
-| ifort | 2013.5.192-GCC-4.8.3</br>2013.5.192</br>2015.3.187-GNU-4.9.3-2.25</br>2016.3.210-GCC-5.3.0-2.26</br>2016.1.150-GCC-4.9.3</br>2015.3.187-GNU-5.1.0-2.25</br>2017.0.098-GCC-5.4.0-2.26</br>2016.1.150-GCC-4.9.3-2.25</br>2016.0.109-GCC-4.9.3</br>2016.1.150</br>2015.3.187 | `-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`USA`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
-| iimpi | 5.5.0</br>8.1.5-GCC-4.9.3-2.25</br>5.5.0-GCC-4.8.3</br>7.3.5-GNU-5.1.0-2.25</br>8.3.5</br>2016.00-GCC-4.9.3</br>2016.01-GCC-4.9.3-2.25</br>2016.01-GCC-4.9.3</br>2017.00-GCC-5.4.0-2.26</br>7.3.5</br>2016.03-GCC-5.3.0-2.26 | `-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-S-`</br>`-S-` |
-| imkl | 11.2.3.187</br>11.3.3.210-iimpi-2016.03-GCC-5.3.0-2.26</br>11.3.1.150-iimpi-2016.01-GCC-4.9.3-2.25</br>11.3.1.150-iimpi-8.1.5-GCC-4.9.3-2.25</br>2017.0.098-iimpi-2017.00-GCC-5.4.0-2.26</br>11.3.1.150-iimpi-8.3.5</br>11.0.5.192-iimpi-5.5.0</br>11.3.1.150-iimpi-2016.00-GCC-4.9.3</br>11.2.3.187-iimpi-7.3.5</br>11.3.0.109-iimpi-2016.00-GCC-4.9.3</br>11.2.3.187-iompi-2015.03</br>11.2.3.187-iimpi-7.3.5-GNU-5.1.0-2.25</br>11.0.5.192-iimpi-5.5.0-GCC-4.8.3</br>11.2.3.187-gimpi-2.11.5 | `-S-`</br>`-S-`</br>`-SA`</br>`-SA`</br>`USA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`USA`</br>`-S-`</br>`--A` |
-| impi | 4.1.1.036-iccifort-2013.5.192</br>5.0.3.048-iccifort-2015.3.187</br>4.1.1.036</br>5.1.2.150-iccifort-2016.1.150-GCC-4.9.3-2.25</br>4.1.1.036-iccifort-2013.5.192-GCC-4.8.3</br>2017.0.098-iccifort-2017.0.098-GCC-5.4.0-2.26</br>5.1.3.181-iccifort-2016.3.210-GCC-5.3.0-2.26</br>5.0.3.048-GCC-4.9.3</br>5.1.2.150-iccifort-2016.1.150</br>5.0.3.048-iccifort-2015.3.187-GNU-5.1.0-2.25</br>5.1.2.150-iccifort-2016.1.150-GCC-4.9.3</br>2017-BETA.ENG</br>5.0.3.048</br>5.1.1.109-iccifort-2016.0.109-GCC-4.9.3 | `-S-`</br>`-S-`</br>`--A`</br>`-SA`</br>`-S-`</br>`USA`</br>`-S-`</br>`--A`</br>`-S-`</br>`USA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-` |
-| inputproto | 2.3</br>2.3-foss-2015g</br>2.3-intel-2015b</br>2.3.1-intel-2016a</br>2.3.1-foss-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
+| Hypre | 2.10.0b-intel-2015b</br>2.10.0b-intel-2016.01</br>2.10.1-intel-2015b</br>2.11.1-intel-2017a | `-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
+| icc | 2013.5.192</br>2013.5.192-GCC-4.8.3</br>2015.3.187</br>2015.3.187-GNU-4.9.3-2.25</br>2015.3.187-GNU-5.1.0-2.25</br>2016.0.109-GCC-4.9.3</br>2016.1.150</br>2016.1.150-GCC-4.9.3</br>2016.1.150-GCC-4.9.3-2.25</br>2016.3.210-GCC-5.3.0-2.26</br>2017.0.098-GCC-5.4.0-2.26</br>2017.1.132-GCC-5.4.0-2.26</br>2017.1.132-GCC-6.3.0-2.27 | `-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`USA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-SA` |
+| iccifort | 2013.5.192</br>2013.5.192-GCC-4.8.3</br>2015.3.187</br>2015.3.187-GNU-4.9.3-2.25</br>2015.3.187-GNU-5.1.0-2.25</br>2016.0.109-GCC-4.9.3</br>2016.1.150</br>2016.1.150-GCC-4.9.3</br>2016.1.150-GCC-4.9.3-2.25</br>2016.3.210-GCC-5.3.0-2.26</br>2017.0.098-GCC-5.4.0-2.26</br>2017.1.132-GCC-5.4.0-2.26</br>2017.1.132-GCC-6.3.0-2.27 | `-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`USA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-SA` |
+| ictce | 5.5.0</br>7.3.5</br>8.3.5 | `-S-`</br>`-S-`</br>`-S-` |
+| ifort | 2013.5.192</br>2013.5.192-GCC-4.8.3</br>2015.3.187</br>2015.3.187-GNU-4.9.3-2.25</br>2015.3.187-GNU-5.1.0-2.25</br>2016.0.109-GCC-4.9.3</br>2016.1.150</br>2016.1.150-GCC-4.9.3</br>2016.1.150-GCC-4.9.3-2.25</br>2016.3.210-GCC-5.3.0-2.26</br>2017.0.098-GCC-5.4.0-2.26</br>2017.1.132-GCC-5.4.0-2.26</br>2017.1.132-GCC-6.3.0-2.27 | `-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`USA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-SA` |
+| iimpi | 5.5.0</br>5.5.0-GCC-4.8.3</br>7.3.5</br>7.3.5-GNU-5.1.0-2.25</br>8.1.5-GCC-4.9.3-2.25</br>8.3.5</br>2016.00-GCC-4.9.3</br>2016.01-GCC-4.9.3</br>2016.01-GCC-4.9.3-2.25</br>2016.03-GCC-5.3.0-2.26</br>2017.00-GCC-5.4.0-2.26</br>2017.01-GCC-5.4.0-2.26</br>2017a | `-S-`</br>`-S-`</br>`-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-SA` |
+| imkl | 11.0.5.192-iimpi-5.5.0</br>11.0.5.192-iimpi-5.5.0-GCC-4.8.3</br>11.2.3.187</br>11.2.3.187-gimpi-2.11.5</br>11.2.3.187-iimpi-7.3.5</br>11.2.3.187-iimpi-7.3.5-GNU-5.1.0-2.25</br>11.2.3.187-iompi-2015.03</br>11.3.0.109-iimpi-2016.00-GCC-4.9.3</br>11.3.1.150-iimpi-8.1.5-GCC-4.9.3-2.25</br>11.3.1.150-iimpi-8.3.5</br>11.3.1.150-iimpi-2016.00-GCC-4.9.3</br>11.3.1.150-iimpi-2016.01-GCC-4.9.3-2.25</br>11.3.3.210-iimpi-2016.03-GCC-5.3.0-2.26</br>2017.0.098-iimpi-2017.00-GCC-5.4.0-2.26</br>2017.1.132-iimpi-2017.01-GCC-5.4.0-2.26</br>2017.1.132-iimpi-2017a | `-S-`</br>`-S-`</br>`-S-`</br>`--A`</br>`-S-`</br>`USA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-SA` |
+| impi | 4.1.1.036</br>4.1.1.036-iccifort-2013.5.192</br>4.1.1.036-iccifort-2013.5.192-GCC-4.8.3</br>5.0.3.048</br>5.0.3.048-iccifort-2015.3.187</br>5.0.3.048-iccifort-2015.3.187-GNU-5.1.0-2.25</br>5.0.3.048-GCC-4.9.3</br>5.1.1.109-iccifort-2016.0.109-GCC-4.9.3</br>5.1.2.150-iccifort-2016.1.150</br>5.1.2.150-iccifort-2016.1.150-GCC-4.9.3</br>5.1.2.150-iccifort-2016.1.150-GCC-4.9.3-2.25</br>5.1.3.181-iccifort-2016.3.210-GCC-5.3.0-2.26</br>2017.0.098-iccifort-2017.0.098-GCC-5.4.0-2.26</br>2017.1.132-iccifort-2017.1.132-GCC-5.4.0-2.26</br>2017.1.132-iccifort-2017.1.132-GCC-6.3.0-2.27</br>2017-BETA.ENG | `--A`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`--A`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-SA`</br>`-S-` |
+| inputproto | 2.3</br>2.3.1-foss-2016a</br>2.3.1-intel-2016a</br>2.3-foss-2015g</br>2.3-intel-2015b | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
 | Inspector | 2016_update1 | `-S-` |
 | inspector_xe | 2013.5</br>2015.1.2.379161 | `--A`</br>`--A` |
-| intel | 2014.06</br>2017.00</br>2016.03-GCC-5.3</br>15.2.164</br>2016.00</br>2016.01</br>14.0.1</br>2015b</br>2016a</br>15.3.187</br>13.5.192</br>2015b-intel-2015b | `-S-`</br>`USA`</br>`-S-`</br>`--A`</br>`-S-`</br>`-SA`</br>`--A`</br>`USA`</br>`-SA`</br>`--A`</br>`--A`</br>`-S-` |
+| intel | 13.5.192</br>14.0.1</br>15.2.164</br>15.3.187</br>2014.06</br>2015b</br>2015b-intel-2015b</br>2016.00</br>2016.01</br>2016.03-GCC-5.3</br>2016a</br>2017.00</br>2017.01</br>2017a | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`-S-`</br>`USA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-SA`</br>`USA`</br>`-SA`</br>`-SA` |
 | intelpcm | 2.6 | `--A` |
+| iompi | 2017.01 | `-S-` |
 | ipm | 0.983-icc-impi | `--A` |
-| ipp | 14.0.1</br>13.5.192</br>15.3.187</br>9.0.1.150</br>15.2.164 | `--A`</br>`--A`</br>`--A`</br>`-S-`</br>`--A` |
-| ISL | 0.15</br>0.15-GCC-4.9.3-2.25</br>0.14-GNU-5.1.0-2.25</br>0.15-GNU-4.9.3-2.25 | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
-| itac | 9.0.3.051</br>9.1.2.024</br>8.1.4.045 | `--A`</br>`-S-`</br>`--A` |
+| ipp | 9.0.1.150</br>13.5.192</br>14.0.1</br>15.2.164</br>15.3.187 | `-S-`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
+| ISL | 0.14-GNU-5.1.0-2.25</br>0.15</br>0.15-GCC-4.9.3-2.25</br>0.15-GNU-4.9.3-2.25 | `-S-`</br>`-SA`</br>`-S-`</br>`-S-` |
+| itac | 8.1.4.045</br>9.0.3.051</br>9.1.2.024 | `--A`</br>`--A`</br>`-S-` |
 | JasPer | 1.900.1-intel-2015b | `-S-` |
 | java | 1.7 | `--A` |
-| Java | 1.8.0_112</br>1.8.0_51</br>1.8.0_72</br>1.7.0_79 | `-S-`</br>`USA`</br>`US-`</br>`USA` |
+| Java | 1.7.0_79</br>1.8.0_51</br>1.8.0_72</br>1.8.0_112</br>1.8.0_121 | `USA`</br>`USA`</br>`US-`</br>`-S-`</br>`-S-` |
 | JOE | 4.2 | `-SA` |
 | JUnit | 4.11-Java-1.7.0_79 | `-S-` |
-| kbproto | 1.0.6</br>1.0.6-foss-2015g</br>1.0.7-intel-2016a</br>1.0.7-foss-2016a</br>1.0.7</br>1.0.6-intel-2015b | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
+| kbproto | 1.0.6</br>1.0.6-foss-2015g</br>1.0.6-intel-2015b</br>1.0.7</br>1.0.7-foss-2016a</br>1.0.7-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
 | lam | 7.1.4-icc | `--A` |
 | LAMMPS | 28Jun14-intel-2015b | `-S-` |
 | lammps | 28Jun14 | `--A` |
 | LAPACKE | 3.5.0-LAPACK-3.5.0 | `-S-` |
 | libctl | 3.2.2-intel-2015b | `-S-` |
-| libdrm | 2.4.68-intel-2016a</br>2.4.68-foss-2016a</br>2.4.27</br>2.4.27-foss-2015g</br>2.4.67-intel-2016a | `-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-` |
-| libffi | 3.1-GNU-5.1.0-2.25</br>3.1-intel-2016.01</br>3.1-foss-2015b</br>3.1-intel-2015b</br>3.0.13-intel-2015b</br>3.2.1-intel-2017.00</br>3.0.13-foss-2015b</br>3.2.1-GCC-4.4.7-system</br>3.0.13</br>3.0.13-foss-2015g</br>3.2.1-foss-2016a | `-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`US-`</br>`U--`</br>`-SA`</br>`-S-`</br>`--A` |
+| libdrm | 2.4.27</br>2.4.27-foss-2015g</br>2.4.67-intel-2016a</br>2.4.68-foss-2016a</br>2.4.68-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
+| libevent | 2.1.8 | `-SA` |
+| libffi | 3.0.13</br>3.0.13-foss-2015b</br>3.0.13-foss-2015g</br>3.0.13-intel-2015b</br>3.1-foss-2015b</br>3.1-intel-2015b</br>3.1-intel-2016.01</br>3.1-GNU-5.1.0-2.25</br>3.2.1-foss-2016a</br>3.2.1-intel-2017.00</br>3.2.1-intel-2017a</br>3.2.1-GCC-4.4.7-system | `-SA`</br>`US-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`U--` |
 | libfontenc | 1.1.3</br>1.1.3-foss-2015g</br>1.1.3-foss-2016a</br>1.1.3-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
-| libgdiplus | 3.12</br>3.12-GCC-4.4.7-system</br>3.12-intel-2016.01</br>3.12-GNU-5.1.0-2.25 | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
+| libgdiplus | 3.12</br>3.12-intel-2016.01</br>3.12-GCC-4.4.7-system</br>3.12-GNU-5.1.0-2.25 | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
 | libGLU | 9.0.0-foss-2015g | `-S-` |
-| libICE | 1.0.9-foss-2015g</br>1.0.9-intel-2015b</br>1.0.9 | `-S-`</br>`-S-`</br>`-SA` |
-| Libint | 1.1.4-gompi-2015b</br>1.1.4-intel-2015b</br>1.1.4-foss-2015b | `-S-`</br>`US-`</br>`-S-` |
-| libjpeg-turbo | 1.4.0</br>1.4.2-intel-2017.00</br>1.4.0-foss-2015g</br>1.3.1-intel-2015b</br>1.4.1-foss-2015b</br>1.4.0-intel-2015b</br>1.3.1-foss-2015b | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`US-`</br>`-S-`</br>`-S-` |
-| libmatheval | 1.1.11-intel-2015b</br>1.1.8-foss-2016a</br>1.1.8</br>1.1.8-foss-2015b</br>1.1.8-foss-2015g</br>1.1.8-intel-2015b | `--A`</br>`--A`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-SA` |
+| libICE | 1.0.9</br>1.0.9-foss-2015g</br>1.0.9-intel-2015b | `-SA`</br>`-S-`</br>`-S-` |
+| Libint | 1.1.4-foss-2015b</br>1.1.4-gompi-2015b</br>1.1.4-intel-2015b | `-S-`</br>`-S-`</br>`US-` |
+| libjpeg-turbo | 1.3.1-foss-2015b</br>1.3.1-intel-2015b</br>1.4.0</br>1.4.0-foss-2015g</br>1.4.0-intel-2015b</br>1.4.1-foss-2015b</br>1.4.2-intel-2017.00 | `-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`US-`</br>`-S-` |
+| libmatheval | 1.1.8</br>1.1.8-foss-2015b</br>1.1.8-foss-2015g</br>1.1.8-foss-2016a</br>1.1.8-intel-2015b</br>1.1.11-intel-2015b | `-SA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-SA`</br>`--A` |
 | libMesh | 0.9.5-intel-2016.01 | `-S-` |
 | libmesh | 0.9.3-petsc-3.4.4-icc-impi-mkl-dbg</br>0.9.3-petsc-3.4.4-icc-impi-mkl-dbg-2d</br>0.9.3-petsc-3.4.4-icc-impi-mkl-opt | `--A`</br>`--A`</br>`--A` |
-| libpciaccess | 0.13.1-foss-2015g</br>0.13.4-foss-2016a</br>0.13.4-intel-2016a</br>0.13.1 | `-S-`</br>`-S-`</br>`-S-`</br>`-SA` |
+| libpciaccess | 0.13.1</br>0.13.1-foss-2015g</br>0.13.4-foss-2016a</br>0.13.4-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
 | libpng | 1.6.16-intel-2015b | `-S-` |
-| libpthread-stubs | 0.3-intel-2015b</br>0.3</br>0.3-foss-2016a</br>0.3-intel-2016a</br>0.3-foss-2015g | `-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
-| libreadline | 6.3-gimkl-2.11.5 | `--A` |
-| libSM | 1.2.2-foss-2015g</br>1.2.2</br>1.2.2-intel-2015b | `-S-`</br>`-SA`</br>`-S-` |
-| LibTIFF | 4.0.3-intel-2015b</br>4.0.3 | `-S-`</br>`-SA` |
-| libunistring | 0.9.3-foss-2015b</br>0.9.3</br>0.9.3-foss-2015g</br>0.9.3-intel-2015b</br>0.9.3-foss-2016a | `-S-`</br>`-SA`</br>`-S-`</br>`-SA`</br>`--A` |
+| libpthread-stubs | 0.3</br>0.3-foss-2015g</br>0.3-foss-2016a</br>0.3-intel-2015b</br>0.3-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
+| libreadline | 6.3-gimkl-2.11.5</br>6.3-intel-2017a | `--A`</br>`-S-` |
+| libSM | 1.2.2</br>1.2.2-foss-2015g</br>1.2.2-intel-2015b | `-SA`</br>`-S-`</br>`-S-` |
+| LibTIFF | 4.0.3</br>4.0.3-intel-2015b | `-SA`</br>`-S-` |
+| libtool | 2.4.6-intel-2017a</br>2.4.6-GCC-6.3.0-2.27 | `-S-`</br>`-SA` |
+| libunistring | 0.9.3</br>0.9.3-foss-2015b</br>0.9.3-foss-2015g</br>0.9.3-foss-2016a</br>0.9.3-intel-2015b | `-SA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-SA` |
 | libunwind | 1.1-GCC-5.4.0-2.26 | `-S-` |
-| libX11 | 1.6.2-foss-2015g-Python-2.7.9</br>1.6.3-foss-2016a</br>1.6.3-intel-2016a</br>1.6.2-Python-2.7.8</br>1.6.2-Python-2.7.9</br>1.6.2-intel-2015b-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-` |
-| libXau | 1.0.8-intel-2016a</br>1.0.8-foss-2015g</br>1.0.8-foss-2016a</br>1.0.8</br>1.0.8-intel-2015b | `-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-` |
-| libxc | 2.2.0-gompi-2015b</br>2.2.1-intel-2015b</br>2.2.0-foss-2015b | `-S-`</br>`US-`</br>`-S-` |
-| libxcb | 1.11.1-foss-2016a</br>1.11-foss-2015g-Python-2.7.9</br>1.11.1-intel-2016a</br>1.10-Python-2.7.8</br>1.11-Python-2.7.9</br>1.11-intel-2015b-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-` |
-| libXdamage | 1.1.4-intel-2016a</br>1.1.4-foss-2016a</br>1.1.4-foss-2015g-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-` |
-| libXdmcp | 1.1.2-intel-2016a</br>1.1.2</br>1.1.2-foss-2015g</br>1.1.2-intel-2015b</br>1.1.2-foss-2016a | `-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
-| libXext | 1.3.3-intel-2016a</br>1.3.3-foss-2015g-Python-2.7.9</br>1.3.3-intel-2015b</br>1.3.2-Python-2.7.8</br>1.3.3-foss-2016a</br>1.3.3</br>1.3.3-foss-2015g | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-` |
-| libXfixes | 5.0.1</br>5.0.1-intel-2016a</br>5.0.1-foss-2015g</br>5.0.1-foss-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
-| libXfont | 1.5.1-Python-2.7.9</br>1.5.1-intel-2016a-freetype-2.6.3</br>1.5.1-intel-2016a</br>1.5.1-foss-2016a-freetype-2.6.3</br>1.5.1-foss-2015g-Python-2.7.9 | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
+| libX11 | 1.6.2-foss-2015g-Python-2.7.9</br>1.6.2-intel-2015b-Python-2.7.9</br>1.6.2-Python-2.7.8</br>1.6.2-Python-2.7.9</br>1.6.3-foss-2016a</br>1.6.3-intel-2016a | `-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-` |
+| libXau | 1.0.8</br>1.0.8-foss-2015g</br>1.0.8-foss-2016a</br>1.0.8-intel-2015b</br>1.0.8-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
+| libxc | 2.2.0-foss-2015b</br>2.2.0-gompi-2015b</br>2.2.1-intel-2015b | `-S-`</br>`-S-`</br>`US-` |
+| libxcb | 1.10-Python-2.7.8</br>1.11.1-foss-2016a</br>1.11.1-intel-2016a</br>1.11-foss-2015g-Python-2.7.9</br>1.11-intel-2015b-Python-2.7.9</br>1.11-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA` |
+| libXdamage | 1.1.4-foss-2015g-Python-2.7.9</br>1.1.4-foss-2016a</br>1.1.4-intel-2016a | `-S-`</br>`-S-`</br>`-S-` |
+| libXdmcp | 1.1.2</br>1.1.2-foss-2015g</br>1.1.2-foss-2016a</br>1.1.2-intel-2015b</br>1.1.2-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
+| libXext | 1.3.2-Python-2.7.8</br>1.3.3</br>1.3.3-foss-2015g</br>1.3.3-foss-2015g-Python-2.7.9</br>1.3.3-foss-2016a</br>1.3.3-intel-2015b</br>1.3.3-intel-2016a | `-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
+| libXfixes | 5.0.1</br>5.0.1-foss-2015g</br>5.0.1-foss-2016a</br>5.0.1-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
+| libXfont | 1.5.1-foss-2015g-Python-2.7.9</br>1.5.1-foss-2016a-freetype-2.6.3</br>1.5.1-intel-2016a</br>1.5.1-intel-2016a-freetype-2.6.3</br>1.5.1-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA` |
 | libXft | 2.3.2-intel-2015b | `-S-` |
 | libXinerama | 1.1.3-intel-2015b | `-S-` |
-| libxml2 | 2.9.2</br>2.9.2-GCC-4.4.7-system</br>2.9.2-foss-2015b</br>2.9.2-foss-2015g</br>2.9.3-foss-2016a</br>2.9.2-ictce-7.3.5</br>2.9.2-gompi-2015e</br>2.9.3-intel-2017.00</br>2.9.2-GNU-5.1.0-2.25</br>2.9.2-foss-2015g-Python-2.7.9</br>2.9.2-GCC-4.9.3-2.25</br>2.9.2-intel-2015b</br>2.9.2-GNU-4.9.3-2.25</br>2.9.3-intel-2016a | `-S-`</br>`U--`</br>`US-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`US-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-SA` |
+| libxml2 | 2.9.2</br>2.9.2-foss-2015b</br>2.9.2-foss-2015g</br>2.9.2-foss-2015g-Python-2.7.9</br>2.9.2-gompi-2015e</br>2.9.2-ictce-7.3.5</br>2.9.2-intel-2015b</br>2.9.2-GCC-4.4.7-system</br>2.9.2-GCC-4.9.3-2.25</br>2.9.2-GNU-4.9.3-2.25</br>2.9.2-GNU-5.1.0-2.25</br>2.9.3-foss-2016a</br>2.9.3-intel-2016a</br>2.9.3-intel-2017.00 | `-S-`</br>`US-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`U--`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-SA`</br>`US-` |
 | libXrender | 0.9.8</br>0.9.8-intel-2015b | `-S-`</br>`-S-` |
 | libxslt | 1.1.28-intel-2015b | `-S-` |
-| libXt | 1.1.4-libX11-1.6.2</br>1.1.4-foss-2015g-libX11-1.6.2</br>1.1.4-intel-2015b-libX11-1.6.2</br>1.1.5-foss-2015g | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
+| libXt | 1.1.4-foss-2015g-libX11-1.6.2</br>1.1.4-intel-2015b-libX11-1.6.2</br>1.1.4-libX11-1.6.2</br>1.1.5-foss-2015g | `-S-`</br>`-S-`</br>`-SA`</br>`-S-` |
 | libyaml | 0.1.6-intel-2015b | `-S-` |
-| likwid | 3.1.2-mic</br>3.1.2-gcc</br>3.1.1-icc</br>4.1.2-intel</br>4.1.2-gcc</br>3.1.1-mic</br>3.1.2-icc | `--A`</br>`--A`</br>`--A`</br>`-S-`</br>`-S-`</br>`--A`</br>`--A` |
-| LLVM | 3.8.0-foss-2016a</br>3.7.1-foss-2015g</br>3.8.0-intel-2016a</br>3.7.1-intel-2016a</br>3.9.0-intel-2017.00 | `US-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`US-` |
+| likwid | 3.1.1-icc</br>3.1.1-mic</br>3.1.2-gcc</br>3.1.2-icc</br>3.1.2-mic</br>4.1.2-gcc</br>4.1.2-intel | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A`</br>`-S-`</br>`-S-` |
+| LLVM | 3.7.1-foss-2015g</br>3.7.1-intel-2016a</br>3.8.0-foss-2016a</br>3.8.0-intel-2016a</br>3.9.0-intel-2017.00 | `-S-`</br>`-S-`</br>`US-`</br>`-S-`</br>`US-` |
 | llvm | 3.6.0 | `--A` |
 | lmod | 7.2.2 | `USA` |
-| Lmod | 7.2.2</br>7.0.6 | `-SA`</br>`-S-` |
+| Lmod | 7.0.6</br>7.2.2 | `-S-`</br>`-SA` |
 | lsdyna | 7.x.x | `--A` |
 | lsprepost | 4.2 | `--A` |
 | Lua | 5.1.4-8 | `USA` |
 | lux | 1.3.1 | `--A` |
 | lxml | 3.4.4-intel-2015b-Python-2.7.9 | `-S-` |
-| M4 | 1.4.17-intel-2016.01</br>1.4.17-foss-2015b</br>1.4.17-GNU-4.9.3-2.25</br>1.4.17-GCCcore-4.9.3</br>1.4.17-foss-2015g</br>1.4.17-GCC-4.9.3-2.25</br>1.4.17-foss-2016a</br>1.4.17-intel-2015b</br>1.4.17-GCCcore-5.4.0</br>1.4.17</br>1.4.17-GCC-5.1.0-binutils-2.25</br>1.4.17-GNU-5.1.0-2.25</br>1.4.17-GCCcore-5.3.0</br>1.4.17-intel-2016a</br>1.4.16-foss-2015g</br>1.4.16-intel-2015b</br>1.4.17-intel-2017.00</br>1.4.17-GCC-4.9.3</br>1.4.17-GCC-4.9.3-binutils-2.25</br>1.4.17-GCCcore-6.2.0</br>1.4.17-GCCcore-5.3.1-snapshot-20160419 | `-S-`</br>`US-`</br>`USA`</br>`USA`</br>`-S-`</br>`-S-`</br>`USA`</br>`USA`</br>`USA`</br>`USA`</br>`-SA`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`USA`</br>`--A`</br>`USA`</br>`US-`</br>`-S-` |
-| magma | 1.3.0-mic</br>1.1.0-mic | `--A`</br>`--A` |
+| M4 | 1.4.16-foss-2015g</br>1.4.16-intel-2015b</br>1.4.17</br>1.4.17-foss-2015b</br>1.4.17-foss-2015g</br>1.4.17-foss-2016a</br>1.4.17-intel-2015b</br>1.4.17-intel-2016.01</br>1.4.17-intel-2016a</br>1.4.17-intel-2017.00</br>1.4.17-GCCcore-4.9.3</br>1.4.17-GCCcore-5.3.0</br>1.4.17-GCCcore-5.3.1-snapshot-20160419</br>1.4.17-GCCcore-5.4.0</br>1.4.17-GCCcore-6.2.0</br>1.4.17-GCC-4.9.3</br>1.4.17-GCC-4.9.3-2.25</br>1.4.17-GCC-4.9.3-binutils-2.25</br>1.4.17-GCC-5.1.0-binutils-2.25</br>1.4.17-GNU-4.9.3-2.25</br>1.4.17-GNU-5.1.0-2.25</br>1.4.18-GCCcore-6.3.0 | `-S-`</br>`-SA`</br>`USA`</br>`US-`</br>`-S-`</br>`USA`</br>`USA`</br>`-S-`</br>`-S-`</br>`USA`</br>`USA`</br>`-SA`</br>`-S-`</br>`USA`</br>`US-`</br>`--A`</br>`-S-`</br>`USA`</br>`-SA`</br>`USA`</br>`USA`</br>`-SA` |
+| magma | 1.1.0-mic</br>1.3.0-mic | `--A`</br>`--A` |
 | make | 3.82</br>3.82-intel-2015b | `-SA`</br>`US-` |
-| makedepend | 1.0.4</br>1.0.5-foss-2016a</br>1.0.4-foss-2015g</br>1.0.5-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
-| MAP | 5.0.1</br>4.2 | `-S-`</br>`-S-` |
+| makedepend | 1.0.4</br>1.0.4-foss-2015g</br>1.0.5-foss-2016a</br>1.0.5-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
+| MAP | 4.2</br>5.0.1 | `-S-`</br>`-S-` |
 | marc | 2011</br>2013.1 | `--A`</br>`--A` |
 | Marc | 2013.1.0 | `-S-` |
-| matlab | R2013a-EDU</br>R2014a-EDU</br>R2014a-COM</br>R2013a-COM | `--A`</br>`--A`</br>`--A`</br>`--A` |
-| MATLAB | 2015b-EDU</br>2015a-EDU</br>2015a-COM</br>2015b-COM | `-SA`</br>`US-`</br>`US-`</br>`-SA` |
+| MATIO | 1.5.2-intel-2017a | `-S-` |
+| matlab | R2013a-COM</br>R2013a-EDU</br>R2014a-COM</br>R2014a-EDU | `--A`</br>`--A`</br>`--A`</br>`--A` |
+| MATLAB | 2015a-COM</br>2015a-EDU</br>2015b-COM</br>2015b-EDU | `US-`</br>`US-`</br>`-SA`</br>`-SA` |
 | matplotlib | 1.4.3-intel-2015b-Python-2.7.9 | `-S-` |
 | Maven | 3.3.9 | `USA` |
 | maxwell | 3.0 | `--A` |
 | Meep | 1.3-intel-2015b | `-S-` |
 | memoryscape | 3.4 | `--A` |
 | mercurial | 2.9.1 | `--A` |
-| Mercurial | 3.7.3-foss-2015g-Python-2.7.9</br>3.5-Python-2.7.9 | `USA`</br>`-S-` |
+| Mercurial | 3.5-Python-2.7.9</br>3.7.3-foss-2015g-Python-2.7.9 | `-S-`</br>`USA` |
 | Mesa | 11.0.8-foss-2015g-Python-2.7.9</br>11.2.1-foss-2016a | `-S-`</br>`-S-` |
-| METIS | 5.1.0-intel-2017.00</br>5.1.0-intel-2016.01-32bitIDX</br>5.1.0-intel-2015b</br>5.1.0-intel-2016.01 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
-| MIKE | default</br>2014</br>2016</br>2016-SP2 | `--A`</br>`-SA`</br>`-SA`</br>`-SA` |
-| mkl | 14.0.1</br>13.5.192</br>15.3.187</br>15.2.164 | `--A`</br>`--A`</br>`--A`</br>`--A` |
+| METIS | 5.1.0-intel-2015b</br>5.1.0-intel-2016.01</br>5.1.0-intel-2016.01-32bitIDX</br>5.1.0-intel-2017.00 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
+| MIKE | 2014</br>2016</br>2016-SP2</br>default | `-SA`</br>`-SA`</br>`-SA`</br>`--A` |
+| mkl | 13.5.192</br>14.0.1</br>15.2.164</br>15.3.187 | `--A`</br>`--A`</br>`--A`</br>`--A` |
 | MLD2P4 | 2.0-rc4-GCC-4.9.3-2.25 | `--A` |
 | modflow-2005 | 1.11.00 | `--A` |
-| modflow-nwt | 1.0.9-aquaveo</br>1.0.9 | `--A`</br>`--A` |
+| modflow-nwt | 1.0.9</br>1.0.9-aquaveo | `--A`</br>`--A` |
 | Molpro | 2010.1-patch-57-intel2015b | `-S-` |
 | molpro | 2010.1-p45-intel | `--A` |
-| mono | 3.12.1</br>3.2.3 | `--A`</br>`--A` |
-| Mono | 3.12.1-GCC-4.4.7-system</br>4.2.2.10-intel-2016.01</br>3.12.1</br>4.0.3.20-GNU-5.1.0-2.25 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
+| mono | 3.2.3</br>3.12.1 | `--A`</br>`--A` |
+| Mono | 3.12.1</br>3.12.1-GCC-4.4.7-system</br>4.0.3.20-GNU-5.1.0-2.25</br>4.2.2.10-intel-2016.01</br>4.6.2.16 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA` |
 | motif | 2.3.4-foss-2015g-libX11-1.6.2</br>2.3.4-intel-2015b-libX11-1.6.2 | `-S-`</br>`-S-` |
 | MPC | 1.0.2-intel-2017.00 | `-S-` |
 | MPFR | 3.1.5-intel-2017.00 | `-S-` |
-| mpi.net | 1.0.0-mono-3.12.1</br>1.0.0</br>1.0.0-impi | `--A`</br>`--A`</br>`--A` |
+| mpi.net | 1.0.0</br>1.0.0-impi</br>1.0.0-mono-3.12.1 | `--A`</br>`--A`</br>`--A` |
 | MPI_NET | 1.2.0-gompi-2015e</br>1.2.0-intel-2016.01 | `-S-`</br>`-S-` |
-| MPICH | 3.2-GCC-5.3.1-snapshot-20160419-2.25</br>3.2-GCC-5.3.0-2.25</br>3.2-GCC-4.9.3-2.25 | `-S-`</br>`-S-`</br>`--A` |
+| MPICH | 3.2-GCC-4.9.3-2.25</br>3.2-GCC-5.3.0-2.25</br>3.2-GCC-5.3.1-snapshot-20160419-2.25 | `--A`</br>`-S-`</br>`-S-` |
 | mpt | 2.12 | `-S-` |
-| mvapich2 | 1.9-icc</br>1.9-gcc</br>1.9-gcc46 | `--A`</br>`--A`</br>`--A` |
-| MVAPICH2 | 2.1-GNU-5.1.0-2.25</br>2.1-iccifort-2015.3.187-GNU-5.1.0-2.25</br>2.1-GCC-4.4.7-system | `-S-`</br>`-S-`</br>`-S-` |
+| MUMPS | 5.0.2-intel-2017a-parmetis | `-S-` |
+| mvapich2 | 1.9-gcc</br>1.9-gcc46</br>1.9-icc | `--A`</br>`--A`</br>`--A` |
+| MVAPICH2 | 2.1-iccifort-2015.3.187-GNU-5.1.0-2.25</br>2.1-GCC-4.4.7-system</br>2.1-GNU-5.1.0-2.25 | `-S-`</br>`-S-`</br>`-S-` |
 | mxml | 2.9 | `--A` |
 | namd | 2.8 | `--A` |
 | NAMD | 2.9-mpi | `-S-` |
-| NASM | 2.11.05</br>2.11.08-intel-2017.00</br>2.11.05-foss-2015g</br>2.11.05-foss-2015b</br>2.11.06-intel-2015b</br>2.11.05-intel-2015b</br>2.11.08-foss-2015b | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`US-` |
+| NASM | 2.11.05</br>2.11.05-foss-2015b</br>2.11.05-foss-2015g</br>2.11.05-intel-2015b</br>2.11.06-intel-2015b</br>2.11.08-foss-2015b</br>2.11.08-intel-2017.00 | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`US-`</br>`-S-` |
 | nastran | 2013.1.1 | `--A` |
-| ncurses | 5.9-GCC-4.9.3-2.25</br>5.9-gimkl-2.11.5 | `--A`</br>`--A` |
-| netcdf | 4.3.0</br>4.2.1.1 | `--A`</br>`--A` |
-| netCDF | 4.4.0-intel-2017.00</br>4.3.2-intel-2016.01</br>4.3.3.1-intel-2017.00</br>4.3.3.1-foss-2015b</br>4.3.2-foss-2015g</br>4.3.2-intel-2015b | `-S-`</br>`-S-`</br>`-S-`</br>`US-`</br>`-S-`</br>`US-` |
+| ncurses | 5.9-gimkl-2.11.5</br>5.9-GCC-4.9.3-2.25</br>6.0-intel-2017a | `--A`</br>`--A`</br>`-S-` |
+| netcdf | 4.2.1.1</br>4.3.0 | `--A`</br>`--A` |
+| netCDF | 4.3.2-foss-2015g</br>4.3.2-intel-2015b</br>4.3.2-intel-2016.01</br>4.3.3.1-foss-2015b</br>4.3.3.1-intel-2017.00</br>4.4.0-intel-2017.00</br>4.4.1-intel-2017a | `-S-`</br>`US-`</br>`-S-`</br>`US-`</br>`-S-`</br>`-S-`</br>`-S-` |
 | netcdf-cxx | 4.2 | `--A` |
 | netcdf-fortran | 4.2 | `--A` |
 | netCDF-Fortran | 4.4.0-intel-2016.01 | `-S-` |
 | netcdf-parallel | 4.3.0 | `--A` |
 | ngsPipeline | 1.0.0 | `--A` |
-| numactl | 2.0.9-GCC-4.4.7-system</br>2.0.10-iccifort-2015.3.187-GNU-4.9.3-2.25</br>2.0.11-GCC-5.3.0-2.26</br>2.0.9</br>2.0.10-GNU-4.9.3-2.25</br>2.0.11-GCC-4.9.3-2.25</br>2.0.10-GNU-5.1.0-2.25</br>2.0.11</br>2.0.10 | `-S-`</br>`-SA`</br>`-SA`</br>`--A`</br>`USA`</br>`USA`</br>`USA`</br>`-SA`</br>`-SA` |
-| numpy | 1.8.2-intel-2015b-Python-2.7.9</br>1.8.2-intel-2015b-Python-2.7.11</br>1.9.1-intel-2015b-Python-2.7.9</br>1.8.2-intel-2016.01-Python-2.7.9 | `-SA`</br>`-SA`</br>`-S-`</br>`-SA` |
+| numactl | 2.0.9</br>2.0.9-GCC-4.4.7-system</br>2.0.10</br>2.0.10-iccifort-2015.3.187-GNU-4.9.3-2.25</br>2.0.10-GNU-4.9.3-2.25</br>2.0.10-GNU-5.1.0-2.25</br>2.0.11</br>2.0.11-GCCcore-5.4.0</br>2.0.11-GCC-4.9.3-2.25</br>2.0.11-GCC-5.3.0-2.26</br>2.0.11-GCC-6.2.0-2.27</br>2.0.11-GCC-6.3.0-2.27 | `--A`</br>`-S-`</br>`-SA`</br>`-SA`</br>`USA`</br>`USA`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-SA` |
+| numpy | 1.8.2-intel-2015b-Python-2.7.9</br>1.8.2-intel-2015b-Python-2.7.11</br>1.8.2-intel-2016.01-Python-2.7.9</br>1.9.1-intel-2015b-Python-2.7.9 | `-SA`</br>`-SA`</br>`-SA`</br>`-S-` |
 | NWChem | 6.3.revision2-2013-10-17-Python-2.7.8</br>6.5.revision26243-intel-2015b-2014-09-10-Python-2.7.8 | `-S-`</br>`-S-` |
-| nwchem | 6.3-rev2-patch1</br>6.3-rev2-patch1-venus</br>6.3-rev2-patch1-openmpi</br>6.1.1 | `--A`</br>`--A`</br>`--A`</br>`--A` |
-| Octave | 3.8.2-gimkl-2.11.5</br>3.8.2-foss-2015g</br>4.0.1-gimkl-2.11.5</br>4.0.0-foss-2015g</br>3.8.2-intel-2015b | `--A`</br>`-S-`</br>`--A`</br>`-SA`</br>`-S-` |
+| nwchem | 6.1.1</br>6.3-rev2-patch1</br>6.3-rev2-patch1-openmpi</br>6.3-rev2-patch1-venus | `--A`</br>`--A`</br>`--A`</br>`--A` |
+| Octave | 3.8.2-foss-2015g</br>3.8.2-gimkl-2.11.5</br>3.8.2-intel-2015b</br>4.0.0-foss-2015g</br>4.0.1-gimkl-2.11.5 | `-S-`</br>`--A`</br>`-S-`</br>`-SA`</br>`--A` |
 | opari2 | 1.1.2-gcc</br>1.1.2-icc | `--A`</br>`--A` |
 | OPARI2 | 1.1.4-intel-2015b</br>2.0 | `-S-`</br>`-SA` |
-| OpenBLAS | 0.2.15-GCC-4.9.3-2.25-LAPACK-3.6.0</br>0.2.14-GNU-5.1.0-2.25-LAPACK-3.5.0</br>0.2.14-gompi-2015e-LAPACK-3.5.0</br>0.2.18-GCC-5.3.0-2.26-LAPACK-3.6.0</br>0.2.19-GCC-5.4.0-2.26-LAPACK-3.6.0</br>0.2.15-GCC-5.1.0-binutils-2.25-LAPACK-3.6.0</br>0.2.14-LAPACK-3.5.0</br>0.2.14-GNU-4.9.3-2.25-LAPACK-3.5.0</br>0.2.15-GCC-5.1.0-binutils-2.25-LAPACK-3.6.0-gompi-2016a | `USA`</br>`USA`</br>`US-`</br>`-SA`</br>`-S-`</br>`--A`</br>`-S-`</br>`USA`</br>`--A` |
+| OpenBLAS | 0.2.14-gompi-2015e-LAPACK-3.5.0</br>0.2.14-GNU-4.9.3-2.25-LAPACK-3.5.0</br>0.2.14-GNU-5.1.0-2.25-LAPACK-3.5.0</br>0.2.14-LAPACK-3.5.0</br>0.2.15-GCC-4.9.3-2.25-LAPACK-3.6.0</br>0.2.15-GCC-5.1.0-binutils-2.25-LAPACK-3.6.0</br>0.2.15-GCC-5.1.0-binutils-2.25-LAPACK-3.6.0-gompi-2016a</br>0.2.18-GCC-5.3.0-2.26-LAPACK-3.6.0</br>0.2.19-GCC-5.4.0-2.26-LAPACK-3.6.0</br>0.2.19-GCC-6.3.0-2.27-LAPACK-3.7.0 | `US-`</br>`USA`</br>`USA`</br>`-S-`</br>`USA`</br>`--A`</br>`--A`</br>`-SA`</br>`-S-`</br>`-SA` |
 | OpenCL-builder | 2015 | `-S-` |
 | opencl-rt | 4.5.0.8 | `--A` |
 | OpenCL-runtime | 15.1 | `-S-` |
 | opencl-sdk | 4.6.0.92 | `--A` |
-| OpenCoarrays | 1.4.0-GCC-5.3.0-2.25</br>1.0.1-GNU-5.1.0-2.25</br>1.4.0-GCC-5.3.1-snapshot-20160419-2.25</br>1.0.0-GNU-5.1.0-2.25 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
+| OpenCoarrays | 1.0.0-GNU-5.1.0-2.25</br>1.0.1-GNU-5.1.0-2.25</br>1.4.0-GCC-5.3.0-2.25</br>1.4.0-GCC-5.3.1-snapshot-20160419-2.25 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
 | OpenCV | 2.4.9-intel-2015b</br>3.0.0-intel-2015b | `-S-`</br>`-S-` |
 | OpenDX | 4.4.4-foss-2015g | `-S-` |
-| openfoam | 2.2.1-gcc481-openmpi1.6.5-DP</br>2.2.2-icc-openmpi1.8.1-DP</br>2.2.1-icc-openmpi1.6.5-DP</br>2.2.1-gcc481-openmpi1.6.5-SP</br>2.2.1-icc-impi4.1.1.036-DP | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
+| openfoam | 2.2.1-gcc481-openmpi1.6.5-DP</br>2.2.1-gcc481-openmpi1.6.5-SP</br>2.2.1-icc-impi4.1.1.036-DP</br>2.2.1-icc-openmpi1.6.5-DP</br>2.2.2-icc-openmpi1.8.1-DP | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
 | OpenFOAM | 2.2.2-intel-2015b</br>2.3.0-intel-2015b</br>3.0.0-intel-2016.01 | `US-`</br>`US-`</br>`-S-` |
-| openmpi | 1.6.5-gcc46</br>1.8.1-gcc</br>1.6.5-gcc</br>1.6.5-icc</br>1.8.1-gcc49</br>1.8.1-icc</br>1.8.1-gcc46 | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
-| OpenMPI | 1.8.6-iccifort-2015.3.187-GNU-5.1.0-2.25</br>1.8.6-GCC-4.4.7-system</br>1.10.1-GNU-4.9.3-2.25</br>1.8.8-iccifort-2015.3.187-GNU-4.9.3-2.25</br>1.10.2-GCC-4.9.3-2.25</br>1.8.8-GNU-5.1.0-2.25</br>1.10.2-GCC-5.3.0-2.26</br>1.10.1-GCC-4.9.3-2.25</br>1.8.6-GNU-5.1.0-2.25</br>1.8.8-GNU-4.9.3-2.25 | `-S-`</br>`US-`</br>`US-`</br>`-SA`</br>`USA`</br>`-S-`</br>`-SA`</br>`-S-`</br>`US-`</br>`USA` |
+| openmpi | 1.6.5-gcc</br>1.6.5-gcc46</br>1.6.5-icc</br>1.8.1-gcc</br>1.8.1-gcc46</br>1.8.1-gcc49</br>1.8.1-icc | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
+| OpenMPI | 1.8.6-iccifort-2015.3.187-GNU-5.1.0-2.25</br>1.8.6-GCC-4.4.7-system</br>1.8.6-GNU-5.1.0-2.25</br>1.8.8-iccifort-2015.3.187-GNU-4.9.3-2.25</br>1.8.8-GNU-4.9.3-2.25</br>1.8.8-GNU-5.1.0-2.25</br>1.10.1-GCC-4.9.3-2.25</br>1.10.1-GNU-4.9.3-2.25</br>1.10.2-GCC-4.9.3-2.25</br>1.10.2-GCC-5.3.0-2.26</br>2.0.1-iccifort-2017.1.132-GCC-5.4.0-2.26</br>2.0.2-GCC-6.3.0-2.27 | `-S-`</br>`US-`</br>`US-`</br>`-SA`</br>`USA`</br>`-S-`</br>`-S-`</br>`US-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-SA` |
 | openssh-x509 | 6.2p2 | `--A` |
 | ORCA | 3_0_3-linux_x86-64 | `-SA` |
 | oscar-modules | 1.0.3 | `-S-` |
 | OSPRay | 0.9.1 | `-S-` |
 | OTF2 | 1.4-intel-2015b</br>2.0</br>2.0-intel-2015b-mic | `-S-`</br>`-SA`</br>`-S-` |
-| otf2 | 1.4-icc</br>1.2.1-gcc</br>1.4-gcc</br>1.2.1-icc | `--A`</br>`--A`</br>`--A`</br>`--A` |
+| otf2 | 1.2.1-gcc</br>1.2.1-icc</br>1.4-gcc</br>1.4-icc | `--A`</br>`--A`</br>`--A`</br>`--A` |
 | p4vasp | 0.3.29-GNU-4.9.3-2.25 | `-S-` |
-| PAPI | 5.4.3-pic</br>5.4.3</br>5.4.0-p-mic</br>5.4.0-intel-2015b</br>5.4.3-intel-2015b-mic</br>5.4.0-mic | `-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
-| papi | 5.3.2-mic</br>5.4.0</br>5.3.2</br>5.4.0-mic</br>5.3.0 | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
-| parallel | 20150322-GNU-5.1.0-2.25</br>20141122</br>20150322 | `-S-`</br>`--A`</br>`-S-` |
-| ParaView | 5.0.0-binary</br>4.3-OSPRay | `-S-`</br>`-S-` |
+| PAPI | 5.4.0-intel-2015b</br>5.4.0-mic</br>5.4.0-p-mic</br>5.4.3</br>5.4.3-intel-2015b-mic</br>5.4.3-pic | `-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-` |
+| papi | 5.3.0</br>5.3.2</br>5.3.2-mic</br>5.4.0</br>5.4.0-mic | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
+| parallel | 20141122</br>20150322</br>20150322-GNU-5.1.0-2.25 | `--A`</br>`-S-`</br>`-S-` |
+| ParaView | 4.3-OSPRay</br>5.0.0-binary | `-S-`</br>`-S-` |
 | paraview | 4.0.1-gcc481-bullxmpi1.2.4.1-osmesa10.0 | `--A` |
-| ParMETIS | 4.0.3-intel-2015b</br>4.0.3-intel-2016.01 | `-S-`</br>`-S-` |
-| PCRE | 8.37-foss-2015g</br>8.37-intel-2016.01</br>8.39-intel-2017.00</br>8.36-foss-2015g</br>8.36-intel-2015b</br>8.37</br>8.37-gimkl-2.11.5</br>8.36-intel-2016.01 | `-SA`</br>`--A`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`--A`</br>`-S-` |
+| ParMETIS | 4.0.3-intel-2015b</br>4.0.3-intel-2016.01</br>4.0.3-intel-2017a | `-S-`</br>`-S-`</br>`-S-` |
+| PCRE | 8.36-foss-2015g</br>8.36-intel-2015b</br>8.36-intel-2016.01</br>8.37</br>8.37-foss-2015g</br>8.37-gimkl-2.11.5</br>8.37-intel-2016.01</br>8.39-intel-2017.00 | `-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-SA`</br>`--A`</br>`--A`</br>`-S-` |
 | PCRE2 | 10.22-intel-2017.00 | `-S-` |
 | perfboost | 1.0 | `-S-` |
 | perfcatcher | 1.0 | `-S-` |
 | PerformanceReports | 5.1-43967</br>6.0.6 | `-S-`</br>`-SA` |
 | PerfReports | 5.0.1 | `-S-` |
 | perfsuite | 1a5.3 | `-S-` |
-| Perl | 5.20.2-GNU-4.9.3-2.25-bare</br>5.22.2-intel-2017.00</br>5.20.2-bare</br>5.16.3-intel-2015b</br>5.24.0-GCC-4.9.3-2.25-bare | `-SA`</br>`US-`</br>`-S-`</br>`US-`</br>`--A` |
+| Perl | 5.16.3-intel-2015b</br>5.20.2-bare</br>5.20.2-GNU-4.9.3-2.25-bare</br>5.22.2-intel-2017.00</br>5.24.0-GCC-4.9.3-2.25-bare | `US-`</br>`-S-`</br>`-SA`</br>`US-`</br>`--A` |
 | pest | 13.0 | `--A` |
-| petsc | 3.5.3-icc15-impi-mkl-threads-opt</br>3.7.3-icc16-impi5-mkl-opt</br>3.5.3-icc15-impi-mkl-threads-dbg</br>3.5.3-icc15-impi-mkl-dbg</br>3.5.3-icc15-impi-mkl-opt</br>3.7.3-icc16-impi5-mkl-dbg | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
+| petsc | 3.5.3-icc15-impi-mkl-dbg</br>3.5.3-icc15-impi-mkl-opt</br>3.5.3-icc15-impi-mkl-threads-dbg</br>3.5.3-icc15-impi-mkl-threads-opt</br>3.7.3-icc16-impi5-mkl-dbg</br>3.7.3-icc16-impi5-mkl-opt | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
 | PETSc | 3.6.1-intel-2015b-Python-2.7.9</br>3.6.1-intel-2016.01-Python-2.7.9</br>3.6.3-intel-2015b-Python-2.7.11 | `-S-`</br>`-S-`</br>`-S-` |
-| PGI | 16.10-GNU-4.9.3-2.25</br>15.7 | `-S-`</br>`-S-` |
-| phono3py | 1.11.7.8-intel-2015b-Python-2.7.9</br>1.11.7.8-intel-2015b-Python-2.7.11 | `-S-`</br>`--A` |
+| PGI | 15.7</br>16.10-GNU-4.9.3-2.25 | `-S-`</br>`-S-` |
+| phono3py | 0.9.14-ictce-7.3.5-Python-2.7.9</br>1.11.7.8-intel-2015b-Python-2.7.9</br>1.11.7.8-intel-2015b-Python-2.7.11 | `-S-`</br>`-S-`</br>`--A` |
 | phonopy | 1.11.6.7-intel-2015b-Python-2.7.9</br>1.11.6.7-intel-2015b-Python-2.7.11 | `-S-`</br>`--A` |
-| picard | 2.1.0</br>1.117</br>1.119 | `-S-`</br>`--A`</br>`-S-` |
+| picard | 1.117</br>1.119</br>2.1.0 | `--A`</br>`-S-`</br>`-S-` |
 | pigz | 2.3.3-GCC-6.2.0-2.27 | `US-` |
-| pixman | 0.32.6-intel-2015b</br>0.32.6-foss-2015b</br>0.32.6 | `-S-`</br>`-S-`</br>`-SA` |
-| pkg-config | 0.29.1-foss-2016a</br>0.29</br>0.27.1-foss-2015g</br>0.27.1-foss-2015b</br>0.29.1-intel-2016a</br>0.29-foss-2016a</br>0.27.1-intel-2015b</br>0.27.1</br>0.29-intel-2016a | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-SA`</br>`-SA`</br>`-S-` |
+| pixman | 0.32.6</br>0.32.6-foss-2015b</br>0.32.6-intel-2015b | `-SA`</br>`-S-`</br>`-S-` |
+| pkg-config | 0.27.1</br>0.27.1-foss-2015b</br>0.27.1-foss-2015g</br>0.27.1-intel-2015b</br>0.29</br>0.29.1-foss-2016a</br>0.29.1-intel-2016a</br>0.29-foss-2016a</br>0.29-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-` |
 | plasma | 2.6.0 | `--A` |
-| PLUMED | 2.3b-foss-2015g</br>2.2.1-intel-2015b</br>2.1.3-foss-2015g</br>2.3b-foss-2016a | `-S-`</br>`--A`</br>`-S-`</br>`--A` |
+| PLUMED | 2.1.3-foss-2015g</br>2.2.1-intel-2015b</br>2.3b-foss-2015g</br>2.3b-foss-2016a | `-S-`</br>`--A`</br>`-S-`</br>`-SA` |
 | prace | 20160107-intel-2016.01 | `-S-` |
-| PRACE | prace</br>20150630-intel-2015b | `--A`</br>`US-` |
-| PrgEnv-gnu | 4.8.1</br>4.4.6</br>4.4.6-test | `--A`</br>`--A`</br>`--A` |
-| PrgEnv-intel | 14.0.1</br>13.5.192</br>15.0.3 | `--A`</br>`--A`</br>`--A` |
-| PROJ | 4.9.2-intel-2017.00</br>4.8.0-foss-2015b | `-S-`</br>`US-` |
+| PRACE | 20150630-intel-2015b</br>prace | `US-`</br>`--A` |
+| PrgEnv-gnu | 4.4.6</br>4.4.6-test</br>4.8.1 | `--A`</br>`--A`</br>`--A` |
+| PrgEnv-intel | 13.5.192</br>14.0.1</br>15.0.3 | `--A`</br>`--A`</br>`--A` |
+| PROJ | 4.8.0-foss-2015b</br>4.9.2-intel-2017.00 | `US-`</br>`-S-` |
 | PROJ_4 | 4.9.2-foss-2015g | `-S-` |
 | PSBLAS | 3.3.4-3-GCC-4.9.3-2.25 | `--A` |
 | PSBLAS-ext | 1.0-4-GCC-4.9.3-2.25 | `--A` |
 | PyQt | 4.11.3-foss-2015g-Python-2.7.9</br>4.11.4-foss-2015g-Python-2.7.9 | `-S-`</br>`-S-` |
-| python | 3.4.2</br>2.7.5</br>2.7.6</br>3.3.2</br>3.3.5 | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
-| Python | 2.7.11-intel-2015b</br>2.7.10-GNU-4.9.3-2.25-bare</br>2.7.8-intel-2015b</br>3.5.2</br>2.7.9-foss-2015g</br>2.7.9-foss-2015b</br>3.5.2-intel-2017.00</br>3.5.1-intel-2017.00</br>2.7.9-GNU-5.1.0-2.25</br>2.7.9-intel-2015b</br>3.4.3-intel-2015b</br>2.7.9-gompi-2015e</br>2.7.9-ictce-7.3.5</br>2.7.11-GCC-4.9.3-2.25-bare</br>2.7.8-intel-2016.01</br>2.7.11-intel-2016a</br>2.7.9</br>2.7.9-intel-2016.01</br>3.5.1</br>2.7.11-foss-2016a</br>3.5.2-foss-2016a</br>3.5.1-intel-2016.01</br>2.7.10-GCC-4.9.3-2.25-bare</br>2.7.11-intel-2017.00 | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`USA`</br>`US-`</br>`USA`</br>`-S-`</br>`-S-`</br>`USA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-SA`</br>`-S-`</br>`US-`</br>`USA`</br>`-S-`</br>`-S-`</br>`-S-` |
+| python | 2.7.5</br>2.7.6</br>3.3.2</br>3.3.5</br>3.4.2 | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
+| Python | 2.7.8-intel-2015b</br>2.7.8-intel-2016.01</br>2.7.9</br>2.7.9-foss-2015b</br>2.7.9-foss-2015g</br>2.7.9-gompi-2015e</br>2.7.9-ictce-7.3.5</br>2.7.9-intel-2015b</br>2.7.9-intel-2016.01</br>2.7.9-GNU-5.1.0-2.25</br>2.7.10-GCC-4.9.3-2.25-bare</br>2.7.10-GNU-4.9.3-2.25-bare</br>2.7.11-foss-2016a</br>2.7.11-intel-2015b</br>2.7.11-intel-2016a</br>2.7.11-intel-2017.00</br>2.7.11-intel-2017a</br>2.7.11-GCC-4.9.3-2.25-bare</br>3.4.3-intel-2015b</br>3.5.1</br>3.5.1-intel-2016.01</br>3.5.1-intel-2017.00</br>3.5.2</br>3.5.2-foss-2016a</br>3.5.2-intel-2017.00 | `-S-`</br>`-S-`</br>`-SA`</br>`US-`</br>`USA`</br>`-S-`</br>`-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`US-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`USA`</br>`USA` |
 | python-meep | 1.4.2-intel-2015b-Python-2.7.9-Meep-1.3 | `-S-` |
 | PyYAML | 3.11-intel-2015b-Python-2.7.9 | `-S-` |
 | QCA | 2.1.0-foss-2015g | `-S-` |
 | QEMU | 2.1.2-foss-2015b</br>2.1.2-GCC-4.4.7-system</br>2.1.2-GCC-4.4.7-system-VDE2 | `U--`</br>`US-`</br>`US-` |
-| qemu | 2.1.2</br>2.1.0</br>2.1.2-vde2</br>2.1.0-vde2 | `--A`</br>`--A`</br>`--A`</br>`--A` |
+| qemu | 2.1.0</br>2.1.0-vde2</br>2.1.2</br>2.1.2-vde2 | `--A`</br>`--A`</br>`--A`</br>`--A` |
 | QGIS | 2.12.3-foss-2015g | `-S-` |
-| Qt | 4.8.6-foss-2015g</br>4.8.6 | `-S-`</br>`-SA` |
-| QuantumESPRESSO | 5.4.0-intel-2017.00 | `-SA` |
+| Qt | 4.8.6</br>4.8.6-foss-2015g | `-SA`</br>`-S-` |
+| QuantumESPRESSO | 5.4.0-intel-2017.00</br>6.0-intel-2017a | `-SA`</br>`-S-` |
 | Qwt | 6.1.2-foss-2015g | `-S-` |
-| R | 3.1.1-intel-2015b</br>3.1.1</br>3.2.3-intel-2016.01</br>3.2.3-foss-2015b</br>3.0.1 | `US-`</br>`--A`</br>`-S-`</br>`US-`</br>`--A` |
+| R | 3.0.1</br>3.1.1</br>3.1.1-intel-2015b</br>3.2.3-foss-2015b</br>3.2.3-intel-2016.01 | `--A`</br>`--A`</br>`US-`</br>`US-`</br>`-S-` |
 | Racket | 6.1.1-GNU-5.1.0-2.25 | `-S-` |
 | racket | 6.0.1 | `--A` |
 | relion | 1.2</br>1.3 | `--A`</br>`--A` |
 | RELION | 1.3-intel-2015b | `-S-` |
-| renderproto | 0.11-intel-2015b</br>0.11 | `-S-`</br>`-SA` |
+| renderproto | 0.11</br>0.11-intel-2015b | `-SA`</br>`-S-` |
 | Rstudio | 0.97 | `--A` |
 | RStudio | 0.98.1103 | `-S-` |
 | ruby | 2.0.0-p247 | `--A` |
@@ -339,22 +346,23 @@
 | S4MPLE | 1.0.0 | `-S-` |
 | samtools | 0.1.19 | `--A` |
 | SAMtools | 1.3-foss-2015g | `-S-` |
-| ScaLAPACK | 2.0.2-gompi-2015g-OpenBLAS-0.2.14-LAPACK-3.5.0</br>2.0.2-gompi-2016a-OpenBLAS-0.2.15-LAPACK-3.6.0</br>2.0.2-OpenBLAS-0.2.14-LAPACK-3.5.0</br>2.0.2-gompi-2015e-OpenBLAS-0.2.14-LAPACK-3.5.0</br>2.0.2-gompi-2015b-OpenBLAS-0.2.14-LAPACK-3.5.0</br>2.0.2-gompi-2016.04-OpenBLAS-0.2.18-LAPACK-3.6.0 | `USA`</br>`USA`</br>`-S-`</br>`US-`</br>`US-`</br>`-SA` |
+| ScaLAPACK | 2.0.2-gompi-2015b-OpenBLAS-0.2.14-LAPACK-3.5.0</br>2.0.2-gompi-2015e-OpenBLAS-0.2.14-LAPACK-3.5.0</br>2.0.2-gompi-2015g-OpenBLAS-0.2.14-LAPACK-3.5.0</br>2.0.2-gompi-2016.04-OpenBLAS-0.2.18-LAPACK-3.6.0</br>2.0.2-gompi-2016a-OpenBLAS-0.2.15-LAPACK-3.6.0</br>2.0.2-gompi-2017a-OpenBLAS-0.2.19-LAPACK-3.7.0</br>2.0.2-OpenBLAS-0.2.14-LAPACK-3.5.0 | `US-`</br>`US-`</br>`USA`</br>`-SA`</br>`USA`</br>`-SA`</br>`-S-` |
 | Scalasca | 2.3.1-intel-2015b | `-S-` |
-| scalasca2 | 2.0-icc-impi</br>2.0-gcc-openmpi | `--A`</br>`--A` |
+| scalasca2 | 2.0-gcc-openmpi</br>2.0-icc-impi | `--A`</br>`--A` |
 | ScientificPython | 2.9.4-intel-2015b-Python-2.7.9</br>2.9.4-intel-2015b-Python-2.7.11</br>2.9.4-intel-2016.01-Python-2.7.9 | `-SA`</br>`-SA`</br>`-SA` |
-| Scipion | 1.0.1-Java-8u112-intel-2017.00 | `-S-` |
+| Scipion | 1.0.1-Java-1.8.0_112-OpenMPI-1.10.2-GCC-5.3.0-2.26 | `-S-` |
 | scite | 3.4.3 | `--A` |
 | SCons | 2.3.6-foss-2015g-Python-2.7.9</br>2.3.6-Python-2.7.9 | `-SA`</br>`-S-` |
 | Score-P | 3.0-intel-2015b | `-S-` |
 | scorep | 1.2.3-gcc-openmpi</br>1.2.3-icc-impi | `--A`</br>`--A` |
-| SCOTCH | 5.1.12b_esmumps-foss-2015b</br>6.0.4-intel-2016.01</br>6.0.3-intel-2016.01</br>6.0.0_esmumps-intel-2015b</br>6.0.4-intel-2015b</br>6.0.3-intel-2015b | `-S-`</br>`-S-`</br>`-S-`</br>`US-`</br>`-S-`</br>`US-` |
+| SCOTCH | 5.1.12b_esmumps-foss-2015b</br>6.0.0_esmumps-intel-2015b</br>6.0.3-intel-2015b</br>6.0.3-intel-2016.01</br>6.0.4-intel-2015b</br>6.0.4-intel-2016.01</br>6.0.4-intel-2017a | `-S-`</br>`US-`</br>`US-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
 | SDE | 7.41.0 | `-S-` |
 | Serf | 1.3.8-foss-2015g | `-SA` |
 | settarg | 7.2.2 | `USA` |
 | Siesta | 4.1-b2-intel-2017.00 | `-S-` |
+| Singularity | 2.2.1-GCC-6.3.0-2.27</br>2.2-GCC-6.3.0-2.27 | `-SA`</br>`--A` |
 | SIONlib | 1.6.1-intel-2015b-tools</br>1.6.1-tools | `-S-`</br>`-SA` |
-| SIP | 4.17-foss-2015g-Python-2.7.9</br>4.17-Python-2.7.9</br>4.16.4-foss-2015g-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-` |
+| SIP | 4.16.4-foss-2015g-Python-2.7.9</br>4.17-foss-2015g-Python-2.7.9</br>4.17-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-` |
 | slepc | 3.4.4-icc15-impi-mkl-dbg</br>3.4.4-icc15-impi-mkl-opt</br>3.7.2-icc16-impi5-mkl-dbg</br>3.7.2-icc16-impi5-mkl-opt | `--A`</br>`--A`</br>`--A`</br>`--A` |
 | snpEff | 3.6 | `--A` |
 | SnpEff | 4.1_G | `US-` |
@@ -363,49 +371,53 @@
 | spatialindex | 1.8.5-foss-2015g | `-S-` |
 | SpatiaLite | 4.3.0a-foss-2015g | `-S-` |
 | spGPU | master-GCC-4.9.3-2.25 | `--A` |
-| SQLite | 3.8.8.1-foss-2016a | `--A` |
+| SQLite | 3.8.8.1-foss-2016a</br>3.13.0-intel-2017a | `-SA`</br>`-S-` |
 | Subversion | 1.8.16-foss-2015g | `-SA` |
-| SuiteSparse | 4.4.6-intel-2015b-ParMETIS-4.0.3</br>4.5.3-intel-2017.00-METIS-5.1.0</br>4.4.5-intel-2015b-METIS-5.1.0</br>4.4.3-intel-2016.01-ParMETIS-4.0.3</br>4.4.3-intel-2015b-ParMETIS-4.0.3 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
-| SUMO | 0.27.1-foss-2015g</br>0.25.0-foss-2015g</br>0.26.0-foss-2015g | `-S-`</br>`-S-`</br>`-S-` |
+| SuiteSparse | 4.4.3-intel-2015b-ParMETIS-4.0.3</br>4.4.3-intel-2016.01-ParMETIS-4.0.3</br>4.4.5-intel-2015b-METIS-5.1.0</br>4.4.6-intel-2015b-ParMETIS-4.0.3</br>4.5.3-intel-2017.00-METIS-5.1.0</br>4.5.3-intel-2017a-ParMETIS-4.0.3 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
+| SUMO | 0.25.0-foss-2015g</br>0.26.0-foss-2015g</br>0.27.1-foss-2015g | `-S-`</br>`-S-`</br>`-S-` |
 | SWIG | 2.0.12-intel-2015b-Python-2.7.9</br>2.0.12-Python-2.7.9</br>3.0.7-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-` |
-| sympy | 0.7.6.1-intel-2015b-Python-2.7.11</br>0.7.6-intel-2016.01-Python-2.7.9</br>0.7.6-intel-2015b-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-` |
-| Szip | 2.1-ictce-7.3.5</br>2.1-intel-2015b</br>2.1-intel-2017.00</br>2.1-intel-2016.01</br>2.1</br>2.1-foss-2016a</br>2.1-foss-2015b</br>2.1-foss-2015g | `-S-`</br>`USA`</br>`-S-`</br>`-SA`</br>`-SA`</br>`USA`</br>`US-`</br>`-S-` |
+| sympy | 0.7.6.1-intel-2015b-Python-2.7.11</br>0.7.6-intel-2015b-Python-2.7.9</br>0.7.6-intel-2016.01-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-` |
+| Szip | 2.1</br>2.1-foss-2015b</br>2.1-foss-2015g</br>2.1-foss-2016a</br>2.1-ictce-7.3.5</br>2.1-intel-2015b</br>2.1-intel-2016.01</br>2.1-intel-2017.00</br>2.1-intel-2017a | `-SA`</br>`US-`</br>`-S-`</br>`USA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-` |
 | szip | 2.1 | `--A` |
-| tbb | 15.2.164</br>4.4.2.152</br>4.3.5.187</br>14.0.1</br>15.3.187</br>13.5.192 | `--A`</br>`USA`</br>`-S-`</br>`--A`</br>`--A`</br>`--A` |
-| Tcl | 8.6.3-foss-2016a | `--A` |
+| tbb | 4.3.5.187</br>4.4.2.152</br>13.5.192</br>14.0.1</br>15.2.164</br>15.3.187 | `-S-`</br>`USA`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
+| Tcl | 8.6.3-foss-2016a</br>8.6.5-intel-2017a | `-SA`</br>`-S-` |
 | tcl | 8.5.15 | `--A` |
 | tcsh | 6.18.01-intel-2015b</br>6.19.00 | `US-`</br>`-SA` |
+| tensorflow | 0.12.0 | `-S-` |
 | tk | 8.5.15 | `--A` |
-| Tk | 8.6.3-foss-2015b-no-X11 | `U--` |
-| totalview | 8.13</br>8.12 | `--A`</br>`--A` |
+| Tk | 8.6.3-foss-2015b-no-X11</br>8.6.5-intel-2017a | `U--`</br>`-S-` |
+| tmux | 2.3 | `-SA` |
+| totalview | 8.12</br>8.13 | `--A`</br>`--A` |
 | TotalView | 8.15.4-6-linux-x86-64 | `-S-` |
-| trilinos | 11.2.3-gcc-openmpi-mkl-opt</br>11.2.3-gcc-openmpi-mkl-dbg</br>11.2.3-icc | `--A`</br>`--A`</br>`--A` |
+| trilinos | 11.2.3-gcc-openmpi-mkl-dbg</br>11.2.3-gcc-openmpi-mkl-opt</br>11.2.3-icc | `--A`</br>`--A`</br>`--A` |
 | Trimmomatic | 0.35-Java-1.7.0_79 | `-S-` |
 | turbovnc | 1.2.2</br>1.2.3 | `--A`</br>`-S-` |
-| util-linux | 2.28-intel-2016a</br>2.26.1</br>2.26.1-foss-2015g | `--A`</br>`-SA`</br>`-S-` |
+| util-linux | 2.26.1</br>2.26.1-foss-2015g</br>2.28-intel-2016a | `-SA`</br>`-S-`</br>`--A` |
 | Valgrind | 3.11.0-foss-2015b</br>3.11.0-intel-2015b | `-S-`</br>`-S-` |
 | valgrind | 3.9.0-impi | `--A` |
-| Vampir | 9.0.0</br>8.5.0 | `-S-`</br>`-SA` |
+| Vampir | 8.5.0</br>9.0.0 | `-SA`</br>`-S-` |
 | vampir | 8.2 | `--A` |
-| VampirServer | 9.0.0-intel-2015b</br>8.5.0-intel-2015b | `-S-`</br>`-S-` |
-| VASP | 5.4.1-intel-2015b-24Jun15-UV</br>5.4.1-intel-2017.00-24Jun15</br>5.4.1-intel-2015b-24Jun15 | `U--`</br>`-SA`</br>`-SA` |
+| VampirServer | 8.5.0-intel-2015b</br>9.0.0-intel-2015b | `-S-`</br>`-S-` |
+| VASP | 5.4.1-intel-2015b-24Jun15</br>5.4.1-intel-2015b-24Jun15-UV</br>5.4.1-intel-2017.00-24Jun15 | `-SA`</br>`U--`</br>`-SA` |
 | vde2 | 2.3.2 | `--A` |
 | VDE2 | 2.3.2-GCC-4.4.7-system | `US-` |
 | VirtualGL | 2.4.1 | `-S-` |
 | virtualgl | 2.4 | `--A` |
 | VisIt | 2.10.0 | `US-` |
-| VTune | 2016_update1 | `USA` |
+| VTune | 2016_update1</br>2017_update2 | `USA`</br>`-SA` |
 | vtune_xe | 2013.15</br>2015.3.0.403110 | `--A`</br>`--A` |
-| wien2k | 14.2</br>13.1 | `--A`</br>`--A` |
+| wien2k | 13.1</br>14.2 | `--A`</br>`--A` |
 | wine | 1.7.29 | `--A` |
-| Wine | 1.7.29-GNU-5.1.0-2.25</br>1.7.29-GCC-4.4.7-system | `-S-`</br>`-S-` |
-| xbitmaps | 1.1.1</br>1.1.1-intel-2015b</br>1.1.1-foss-2015g | `-SA`</br>`-S-`</br>`-S-` |
-| xcb-proto | 1.11-Python-2.7.9</br>1.11-foss-2015g-Python-2.7.9</br>1.10-Python-2.7.8</br>1.11-intel-2015b-Python-2.7.9</br>1.11 | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
-| xdrfile | 1.1.4-foss-2015g</br>1.1.4-intel-2015b</br>1.1.4-foss-2016a | `-SA`</br>`--A`</br>`--A` |
-| xextproto | 7.3.0-intel-2016a</br>7.3.0-foss-2015g</br>7.3.0-intel-2015b</br>7.3.0</br>7.3.0-foss-2016a | `-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-` |
-| xineramaproto | 1.2.1-intel-2015b</br>1.2.1 | `-S-`</br>`-SA` |
-| xorg-macros | 1.17</br>1.17-foss-2015g</br>1.19.0-intel-2016a</br>1.19.0-foss-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
-| xproto | 7.0.26-foss-2015g</br>7.0.28-foss-2016a</br>7.0.28-intel-2016a</br>7.0.28</br>7.0.26-intel-2015b</br>7.0.26 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA` |
-| xtrans | 1.3.5-foss-2016a</br>1.3.4-intel-2015b</br>1.3.5-intel-2015b</br>1.3.5-intel-2016a</br>1.3.5</br>1.3.4</br>1.3.5-foss-2015g | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`--A`</br>`-S-` |
-| XZ | 5.2.2</br>5.2.2-intel-2016.01</br>5.2.2-intel-2017.00</br>5.2.2-foss-2016a | `-SA`</br>`-S-`</br>`USA`</br>`USA` |
-| zlib | 1.2.5</br>1.2.8-GCC-4.9.3</br>1.2.8-GCC-4.4.7-system | `--A`</br>`--A`</br>`U--` |
+| Wine | 1.7.29-GCC-4.4.7-system</br>1.7.29-GNU-5.1.0-2.25 | `-S-`</br>`-S-` |
+| xbitmaps | 1.1.1</br>1.1.1-foss-2015g</br>1.1.1-intel-2015b | `-SA`</br>`-S-`</br>`-S-` |
+| xcb-proto | 1.10-Python-2.7.8</br>1.11</br>1.11-foss-2015g-Python-2.7.9</br>1.11-intel-2015b-Python-2.7.9</br>1.11-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA` |
+| xdrfile | 1.1.4-foss-2015g</br>1.1.4-foss-2016a</br>1.1.4-intel-2015b | `-SA`</br>`-SA`</br>`--A` |
+| xextproto | 7.3.0</br>7.3.0-foss-2015g</br>7.3.0-foss-2016a</br>7.3.0-intel-2015b</br>7.3.0-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
+| xineramaproto | 1.2.1</br>1.2.1-intel-2015b | `-SA`</br>`-S-` |
+| xorg-macros | 1.17</br>1.17-foss-2015g</br>1.19.0-foss-2016a</br>1.19.0-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
+| xproto | 7.0.26</br>7.0.26-foss-2015g</br>7.0.26-intel-2015b</br>7.0.28</br>7.0.28-foss-2016a</br>7.0.28-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
+| xtrans | 1.3.4</br>1.3.4-intel-2015b</br>1.3.5</br>1.3.5-foss-2015g</br>1.3.5-foss-2016a</br>1.3.5-intel-2015b</br>1.3.5-intel-2016a | `--A`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
+| XZ | 5.2.2</br>5.2.2-foss-2016a</br>5.2.2-intel-2016.01</br>5.2.2-intel-2017.00 | `-SA`</br>`USA`</br>`-S-`</br>`USA` |
+| zlib | 1.2.5</br>1.2.8-GCC-4.4.7-system</br>1.2.8-GCC-4.9.3</br>1.2.11-GCCcore-6.3.0 | `--A`</br>`U--`</br>`--A`</br>`-SA` |
+
+---8<--- "modules_matrix_search.md"
diff --git a/docs.it4i/modules-salomon-uv.md b/docs.it4i/modules-salomon-uv.md
index a1da8f4a2ad238ca622c8355a4c4ca9cc1e40b6d..1b0f652a5e4c33f96285dde9231fc6b3e05906c1 100644
--- a/docs.it4i/modules-salomon-uv.md
+++ b/docs.it4i/modules-salomon-uv.md
@@ -144,6 +144,7 @@
 | [expat](http://expat.sourceforge.net/) | Expat is an XML parser library written in C. It is a stream-oriented parser in which an application registers handlers for things the parser might find in the XML document (like start tags) |
 | git | &nbsp; |
 | [gzip](http://www.gnu.org/software/gzip/) | gzip (GNU zip) is a popular data compression program as a replacement for compress |
+| help2man | &nbsp; |
 | MATLAB | &nbsp; |
 | [Mercurial](http://mercurial.selenic.com/) | Mercurial is a free, distributed source control management tool. It efficiently handles projects of any size and offers an easy and intuitive interface. |
 | [numactl](http://oss.sgi.com/projects/libnuma/) | The numactl program allows you to run your application program on specific cpu's and memory nodes. It does this by supplying a NUMA memory policy to the operating system before running your program. The libnuma library provides convenient ways for you to add NUMA memory policies into your own program. |
diff --git a/docs.it4i/modules-salomon.md b/docs.it4i/modules-salomon.md
index 65a3a3a626b9d9f264dafe5f5cc6508646a1b426..088097b961c8abe8040eeb929f94bfd84b7ab5ad 100644
--- a/docs.it4i/modules-salomon.md
+++ b/docs.it4i/modules-salomon.md
@@ -31,7 +31,6 @@
 | Module | Description |
 | ------ | ----------- |
 | Adams | &nbsp; |
-| ANSYS | &nbsp; |
 | COMSOL | &nbsp; |
 | Digimat | &nbsp; |
 | [FreeFem++](http://www.freefem.org) | FreeFem++ is a partial differential equation solver. It has its own language. freefem scripts can solve multiphysics non linear systems in 2D and 3D. Problems involving PDE (2d, 3d) from several branches of physics such as fluid-structure interactions require interpolations of data on several meshes and their manipulation within one program. FreeFem++ includes a fast 2^d-tree-based interpolation algorithm and a language for the manipulation of data on multiple meshes (as a follow up of bamg (now a part of FreeFem++ ). FreeFem++ is written in C++ and the FreeFem++ language is a C++ idiom. It runs on Macs, Windows, Unix machines. FreeFem++ replaces the older freefem and freefem+. |
@@ -169,7 +168,9 @@
 | Module | Description |
 | ------ | ----------- |
 | [FOX](http://fox-toolkit.org) | FOX is a C++ based Toolkit for developing Graphical User Interfaces easily and effectively. It offers a wide, and growing, collection of Controls, and provides state of the art facilities such as drag and drop, selection, as well as OpenGL widgets for 3D graphical manipulation. |
+| GLM | &nbsp; |
 | [libdrm](http://dri.freedesktop.org) | Direct Rendering Manager runtime library. |
+| libevent | &nbsp; |
 | [libffi](http://sourceware.org/libffi/) | The libffi library provides a portable, high level programming interface to various calling conventions. This allows a programmer to call any function specified by a call interface description at run-time. |
 | [libfontenc](http://www.freedesktop.org/wiki/Software/xlibs/) | X11 font encoding library |
 | [libjpeg-turbo](http://sourceforge.net/libjpeg-turbo/) | libjpeg-turbo is a fork of the original IJG libjpeg which uses SIMD to accelerate baseline JPEG compression and decompression. libjpeg is a library that implements JPEG image encoding, decoding and transcoding. |
@@ -187,6 +188,7 @@
 | libxslt | &nbsp; |
 | libyaml | &nbsp; |
 | lxml | &nbsp; |
+| MATIO | &nbsp; |
 | [OpenCoarrays](http://www.opencoarrays.org) | OpenCoarrays is an open-source software project for developing, porting and tuning transport layers that support coarray Fortran compilers. |
 | [PROJ](http://trac.osgeo.org/proj/) | Program proj is a standard Unix filter function which converts geographic longitude and latitude coordinates into cartesian coordinates |
 | PyYAML | &nbsp; |
@@ -197,6 +199,7 @@
 | [spatialindex](https://libspatialindex.github.io/index.html) | The purpose of this library is to provide: * An extensible framework that will support robust spatial indexing methods. * Support for sophisticated spatial queries. Range, point location, nearest neighbor and k-nearest neighbor as well as parametric queries (defined by spatial constraints) should be easy to deploy and run. * Easy to use interfaces for inserting, deleting and updating information. |
 | [SpatiaLite](https://www.gaia-gis.it/fossil/libspatialite/index) | SpatiaLite is an open source library intended to extend the SQLite core to support fully fledged Spatial SQL capabilities. |
 | [tbb](http://software.intel.com/en-us/articles/intel-tbb/) | Intel Threading Building Blocks 4.0 (Intel TBB) is a widely used, award-winning C++ template library for creating reliable, portable, and scalable parallel applications. Use Intel TBB for a simple and rapid way of developing robust task-based parallel applications that scale to available processor cores, are compatible with multiple environments, and are easier to maintain. Intel TBB is the most proficient way to implement future-proof parallel applications that tap into the power and performance of multicore and manycore hardware platforms. |
+| [tensorflow](http://python.org/) | Python is a programming language that lets you work more quickly and integrate your systems more effectively. |
 | [zlib](http://www.zlib.net/) | zlib is designed to be a free, general-purpose, legally unencumbered -- that is, not covered by any patents -- lossless data-compression library for use on virtually any computer hardware and operating system. |
 
 ## Math
@@ -211,6 +214,7 @@
 | [METIS](http://glaros.dtc.umn.edu/gkhome/metis/metis/overview) | METIS is a set of serial programs for partitioning graphs, partitioning finite element meshes, and producing fill reducing orderings for sparse matrices. The algorithms implemented in METIS are based on the multilevel recursive-bisection, multilevel k-way, and multi-constraint partitioning schemes. |
 | MPC | &nbsp; |
 | [MPFR](http://www.mpfr.org) | The MPFR library is a C library for multiple-precision floating-point computations with correct rounding. |
+| MUMPS | &nbsp; |
 | [numpy](http://www.numpy.org) | NumPy is the fundamental package for scientific computing with Python. It contains among other things: a powerful N-dimensional array object, sophisticated (broadcasting) functions, tools for integrating C/C++ and Fortran code, useful linear algebra, Fourier transform, and random number capabilities. Besides its obvious scientific uses, NumPy can also be used as an efficient multi-dimensional container of generic data. Arbitrary data-types can be defined. This allows NumPy to seamlessly and speedily integrate with a wide variety of databases. |
 | [Octave](http://www.gnu.org/software/octave/) | GNU Octave is a high-level interpreted language, primarily intended for numerical computations. |
 | [ParMETIS](http://glaros.dtc.umn.edu/gkhome/metis/parmetis/overview) | ParMETIS is an MPI-based parallel library that implements a variety of algorithms for partitioning unstructured graphs, meshes, and for computing fill-reducing orderings of sparse matrices. ParMETIS extends the functionality provided by METIS and includes routines that are especially suited for parallel AMR computations and large scale numerical simulations. The algorithms implemented in ParMETIS are based on the parallel multilevel k-way graph-partitioning, adaptive repartitioning, and parallel multi-constrained partitioning schemes. |
@@ -292,6 +296,7 @@
 | [ictce](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/) | Intel Cluster Toolkit Compiler Edition provides Intel C/C++ and Fortran compilers, Intel MPI & Intel MKL. |
 | [iimpi](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/) | Intel C/C++ and Fortran compilers, alongside Intel MPI. |
 | [intel](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/) | Intel Cluster Toolkit Compiler Edition provides Intel C/C++ and Fortran compilers, Intel MPI & Intel MKL. |
+| iompi | &nbsp; |
 | [PRACE](http://www.prace-ri.eu/PRACE-Common-Production) | The PRACE Common Production Environment (PCPE) is a set of software tools and libraries that are planned to be available on all PRACE execution sites. The PCPE also defines a set of environment variables that try to make compilation on all sites as homogeneous and simple as possible. |
 | [prace](http://www.prace-ri.eu/PRACE-Common-Production) | PRACE Common Production Environment (PCPE) Initialisation of the PRACE common production environment. This allows you to assume that the following tools/libraries are available by default in your PATH/environment. * Fortran, C, C++ Compilers * MPI * BLAS, LAPACK, BLACS, ScaLAPACK * FFTW * HDF5, NetCDF The compiler commands on are: * mpif90 - Fortran compiler * mpicc - C compiler * mpicxx - C++ compiler For more information on the PCPE please see the documentation at: http://www.prace-ri.eu/PRACE-Common-Production For help using this system, please see Local User Guide available at: http://prace-ri.eu/Best-Practice-Guide-Anselm-HTML |
 
@@ -299,6 +304,7 @@
 
 | Module | Description |
 | ------ | ----------- |
+| ANSYS | &nbsp; |
 | [APR](http://apr.apache.org/) | Apache Portable Runtime (APR) libraries. |
 | [APR-util](http://apr.apache.org/) | Apache Portable Runtime (APR) util libraries. |
 | [Bash](http://www.gnu.org/software/bash) | Bash is an sh-compatible command language interpreter that executes commands read from the standard input or from a file. Bash also incorporates useful features from the Korn and C shells (ksh and csh). |
@@ -310,6 +316,7 @@
 | [git](http://git-scm.com/) | Git is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency. |
 | globus | &nbsp; |
 | [gzip](http://www.gnu.org/software/gzip/) | gzip (GNU zip) is a popular data compression program as a replacement for compress |
+| help2man | &nbsp; |
 | [HPL](http://www.netlib.org/benchmark/hpl/) | HPL is a software package that solves a (random) dense linear system in double precision (64 bits) arithmetic on distributed-memory computers. It can thus be regarded as a portable as well as freely available implementation of the High Performance Computing Linpack Benchmark. |
 | [Inspector](http://software.intel.com/en-us/intel-inspector-xe) | Intel Inspector XE 2013 is an easy to use memory error checker and thread checker for serial and parallel applications |
 | [itac](http://software.intel.com/en-us/intel-trace-analyzer/) | The Intel Trace Collector is a low-overhead tracing library that performs event-based tracing in applications. The Intel Trace Analyzer provides a convenient way to monitor application activities gathered by the Intel Trace Collector through graphical displays. |
@@ -329,9 +336,11 @@
 | Score-P | &nbsp; |
 | [SDE](https://software.intel.com/en-us/articles/intel-software-development-emulator) | Intel Software Development Emulator is a pintool that enables the development of applications using instruction set extensions that are not currently implemented in hardware. |
 | [Serf](http://serf.apache.org/) | The serf library is a high performance C-based HTTP client library built upon the Apache Portable Runtime (APR) library |
+| Singularity | &nbsp; |
 | [Subversion](http://subversion.apache.org/) | Subversion is an open source version control system. |
 | [Szip](http://www.hdfgroup.org/doc_resource/SZIP/) | Szip compression software, providing lossless compression of scientific data |
 | [tcsh](http://www.tcsh.org) | Tcsh is an enhanced, but completely compatible version of the Berkeley UNIX C shell (csh). It is a command language interpreter usable both as an interactive login shell and a shell script command processor. It includes a command-line editor, programmable word completion, spelling correction, a history mechanism, job control and a C-like syntax. |
+| tmux | &nbsp; |
 | [turbovnc](http://www.turbovnc.org) | TurboVNC is a derivative of VNC (Virtual Network Computing) that is tuned to provide peak performance for 3D and video workloads. |
 | [util-linux](http://www.kernel.org/pub/linux/utils/util-linux) | Set of Linux utilities |
 | [VDE2](http://vde.sourceforge.net) | VDE is an ethernet compliant virtual network that can be spawned over a set of physical computer over the Internet. VDE is part of virtualsquare project. |
diff --git a/docs.it4i/salomon/capacity-computing.md b/docs.it4i/salomon/capacity-computing.md
index aa947db011b4ba820f3736b445e1bb639233ef14..39b4c029903b04c067c9f9e2d7e48d13fac3f133 100644
--- a/docs.it4i/salomon/capacity-computing.md
+++ b/docs.it4i/salomon/capacity-computing.md
@@ -41,7 +41,7 @@ Assume we have 900 input files with name beginning with "file" (e. g. file001, .
 
 First, we create a tasklist file (or subjobs list), listing all tasks (subjobs) - all input files in our example:
 
-```bash
+```console
 $ find . -name 'file*' > tasklist
 ```
 
@@ -78,7 +78,7 @@ If huge number of parallel multicore (in means of multinode multithread, e. g. M
 
 To submit the job array, use the qsub -J command. The 900 jobs of the [example above](capacity-computing/#array_example) may be submitted like this:
 
-```bash
+```console
 $ qsub -N JOBNAME -J 1-900 jobscript
 506493[].isrv5
 ```
@@ -87,7 +87,7 @@ In this example, we submit a job array of 900 subjobs. Each subjob will run on f
 
 Sometimes for testing purposes, you may need to submit only one-element array. This is not allowed by PBSPro, but there's a workaround:
 
-```bash
+```console
 $ qsub -N JOBNAME -J 9-10:2 jobscript
 ```
 
@@ -97,7 +97,7 @@ This will only choose the lower index (9 in this example) for submitting/running
 
 Check status of the job array by the qstat command.
 
-```bash
+```console
 $ qstat -a 506493[].isrv5
 
 isrv5:
@@ -111,7 +111,7 @@ The status B means that some subjobs are already running.
 
 Check status of the first 100 subjobs by the qstat command.
 
-```bash
+```console
 $ qstat -a 12345[1-100].isrv5
 
 isrv5:
@@ -129,7 +129,7 @@ Job ID          Username Queue    Jobname    SessID NDS TSK Memory Time S Time
 
 Delete the entire job array. Running subjobs will be killed, queueing subjobs will be deleted.
 
-```bash
+```console
 $ qdel 12345[].isrv5
 ```
 
@@ -137,17 +137,17 @@ Deleting large job arrays may take a while.
 
 Display status information for all user's jobs, job arrays, and subjobs.
 
-```bash
+```console
 $ qstat -u $USER -t
 ```
 
 Display status information for all user's subjobs.
 
-```bash
+```console
 $ qstat -u $USER -tJ
 ```
 
-Read more on job arrays in the [PBSPro Users guide](../../pbspro-documentation/).
+Read more on job arrays in the [PBSPro Users guide](../pbspro/).
 
 ## GNU Parallel
 
@@ -158,7 +158,7 @@ GNU parallel is a shell tool for executing jobs in parallel using one or more co
 
 For more information and examples see the parallel man page:
 
-```bash
+```console
 $ module add parallel
 $ man parallel
 ```
@@ -173,7 +173,7 @@ Assume we have 101 input files with name beginning with "file" (e. g. file001, .
 
 First, we create a tasklist file, listing all tasks - all input files in our example:
 
-```bash
+```console
 $ find . -name 'file*' > tasklist
 ```
 
@@ -211,7 +211,7 @@ In this example, tasks from tasklist are executed via the GNU parallel. The jobs
 
 To submit the job, use the qsub command. The 101 tasks' job of the [example above](capacity-computing/#gp_example) may be submitted like this:
 
-```bash
+```console
 $ qsub -N JOBNAME jobscript
 12345.dm2
 ```
@@ -241,13 +241,13 @@ Assume we have 992 input files with name beginning with "file" (e. g. file001, .
 
 First, we create a tasklist file, listing all tasks - all input files in our example:
 
-```bash
+```console
 $ find . -name 'file*' > tasklist
 ```
 
 Next we create a file, controlling how many tasks will be executed in one subjob
 
-```bash
+```console
 $ seq 32 > numtasks
 ```
 
@@ -296,7 +296,7 @@ When deciding these values, think about the following guiding rules:
 
 To submit the job array, use the qsub -J command. The 992 tasks' job of the [example above](capacity-computing/#combined_example) may be submitted like this:
 
-```bash
+```console
 $ qsub -N JOBNAME -J 1-992:32 jobscript
 12345[].dm2
 ```
@@ -312,7 +312,7 @@ Download the examples in [capacity.zip](capacity.zip), illustrating the above li
 
 Unzip the archive in an empty directory on Salomon and follow the instructions in the README file
 
-```bash
+```console
 $ unzip capacity.zip
 $ cd capacity
 $ cat README
diff --git a/docs.it4i/salomon/environment-and-modules.md b/docs.it4i/salomon/environment-and-modules.md
index 9671013566e7621e42b2d0cdf693eed783f13197..5de3931c3d2b060d69a544343836c46caba20509 100644
--- a/docs.it4i/salomon/environment-and-modules.md
+++ b/docs.it4i/salomon/environment-and-modules.md
@@ -4,7 +4,7 @@
 
 After logging in, you may want to configure the environment. Write your preferred path definitions, aliases, functions and module loads in the .bashrc file
 
-```bash
+```console
 # ./bashrc
 
 # Source global definitions
@@ -32,7 +32,7 @@ In order to configure your shell for running particular application on Salomon w
 
 Application modules on Salomon cluster are built using [EasyBuild](http://hpcugent.github.io/easybuild/ "EasyBuild"). The modules are divided into the following structure:
 
-```bash
+```console
  base: Default module class
  bio: Bioinformatics, biology and biomedical
  cae: Computer Aided Engineering (incl. CFD)
@@ -63,33 +63,33 @@ The modules may be loaded, unloaded and switched, according to momentary needs.
 
 To check available modules use
 
-```bash
-$ module avail
+```console
+$ module avail **or** ml av
 ```
 
 To load a module, for example the Open MPI module use
 
-```bash
-$ module load OpenMPI
+```console
+$ module load OpenMPI **or** ml OpenMPI
 ```
 
 loading the Open MPI module will set up paths and environment variables of your active shell such that you are ready to run the Open MPI software
 
 To check loaded modules use
 
-```bash
-$ module list
+```console
+$ module list **or** ml
 ```
 
 To unload a module, for example the Open MPI module use
 
-```bash
-$ module unload OpenMPI
+```console
+$ module unload OpenMPI **or** ml -OpenMPI
 ```
 
 Learn more on modules by reading the module man page
 
-```bash
+```console
 $ man module
 ```
 
diff --git a/docs.it4i/salomon/ib-single-plane-topology.md b/docs.it4i/salomon/ib-single-plane-topology.md
index b1a5381e6280b9c1ace1c84c90b12ef2d4641650..859858c83b4333cd9fdad3383ba40694da8d9d27 100644
--- a/docs.it4i/salomon/ib-single-plane-topology.md
+++ b/docs.it4i/salomon/ib-single-plane-topology.md
@@ -20,7 +20,7 @@ Each color in each physical IRU represents one dual-switch ASIC switch.
 
 Each of the 3 inter-connected D racks are equivalent to one half of M-Cell rack. 18 x D rack with MIC accelerated nodes [r21-r38] are equivalent to 3 M-Cell racks as shown in a diagram [7D Enhanced Hypercube](7d-enhanced-hypercube/).
 
-As shown in a diagram ![IB Topology](../img/Salomon_IB_topology.png)
+As shown in a diagram [IB Topology](7d-enhanced-hypercube/#ib-topology)
 
 * Racks 21, 22, 23, 24, 25, 26 are equivalent to one M-Cell rack.
 * Racks 27, 28, 29, 30, 31, 32 are equivalent to one M-Cell rack.
diff --git a/docs.it4i/salomon/job-priority.md b/docs.it4i/salomon/job-priority.md
index 265afe5441ba0ad549348c7de9edc07c3fb078fb..5455a1ac4df144a9defc62d7a6511b3890bac5e7 100644
--- a/docs.it4i/salomon/job-priority.md
+++ b/docs.it4i/salomon/job-priority.md
@@ -26,7 +26,7 @@ Fair-share priority is used for ranking jobs with equal queue priority.
 
 Fair-share priority is calculated as
 
-![](../img/fairshare_formula.png)
+---8<--- "fairshare_formula.md"
 
 where MAX_FAIRSHARE has value 1E6,
 usage<sub>Project</sub> is cumulated usage by all members of selected project,
@@ -53,7 +53,7 @@ Eligible time can be seen as eligible_time attribute of job.
 
 Job execution priority (job sort formula) is calculated as:
 
-![](../img/job_sort_formula.png)
+---8<--- "job_sort_formula.md"
 
 ### Job backfilling
 
@@ -73,3 +73,5 @@ Specifying more accurate walltime enables better scheduling, better execution ti
 ### Job Placement
 
 Job [placement can be controlled by flags during submission](job-submission-and-execution/#job_placement).
+
+---8<--- "mathjax.md"
diff --git a/docs.it4i/salomon/job-submission-and-execution.md b/docs.it4i/salomon/job-submission-and-execution.md
index e7a4c4ff0039815504804e9f5fcb30959e8713e6..9c7ce35a6ba00c469e2b2c63a480e42e5b36e85c 100644
--- a/docs.it4i/salomon/job-submission-and-execution.md
+++ b/docs.it4i/salomon/job-submission-and-execution.md
@@ -16,7 +16,7 @@ When allocating computational resources for the job, please specify
 
 Submit the job using the qsub command:
 
-```bash
+```console
 $ qsub -A Project_ID -q queue -l select=x:ncpus=y,walltime=[[hh:]mm:]ss[.ms] jobscript
 ```
 
@@ -27,25 +27,25 @@ The qsub submits the job into the queue, in other words the qsub command creat
 
 ### Job Submission Examples
 
-```bash
+```console
 $ qsub -A OPEN-0-0 -q qprod -l select=64:ncpus=24,walltime=03:00:00 ./myjob
 ```
 
 In this example, we allocate 64 nodes, 24 cores per node, for 3 hours. We allocate these resources via the qprod queue, consumed resources will be accounted to the Project identified by Project ID OPEN-0-0. Jobscript myjob will be executed on the first node in the allocation.
 
-```bash
+```console
 $ qsub -q qexp -l select=4:ncpus=24 -I
 ```
 
 In this example, we allocate 4 nodes, 24 cores per node, for 1 hour. We allocate these resources via the qexp queue. The resources will be available interactively
 
-```bash
+```console
 $ qsub -A OPEN-0-0 -q qlong -l select=10:ncpus=24 ./myjob
 ```
 
 In this example, we allocate 10 nodes, 24 cores per node, for 72 hours. We allocate these resources via the qlong queue. Jobscript myjob will be executed on the first node in the allocation.
 
-```bash
+```console
 $ qsub -A OPEN-0-0 -q qfree -l select=10:ncpus=24 ./myjob
 ```
 
@@ -57,13 +57,13 @@ To allocate a node with Xeon Phi co-processor, user needs to specify that in sel
 
 The absence of specialized queue for accessing the nodes with cards means, that the Phi cards can be utilized in any queue, including qexp for testing/experiments, qlong for longer jobs, qfree after the project resources have been spent, etc. The Phi cards are thus also available to PRACE users. There's no need to ask for permission to utilize the Phi cards in project proposals.
 
-```bash
+```console
 $ qsub  -A OPEN-0-0 -I -q qprod -l select=1:ncpus=24:accelerator=True:naccelerators=2:accelerator_model=phi7120 ./myjob
 ```
 
 In this example, we allocate 1 node, with 24 cores, with 2 Xeon Phi 7120p cards, running batch job ./myjob. The default time for qprod is used, e. g. 24 hours.
 
-```bash
+```console
 $ qsub  -A OPEN-0-0 -I -q qlong -l select=4:ncpus=24:accelerator=True:naccelerators=2 -l walltime=56:00:00 -I
 ```
 
@@ -72,19 +72,19 @@ In this example, we allocate 4 nodes, with 24 cores per node (totalling 96 cores
 ### UV2000 SMP
 
 !!! note
-    14 NUMA nodes available on UV2000
+    13 NUMA nodes available on UV2000
     Per NUMA node allocation.
     Jobs are isolated by cpusets.
 
-The UV2000 (node uv1) offers 3328GB of RAM and 112 cores, distributed in 14 NUMA nodes. A NUMA node packs 8 cores and approx. 236GB RAM. In the PBS the UV2000 provides 14 chunks, a chunk per NUMA node (see [Resource allocation policy](resources-allocation-policy/)). The jobs on UV2000 are isolated from each other by cpusets, so that a job by one user may not utilize CPU or memory allocated to a job by other user. Always, full chunks are allocated, a job may only use resources of the NUMA nodes allocated to itself.
+The UV2000 (node uv1) offers 3TB of RAM and 104 cores, distributed in 13 NUMA nodes. A NUMA node packs 8 cores and approx. 247GB RAM (with one exception: node 11 has only 123GB RAM). In the PBS the UV2000 provides 13 chunks, a chunk per NUMA node (see [Resource allocation policy](resources-allocation-policy/)). The jobs on UV2000 are isolated from each other by cpusets, so that a job by one user may not utilize CPU or memory allocated to a job by another user. Always, full chunks are allocated, a job may only use resources of the NUMA nodes allocated to itself.
 
-```bash
- $ qsub -A OPEN-0-0 -q qfat -l select=14 ./myjob
+```console
+ $ qsub -A OPEN-0-0 -q qfat -l select=13 ./myjob
 ```
 
-In this example, we allocate all 14 NUMA nodes (corresponds to 14 chunks), 112 cores of the SGI UV2000 node for 72 hours. Jobscript myjob will be executed on the node uv1.
+In this example, we allocate all 13 NUMA nodes (corresponds to 13 chunks), 104 cores of the SGI UV2000 node for 72 hours. Jobscript myjob will be executed on the node uv1.
 
-```bash
+```console
 $ qsub -A OPEN-0-0 -q qfat -l select=1:mem=2000GB ./myjob
 ```
 
@@ -94,13 +94,13 @@ In this example, we allocate 2000GB of memory on the UV2000 for 72 hours. By req
 
 All qsub options may be [saved directly into the jobscript](#example-jobscript-for-mpi-calculation-with-preloaded-inputs). In such a case, no options to qsub are needed.
 
-```bash
+```console
 $ qsub ./myjob
 ```
 
 By default, the PBS batch system sends an e-mail only when the job is aborted. Disabling mail events completely can be done like this:
 
-```bash
+```console
 $ qsub -m n
 ```
 
@@ -113,13 +113,13 @@ $ qsub -m n
 
 Specific nodes may be selected using PBS resource attribute host (for hostnames):
 
-```bash
+```console
 qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=24:host=r24u35n680+1:ncpus=24:host=r24u36n681 -I
 ```
 
 Specific nodes may be selected using PBS resource attribute cname (for short names in cns[0-1]+ format):
 
-```bash
+```console
 qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=24:host=cns680+1:ncpus=24:host=cns681 -I
 ```
 
@@ -142,7 +142,7 @@ Nodes directly connected to the one InifiBand switch can be allocated using node
 
 In this example, we request all 9 nodes directly connected to the same switch using node grouping placement.
 
-```bash
+```console
 $ qsub -A OPEN-0-0 -q qprod -l select=9:ncpus=24 -l place=group=switch ./myjob
 ```
 
@@ -155,13 +155,13 @@ Nodes directly connected to the specific InifiBand switch can be selected using
 
 In this example, we request all 9 nodes directly connected to r4i1s0sw1 switch.
 
-```bash
+```console
 $ qsub -A OPEN-0-0 -q qprod -l select=9:ncpus=24:switch=r4i1s0sw1 ./myjob
 ```
 
 List of all InifiBand switches:
 
-```bash
+```console
 $ qmgr -c 'print node @a' | grep switch | awk '{print $6}' | sort -u
 r1i0s0sw0
 r1i0s0sw1
@@ -169,12 +169,11 @@ r1i1s0sw0
 r1i1s0sw1
 r1i2s0sw0
 ...
-...
 ```
 
 List of all all nodes directly connected to the specific InifiBand switch:
 
-```bash
+```console
 $ qmgr -c 'p n @d' | grep 'switch = r36sw3' | awk '{print $3}' | sort
 r36u31n964
 r36u32n965
@@ -203,7 +202,7 @@ Nodes located in the same dimension group may be allocated using node grouping o
 
 In this example, we allocate 16 nodes in the same [hypercube dimension](7d-enhanced-hypercube/) 1 group.
 
-```bash
+```console
 $ qsub -A OPEN-0-0 -q qprod -l select=16:ncpus=24 -l place=group=ehc_1d -I
 ```
 
@@ -211,7 +210,7 @@ For better understanding:
 
 List of all groups in dimension 1:
 
-```bash
+```console
 $ qmgr -c 'p n @d' | grep ehc_1d | awk '{print $6}' | sort |uniq -c
      18 r1i0
      18 r1i1
@@ -222,7 +221,7 @@ $ qmgr -c 'p n @d' | grep ehc_1d | awk '{print $6}' | sort |uniq -c
 
 List of all all nodes in specific dimension 1 group:
 
-```bash
+```console
 $ $ qmgr -c 'p n @d' | grep 'ehc_1d = r1i0' | awk '{print $3}' | sort
 r1i0n0
 r1i0n1
@@ -236,7 +235,7 @@ r1i0n11
 !!! note
     Check status of your jobs using the **qstat** and **check-pbs-jobs** commands
 
-```bash
+```console
 $ qstat -a
 $ qstat -a -u username
 $ qstat -an -u username
@@ -245,7 +244,7 @@ $ qstat -f 12345.isrv5
 
 Example:
 
-```bash
+```console
 $ qstat -a
 
 srv11:
@@ -261,7 +260,7 @@ In this example user1 and user2 are running jobs named job1, job2 and job3x. The
 
 Check status of your jobs using check-pbs-jobs command. Check presence of user's PBS jobs' processes on execution hosts. Display load, processes. Display job standard and error output. Continuously display (tail -f) job standard or error output.
 
-```bash
+```console
 $ check-pbs-jobs --check-all
 $ check-pbs-jobs --print-load --print-processes
 $ check-pbs-jobs --print-job-out --print-job-err
@@ -271,7 +270,7 @@ $ check-pbs-jobs --jobid JOBID --tailf-job-out
 
 Examples:
 
-```bash
+```console
 $ check-pbs-jobs --check-all
 JOB 35141.dm2, session_id 71995, user user2, nodes r3i6n2,r3i6n3
 Check session id: OK
@@ -282,7 +281,7 @@ r3i6n3: No process
 
 In this example we see that job 35141.dm2 currently runs no process on allocated node r3i6n2, which may indicate an execution error.
 
-```bash
+```console
 $ check-pbs-jobs --print-load --print-processes
 JOB 35141.dm2, session_id 71995, user user2, nodes r3i6n2,r3i6n3
 Print load
@@ -298,7 +297,7 @@ r3i6n2: 99.7 run-task
 
 In this example we see that job 35141.dm2 currently runs process run-task on node r3i6n2, using one thread only, while node r3i6n3 is empty, which may indicate an execution error.
 
-```bash
+```console
 $ check-pbs-jobs --jobid 35141.dm2 --print-job-out
 JOB 35141.dm2, session_id 71995, user user2, nodes r3i6n2,r3i6n3
 Print job standard output:
@@ -317,19 +316,19 @@ In this example, we see actual output (some iteration loops) of the job 35141.dm
 
 You may release your allocation at any time, using qdel command
 
-```bash
+```console
 $ qdel 12345.isrv5
 ```
 
 You may kill a running job by force, using qsig command
 
-```bash
+```console
 $ qsig -s 9 12345.isrv5
 ```
 
 Learn more by reading the pbs man page
 
-```bash
+```console
 $ man pbs_professional
 ```
 
@@ -345,7 +344,7 @@ The Jobscript is a user made script, controlling sequence of commands for execut
 !!! note
     The jobscript or interactive shell is executed on first of the allocated nodes.
 
-```bash
+```console
 $ qsub -q qexp -l select=4:ncpus=24 -N Name0 ./myjob
 $ qstat -n -u username
 
@@ -362,7 +361,7 @@ In this example, the nodes r21u01n577, r21u02n578, r21u03n579, r21u04n580 were a
 !!! note
     The jobscript or interactive shell is by default executed in home directory
 
-```bash
+```console
 $ qsub -q qexp -l select=4:ncpus=24 -I
 qsub: waiting for job 15210.isrv5 to start
 qsub: job 15210.isrv5 ready
@@ -380,7 +379,7 @@ The allocated nodes are accessible via ssh from login nodes. The nodes may acces
 
 Calculations on allocated nodes may be executed remotely via the MPI, ssh, pdsh or clush. You may find out which nodes belong to the allocation by reading the $PBS_NODEFILE file
 
-```bash
+```console
 qsub -q qexp -l select=2:ncpus=24 -I
 qsub: waiting for job 15210.isrv5 to start
 qsub: job 15210.isrv5 ready
diff --git a/docs.it4i/salomon/network.md b/docs.it4i/salomon/network.md
index 2f3f8a09f474c12ffe961781c39ea6fbea260a46..91da0de5ee2114ca159ee722f6b5f7db212a9c0d 100644
--- a/docs.it4i/salomon/network.md
+++ b/docs.it4i/salomon/network.md
@@ -16,7 +16,7 @@ The network provides **2170MB/s** transfer rates via the TCP connection (single
 
 ## Example
 
-```bash
+```console
 $ qsub -q qexp -l select=4:ncpus=16 -N Name0 ./myjob
 $ qstat -n -u username
                                                             Req'd Req'd   Elap
@@ -28,14 +28,14 @@ Job ID          Username Queue    Jobname    SessID NDS TSK Memory Time S Time
 
 In this example, we access the node r4i1n0 by Infiniband network via the ib0 interface.
 
-```bash
+```console
 $ ssh 10.17.35.19
 ```
 
 In this example, we get
 information of the Infiniband network.
 
-```bash
+```console
 $ ifconfig
 ....
 inet addr:10.17.35.19....
diff --git a/docs.it4i/salomon/prace.md b/docs.it4i/salomon/prace.md
index f1990d29251e00f4389ba5d23f399777a86ed726..a3c80fa840dd1ff4ccb0dd17cd1f3d82001bdbdb 100644
--- a/docs.it4i/salomon/prace.md
+++ b/docs.it4i/salomon/prace.md
@@ -36,14 +36,14 @@ Most of the information needed by PRACE users accessing the Salomon TIER-1 syste
 
 Before you start to use any of the services don't forget to create a proxy certificate from your certificate:
 
-```bash
-    $ grid-proxy-init
+```console
+$ grid-proxy-init
 ```
 
 To check whether your proxy certificate is still valid (by default it's valid 12 hours), use:
 
-```bash
-    $ grid-proxy-info
+```console
+$ grid-proxy-info
 ```
 
 To access Salomon cluster, two login nodes running GSI SSH service are available. The service is available from public Internet as well as from the internal PRACE network (accessible only from other PRACE partners).
@@ -60,14 +60,14 @@ It is recommended to use the single DNS name salomon-prace.it4i.cz which is dist
 | login3-prace.salomon.it4i.cz | 2222 | gsissh   | login3                           |
 | login4-prace.salomon.it4i.cz | 2222 | gsissh   | login4                           |
 
-```bash
-    $ gsissh -p 2222 salomon-prace.it4i.cz
+```console
+$ gsissh -p 2222 salomon-prace.it4i.cz
 ```
 
 When logging from other PRACE system, the prace_service script can be used:
 
-```bash
-    $ gsissh `prace_service -i -s salomon`
+```console
+$ gsissh `prace_service -i -s salomon`
 ```
 
 #### Access From Public Internet:
@@ -82,27 +82,24 @@ It is recommended to use the single DNS name salomon.it4i.cz which is distribute
 | login3-prace.salomon.it4i.cz | 2222 | gsissh   | login3                           |
 | login4-prace.salomon.it4i.cz | 2222 | gsissh   | login4                           |
 
-```bash
-    $ gsissh -p 2222 salomon.it4i.cz
+```console
+$ gsissh -p 2222 salomon.it4i.cz
 ```
 
 When logging from other PRACE system, the prace_service script can be used:
 
-```bash
-    $ gsissh `prace_service -e -s salomon`
+```console
+$ gsissh `prace_service -e -s salomon`
 ```
 
 Although the preferred and recommended file transfer mechanism is [using GridFTP](prace/#file-transfers), the GSI SSH
 implementation on Salomon supports also SCP, so for small files transfer gsiscp can be used:
 
-```bash
-    $ gsiscp -P 2222 _LOCAL_PATH_TO_YOUR_FILE_ salomon.it4i.cz:_SALOMON_PATH_TO_YOUR_FILE_
-
-    $ gsiscp -P 2222 salomon.it4i.cz:_SALOMON_PATH_TO_YOUR_FILE_ _LOCAL_PATH_TO_YOUR_FILE_
-
-    $ gsiscp -P 2222 _LOCAL_PATH_TO_YOUR_FILE_ salomon-prace.it4i.cz:_SALOMON_PATH_TO_YOUR_FILE_
-
-    $ gsiscp -P 2222 salomon-prace.it4i.cz:_SALOMON_PATH_TO_YOUR_FILE_ _LOCAL_PATH_TO_YOUR_FILE_
+```console
+$ gsiscp -P 2222 _LOCAL_PATH_TO_YOUR_FILE_ salomon.it4i.cz:_SALOMON_PATH_TO_YOUR_FILE_
+$ gsiscp -P 2222 salomon.it4i.cz:_SALOMON_PATH_TO_YOUR_FILE_ _LOCAL_PATH_TO_YOUR_FILE_
+$ gsiscp -P 2222 _LOCAL_PATH_TO_YOUR_FILE_ salomon-prace.it4i.cz:_SALOMON_PATH_TO_YOUR_FILE_
+$ gsiscp -P 2222 salomon-prace.it4i.cz:_SALOMON_PATH_TO_YOUR_FILE_ _LOCAL_PATH_TO_YOUR_FILE_
 ```
 
 ### Access to X11 Applications (VNC)
@@ -111,8 +108,8 @@ If the user needs to run X11 based graphical application and does not have a X11
 
 If the user uses GSI SSH based access, then the procedure is similar to the SSH based access ([look here](../general/accessing-the-clusters/graphical-user-interface/x-window-system/)), only the port forwarding must be done using GSI SSH:
 
-```bash
-    $ gsissh -p 2222 salomon.it4i.cz -L 5961:localhost:5961
+```console
+$ gsissh -p 2222 salomon.it4i.cz -L 5961:localhost:5961
 ```
 
 ### Access With SSH
@@ -138,26 +135,26 @@ There's one control server and three backend servers for striping and/or backup
 
 Copy files **to** Salomon by running the following commands on your local machine:
 
-```bash
-    $ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp-prace.salomon.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_
+```console
+$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp-prace.salomon.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_
 ```
 
 Or by using prace_service script:
 
-```bash
-    $ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://`prace_service -i -f salomon`/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_
+```console
+$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://`prace_service -i -f salomon`/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_
 ```
 
 Copy files **from** Salomon:
 
-```bash
-    $ globus-url-copy gsiftp://gridftp-prace.salomon.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
+```console
+$ globus-url-copy gsiftp://gridftp-prace.salomon.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
 ```
 
 Or by using prace_service script:
 
-```bash
-    $ globus-url-copy gsiftp://`prace_service -i -f salomon`/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
+```console
+$ globus-url-copy gsiftp://`prace_service -i -f salomon`/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
 ```
 
 ### Access From Public Internet
@@ -171,26 +168,26 @@ Or by using prace_service script:
 
 Copy files **to** Salomon by running the following commands on your local machine:
 
-```bash
-    $ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp.salomon.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_
+```console
+$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp.salomon.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_
 ```
 
 Or by using prace_service script:
 
-```bash
-    $ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://`prace_service -e -f salomon`/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_
+```console
+$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://`prace_service -e -f salomon`/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_
 ```
 
 Copy files **from** Salomon:
 
-```bash
-    $ globus-url-copy gsiftp://gridftp.salomon.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
+```console
+$ globus-url-copy gsiftp://gridftp.salomon.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
 ```
 
 Or by using prace_service script:
 
-```bash
-    $ globus-url-copy gsiftp://`prace_service -e -f salomon`/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
+```console
+$ globus-url-copy gsiftp://`prace_service -e -f salomon`/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
 ```
 
 Generally both shared file systems are available through GridFTP:
@@ -222,8 +219,8 @@ All system wide installed software on the cluster is made available to the users
 
 PRACE users can use the "prace" module to use the [PRACE Common Production Environment](http://www.prace-ri.eu/prace-common-production-environment/).
 
-```bash
-    $ module load prace
+```console
+$ module load prace
 ```
 
 ### Resource Allocation and Job Execution
@@ -249,10 +246,10 @@ PRACE users should check their project accounting using the [PRACE Accounting To
 Users who have undergone the full local registration procedure (including signing the IT4Innovations Acceptable Use Policy) and who have received local password may check at any time, how many core-hours have been consumed by themselves and their projects using the command "it4ifree". You need to know your user password to use the command and that the displayed core hours are "system core hours" which differ from PRACE "standardized core hours".
 
 !!! note
-    The **it4ifree** command is a part of it4i.portal.clients package, located here: <https://pypi.python.org/pypi/it4i.portal.clients>
+    The **it4ifree** command is a part of it4i.portal.clients package, [located here](https://pypi.python.org/pypi/it4i.portal.clients).
 
-```bash
-    $ it4ifree
+```console
+$ it4ifree
     Password:
          PID    Total   Used   ...by me Free
        -------- ------- ------ -------- -------
@@ -262,9 +259,9 @@ Users who have undergone the full local registration procedure (including signin
 
 By default file system quota is applied. To check the current status of the quota (separate for HOME and SCRATCH) use
 
-```bash
-    $ quota
-    $ lfs quota -u USER_LOGIN /scratch
+```console
+$ quota
+$ lfs quota -u USER_LOGIN /scratch
 ```
 
 If the quota is insufficient, please contact the [support](prace/#help-and-support) and request an increase.
diff --git a/docs.it4i/salomon/resource-allocation-and-job-execution.md b/docs.it4i/salomon/resource-allocation-and-job-execution.md
index a28c2a63a19b0de082214d7e2a2e93da91b0d0e8..c960f3871cc94efbaa070eadae51775b00689405 100644
--- a/docs.it4i/salomon/resource-allocation-and-job-execution.md
+++ b/docs.it4i/salomon/resource-allocation-and-job-execution.md
@@ -1,6 +1,6 @@
 # Resource Allocation and Job Execution
 
-To run a [job](job-submission-and-execution/), [computational resources](resources-allocation-policy/) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which efficiently distributes workloads across the supercomputer. Extensive information about PBS Pro can be found in the [official documentation here](../pbspro-documentation/pbspro/), especially in the PBS Pro User's Guide.
+To run a [job](job-submission-and-execution/), [computational resources](resources-allocation-policy/) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which efficiently distributes workloads across the supercomputer. Extensive information about PBS Pro can be found in the [official documentation here](../pbspro/), especially in the PBS Pro User's Guide.
 
 ## Resources Allocation Policy
 
diff --git a/docs.it4i/salomon/resources-allocation-policy.md b/docs.it4i/salomon/resources-allocation-policy.md
index d705a527d4ed1e0988a4c76575687c23239e41de..98f9d34f8dad971e7b09141da657666881c87a00 100644
--- a/docs.it4i/salomon/resources-allocation-policy.md
+++ b/docs.it4i/salomon/resources-allocation-policy.md
@@ -22,7 +22,7 @@ The resources are allocated to the job in a fair-share fashion, subject to const
 * **qprod**, the Production queue: This queue is intended for normal production runs. It is required that active project with nonzero remaining resources is specified to enter the qprod. All nodes may be accessed via the qprod queue, however only 86 per job. Full nodes, 24 cores per node are allocated. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qprod is 48 hours.
 * **qlong**, the Long queue: This queue is intended for long production runs. It is required that active project with nonzero remaining resources is specified to enter the qlong. Only 336 nodes without acceleration may be accessed via the qlong queue. Full nodes, 24 cores per node are allocated. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qlong is 144 hours (three times of the standard qprod time - 3 \* 48 h)
 * **qmpp**, the massively parallel queue. This queue is intended for massively parallel runs. It is required that active project with nonzero remaining resources is specified to enter the qmpp. All nodes may be accessed via the qmpp queue. Full nodes, 24 cores per node are allocated. The queue runs with medium priority and no special authorization is required to use it.  The maximum runtime in qmpp is 4 hours. An PI needs explicitly ask support for authorization to enter the queue for all users associated to her/his Project.
-* **qfat**, the UV2000 queue. This queue is dedicated to access the fat SGI UV2000 SMP machine. The machine (uv1) has 112 Intel IvyBridge cores at 3.3GHz and 3.25TB RAM. An PI needs explicitly ask support for authorization to enter the queue for all users associated to her/his Project.
+* **qfat**, the UV2000 queue. This queue is dedicated to access the fat SGI UV2000 SMP machine. The machine (uv1) has 112 Intel IvyBridge cores at 3.3GHz and 3.25TB RAM (8 cores and 128GB RAM are dedicated to the system). A PI needs to explicitly ask support for authorization to enter the queue for all users associated to her/his Project.
 * **qfree**, the Free resource queue: The queue qfree is intended for utilization of free resources, after a Project exhausted all its allocated computational resources (Does not apply to DD projects by default. DD projects have to request for persmission on qfree after exhaustion of computational resources.). It is required that active project is specified to enter the queue, however no remaining resources are required. Consumed resources will be accounted to the Project. Only 178 nodes without accelerator may be accessed from this queue. Full nodes, 24 cores per node are allocated. The queue runs with very low priority and no special authorization is required to use it. The maximum runtime in qfree is 12 hours.
 * **qviz**, the Visualization queue: Intended for pre-/post-processing using OpenGL accelerated graphics. Currently when accessing the node, each user gets 4 cores of a CPU allocated, thus approximately 73 GB of RAM and 1/7 of the GPU capacity (default "chunk"). If more GPU power or RAM is required, it is recommended to allocate more chunks (with 4 cores each) up to one whole node per user, so that all 28 cores, 512 GB RAM and whole GPU is exclusive. This is currently also the maximum allowed allocation per one user. One hour of work is allocated by default, the user may ask for 2 hours maximum.
 
@@ -46,13 +46,13 @@ Salomon users may check current queue configuration at <https://extranet.it4i.cz
 
 Display the queue status on Salomon:
 
-```bash
+```console
 $ qstat -q
 ```
 
 The PBS allocation overview may be obtained also using the rspbs command.
 
-```bash
+```console
 $ rspbs
 Usage: rspbs [options]
 
@@ -122,7 +122,7 @@ The resources that are currently subject to accounting are the core-hours. The c
 
 User may check at any time, how many core-hours have been consumed by himself/herself and his/her projects. The command is available on clusters' login nodes.
 
-```bash
+```console
 $ it4ifree
 Password:
      PID    Total   Used   ...by me Free
diff --git a/docs.it4i/salomon/shell-and-data-access.md b/docs.it4i/salomon/shell-and-data-access.md
index 8c0012f110667a08217b18f117bec54f6f7e9dda..c3aad60a094512084e56bd3b3f68f082bda37ee5 100644
--- a/docs.it4i/salomon/shell-and-data-access.md
+++ b/docs.it4i/salomon/shell-and-data-access.md
@@ -11,9 +11,9 @@ The Salomon cluster is accessed by SSH protocol via login nodes login1, login2,
 | ---------------------- | ---- | -------- | ------------------------------------- |
 | salomon.it4i.cz        | 22   | ssh      | round-robin DNS record for login[1-4] |
 | login1.salomon.it4i.cz | 22   | ssh      | login1                                |
-| login1.salomon.it4i.cz | 22   | ssh      | login1                                |
-| login1.salomon.it4i.cz | 22   | ssh      | login1                                |
-| login1.salomon.it4i.cz | 22   | ssh      | login1                                |
+| login2.salomon.it4i.cz | 22   | ssh      | login2                                |
+| login3.salomon.it4i.cz | 22   | ssh      | login3                                |
+| login4.salomon.it4i.cz | 22   | ssh      | login4                                |
 
 The authentication is by the [private key](../general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/)
 
@@ -26,13 +26,13 @@ Private key authentication:
 
 On **Linux** or **Mac**, use
 
-```bash
+```console
 local $ ssh -i /path/to/id_rsa username@salomon.it4i.cz
 ```
 
 If you see warning message "UNPROTECTED PRIVATE KEY FILE!", use this command to set lower permissions to private key file.
 
-```bash
+```console
 local $ chmod 600 /path/to/id_rsa
 ```
 
@@ -40,7 +40,7 @@ On **Windows**, use [PuTTY ssh client](../general/accessing-the-clusters/shell-a
 
 After logging in, you will see the command prompt:
 
-```bash
+```console
                     _____       _
                    / ____|     | |
                   | (___   __ _| | ___  _ __ ___   ___  _ __
@@ -75,23 +75,23 @@ The authentication is by the [private key](../general/accessing-the-clusters/she
 
 On linux or Mac, use scp or sftp client to transfer the data to Salomon:
 
-```bash
+```console
 local $ scp -i /path/to/id_rsa my-local-file username@salomon.it4i.cz:directory/file
 ```
 
-```bash
+```console
 local $ scp -i /path/to/id_rsa -r my-local-dir username@salomon.it4i.cz:directory
 ```
 
 or
 
-```bash
+```console
 local $ sftp -o IdentityFile=/path/to/id_rsa username@salomon.it4i.cz
 ```
 
 Very convenient way to transfer files in and out of the Salomon computer is via the fuse filesystem [sshfs](http://linux.die.net/man/1/sshfs)
 
-```bash
+```console
 local $ sshfs -o IdentityFile=/path/to/id_rsa username@salomon.it4i.cz:. mountpoint
 ```
 
@@ -99,7 +99,7 @@ Using sshfs, the users Salomon home directory will be mounted on your local comp
 
 Learn more on ssh, scp and sshfs by reading the manpages
 
-```bash
+```console
 $ man ssh
 $ man scp
 $ man sshfs
@@ -136,7 +136,7 @@ It works by tunneling the connection from Salomon back to users workstation and
 
 Pick some unused port on Salomon login node  (for example 6000) and establish the port forwarding:
 
-```bash
+```console
 local $ ssh -R 6000:remote.host.com:1234 salomon.it4i.cz
 ```
 
@@ -146,7 +146,7 @@ Port forwarding may be done **using PuTTY** as well. On the PuTTY Configuration
 
 Port forwarding may be established directly to the remote host. However, this requires that user has ssh access to remote.host.com
 
-```bash
+```console
 $ ssh -L 6000:localhost:1234 remote.host.com
 ```
 
@@ -160,7 +160,7 @@ First, establish the remote port forwarding form the login node, as [described a
 
 Second, invoke port forwarding from the compute node to the login node. Insert following line into your jobscript or interactive shell
 
-```bash
+```console
 $ ssh  -TN -f -L 6000:localhost:6000 login1
 ```
 
@@ -175,7 +175,7 @@ Port forwarding is static, each single port is mapped to a particular port on re
 
 To establish local proxy server on your workstation, install and run SOCKS proxy server software. On Linux, sshd demon provides the functionality. To establish SOCKS proxy server listening on port 1080 run:
 
-```bash
+```console
 local $ ssh -D 1080 localhost
 ```
 
@@ -183,7 +183,7 @@ On Windows, install and run the free, open source [Sock Puppet](http://sockspupp
 
 Once the proxy server is running, establish ssh port forwarding from Salomon to the proxy server, port 1080, exactly as [described above](#port-forwarding-from-login-nodes).
 
-```bash
+```console
 local $ ssh -R 6000:localhost:1080 salomon.it4i.cz
 ```
 
diff --git a/docs.it4i/salomon/software/ansys/ansys-fluent.md b/docs.it4i/salomon/software/ansys/ansys-fluent.md
index 33e711b285cc8066604c43ebb7c943dcb1294fb6..27469a1c559355d1347ba3cfd76e303893caeb38 100644
--- a/docs.it4i/salomon/software/ansys/ansys-fluent.md
+++ b/docs.it4i/salomon/software/ansys/ansys-fluent.md
@@ -44,7 +44,7 @@ Working directory has to be created before sending pbs job into the queue. Input
 
 Journal file with definition of the input geometry and boundary conditions and defined process of solution has e.g. the following structure:
 
-```bash
+```console
     /file/read-case aircraft_2m.cas.gz
     /solve/init
     init
@@ -58,7 +58,7 @@ The appropriate dimension of the problem has to be set by parameter (2d/3d).
 
 1. Fast way to run Fluent from command line
 
-```bash
+```console
 fluent solver_version [FLUENT_options] -i journal_file -pbs
 ```
 
@@ -68,7 +68,7 @@ This syntax will start the ANSYS FLUENT job under PBS Professional using the qsu
 
 The sample script uses a configuration file called pbs_fluent.conf if no command line arguments are present. This configuration file should be present in the directory from which the jobs are submitted (which is also the directory in which the jobs are executed). The following is an example of what the content of pbs_fluent.conf can be:
 
-```bash
+```console
 input="example_small.flin"
 case="Small-1.65m.cas"
 fluent_args="3d -pmyrinet"
@@ -145,7 +145,7 @@ It runs the jobs out of the directory from which they are submitted (PBS_O_WORKD
 
 Fluent could be run in parallel only under Academic Research license. To do so this ANSYS Academic Research license must be placed before ANSYS CFD license in user preferences. To make this change anslic_admin utility should be run
 
-```bash
+```console
 /ansys_inc/shared_les/licensing/lic_admin/anslic_admin
 ```
 
diff --git a/docs.it4i/salomon/software/ansys/ansys.md b/docs.it4i/salomon/software/ansys/ansys.md
index f93524a3e580f8a5c83302f8d1cd9997bb68c2be..d7e0f2e1444ddc77dd861a4cce4eef06b4c78a6c 100644
--- a/docs.it4i/salomon/software/ansys/ansys.md
+++ b/docs.it4i/salomon/software/ansys/ansys.md
@@ -6,8 +6,8 @@ Anselm provides as commercial as academic variants. Academic variants are distin
 
 To load the latest version of any ANSYS product (Mechanical, Fluent, CFX, MAPDL,...) load the module:
 
-```bash
-    $ module load ansys
+```console
+$ ml ansys
 ```
 
 ANSYS supports interactive regime, but due to assumed solution of extremely difficult tasks it is not recommended.
diff --git a/docs.it4i/salomon/software/ansys/licensing.md b/docs.it4i/salomon/software/ansys/licensing.md
index 04ff6513349ccede25a0846dd21227251e954732..eac78966d4b5183b2f0052d2ab6aea37f28eccc5 100644
--- a/docs.it4i/salomon/software/ansys/licensing.md
+++ b/docs.it4i/salomon/software/ansys/licensing.md
@@ -18,6 +18,7 @@ The licence intended to be used for science and research, publications, students
 
 * 16.1
 * 17.0
+* 18.0
 
 ## License Preferences
 
diff --git a/docs.it4i/salomon/software/ansys/setting-license-preferences.md b/docs.it4i/salomon/software/ansys/setting-license-preferences.md
index fe14541d46b1fe4cab38eb7b883c58e40e03dd32..b3f594d14863cde6aaa28f7a5139223d30a7d95b 100644
--- a/docs.it4i/salomon/software/ansys/setting-license-preferences.md
+++ b/docs.it4i/salomon/software/ansys/setting-license-preferences.md
@@ -6,8 +6,8 @@ Thus you need to configure preferred license order with ANSLIC_ADMIN. Please fol
 
 Launch the ANSLIC_ADMIN utility in a graphical environment:
 
-```bash
-     $ANSYSLIC_DIR/lic_admin/anslic_admin
+```console
+$ANSYSLIC_DIR/lic_admin/anslic_admin
 ```
 
 ANSLIC_ADMIN Utility will be run
diff --git a/docs.it4i/salomon/software/ansys/workbench.md b/docs.it4i/salomon/software/ansys/workbench.md
index 8ed07d789dea69798e68c177ac1612a3e391ec88..1b138ccd09fa64fd6ccbafbcb40ff14b2959bad4 100644
--- a/docs.it4i/salomon/software/ansys/workbench.md
+++ b/docs.it4i/salomon/software/ansys/workbench.md
@@ -8,7 +8,7 @@ It is possible to run Workbench scripts in batch mode. You need to configure sol
 
 Enable Distribute Solution checkbox and enter number of cores (eg. 48 to run on two Salomon nodes). If you want the job to run on more then 1 node, you must also provide a so called MPI appfile. In the Additional Command Line Arguments input field, enter:
 
-```bash
+```console
     -mpifile /path/to/my/job/mpifile.txt
 ```
 
diff --git a/docs.it4i/salomon/software/chemistry/nwchem.md b/docs.it4i/salomon/software/chemistry/nwchem.md
index a26fc701ee44585dbab1f942685b92d9190adfa5..add429da99d2044e2ddaa64d29350e766c558bc2 100644
--- a/docs.it4i/salomon/software/chemistry/nwchem.md
+++ b/docs.it4i/salomon/software/chemistry/nwchem.md
@@ -15,8 +15,8 @@ The following versions are currently installed:
 
 For a current list of installed versions, execute:
 
-```bash
-    module avail NWChem
+```console
+$ ml av NWChem
 ```
 
 The recommend to use version 6.5. Version 6.3 fails on Salomon nodes with accelerator, because it attempts to communicate over scif0 interface. In 6.5 this is avoided by setting ARMCI_OPENIB_DEVICE=mlx4_0, this setting is included in the module.
diff --git a/docs.it4i/salomon/software/chemistry/phono3py.md b/docs.it4i/salomon/software/chemistry/phono3py.md
index 3f747d23bc9775f80137c0d6e4f1b4821d97439b..5f366baa1e6acb0cb948cd473a9acb65243691c8 100644
--- a/docs.it4i/salomon/software/chemistry/phono3py.md
+++ b/docs.it4i/salomon/software/chemistry/phono3py.md
@@ -4,11 +4,14 @@
 
 This GPL software calculates phonon-phonon interactions via the third order force constants. It allows to obtain lattice thermal conductivity, phonon lifetime/linewidth, imaginary part of self energy at the lowest order, joint density of states (JDOS) and weighted-JDOS. For details see Phys. Rev. B 91, 094306 (2015) and <http://atztogo.github.io/phono3py/index.html>
 
-!!! note
-    Load the phono3py/0.9.14-ictce-7.3.5-Python-2.7.9 module
+Available modules:
 
-```bash
-$ module load phono3py/0.9.14-ictce-7.3.5-Python-2.7.9
+```console
+$ ml av phono3py
+```
+
+```console
+$ ml phono3py
 ```
 
 ## Example of Calculating Thermal Conductivity of Si Using VASP Code.
@@ -17,7 +20,7 @@ $ module load phono3py/0.9.14-ictce-7.3.5-Python-2.7.9
 
 One needs to calculate second order and third order force constants using the diamond structure of silicon stored in [POSCAR](poscar-si)  (the same form as in VASP) using single displacement calculations within supercell.
 
-```bash
+```console
 $ cat POSCAR
  Si
    1.0
@@ -39,14 +42,14 @@ Direct
 
 ### Generating Displacement Using 2 by 2 by 2 Supercell for Both Second and Third Order Force Constants
 
-```bash
+```console
 $ phono3py -d --dim="2 2 2" -c POSCAR
 ```
 
  111 displacements is created stored in
 disp_fc3.yaml, and the structure input files with this displacements are POSCAR-00XXX, where the XXX=111.
 
-```bash
+```console
 disp_fc3.yaml POSCAR-00008 POSCAR-00017 POSCAR-00026 POSCAR-00035 POSCAR-00044 POSCAR-00053 POSCAR-00062 POSCAR-00071 POSCAR-00080 POSCAR-00089 POSCAR-00098 POSCAR-00107
 POSCAR         POSCAR-00009 POSCAR-00018 POSCAR-00027 POSCAR-00036 POSCAR-00045 POSCAR-00054 POSCAR-00063 POSCAR-00072 POSCAR-00081 POSCAR-00090 POSCAR-00099 POSCAR-00108
 POSCAR-00001   POSCAR-00010 POSCAR-00019 POSCAR-00028 POSCAR-00037 POSCAR-00046 POSCAR-00055 POSCAR-00064 POSCAR-00073 POSCAR-00082 POSCAR-00091 POSCAR-00100 POSCAR-00109
@@ -60,7 +63,7 @@ POSCAR-00007   POSCAR-00016 POSCAR-00025 POSCAR-00034 POSCAR-00043 POSCAR-00052
 
 For each displacement the forces needs to be calculated, i.e. in form of the output file of VASP (vasprun.xml). For a single VASP calculations one needs [KPOINTS](KPOINTS), [POTCAR](POTCAR), [INCAR](INCAR) in your case directory (where you have POSCARS) and those 111 displacements calculations can be generated by [prepare.sh](prepare.sh) script. Then each of the single 111 calculations is submitted [run.sh](run.sh) by [submit.sh](submit.sh).
 
-```bash
+```console
 $./prepare.sh
 $ls
 disp-00001 disp-00009 disp-00017 disp-00025 disp-00033 disp-00041 disp-00049 disp-00057 disp-00065 disp-00073 disp-00081 disp-00089 disp-00097 disp-00105     INCAR
@@ -75,7 +78,7 @@ disp-00008 disp-00016 disp-00024 disp-00032 disp-00040 disp-00048 disp-00056 dis
 
 Taylor your run.sh script to fit into your project and other needs and submit all 111 calculations using submit.sh script
 
-```bash
+```console
 $ ./submit.sh
 ```
 
@@ -83,13 +86,13 @@ $ ./submit.sh
 
 Once all jobs are finished and vasprun.xml is created in each disp-XXXXX directory the collection is done by
 
-```bash
+```console
 $ phono3py --cf3 disp-{00001..00111}/vasprun.xml
 ```
 
 and `disp_fc2.yaml, FORCES_FC2`, `FORCES_FC3` and disp_fc3.yaml should appear and put into the hdf format by
 
-```bash
+```console
 $ phono3py --dim="2 2 2" -c POSCAR
 ```
 
@@ -99,13 +102,13 @@ resulting in `fc2.hdf5` and `fc3.hdf5`
 
 The phonon lifetime calculations takes some time, however is independent on grid points, so could be splitted:
 
-```bash
+```console
 $ phono3py --fc3 --fc2 --dim="2 2 2" --mesh="9 9 9" --sigma 0.1 --wgp
 ```
 
 ### Inspecting ir_grid_points.yaml
 
-```bash
+```console
 $ grep grid_point ir_grid_points.yaml
 num_reduced_ir_grid_points: 35
 ir_grid_points:  # [address, weight]
@@ -148,18 +151,18 @@ ir_grid_points:  # [address, weight]
 
 one finds which grid points needed to be calculated, for instance using following
 
-```bash
+```console
 $ phono3py --fc3 --fc2 --dim="2 2 2" --mesh="9 9 9" -c POSCAR  --sigma 0.1 --br --write-gamma --gp="0 1 2
 ```
 
 one calculates grid points 0, 1, 2. To automize one can use for instance scripts to submit 5 points in series, see [gofree-cond1.sh](gofree-cond1.sh)
 
-```bash
+```console
 $ qsub gofree-cond1.sh
 ```
 
 Finally the thermal conductivity result is produced by grouping single conductivity per grid calculations using
 
-```bash
+```console
 $ phono3py --fc3 --fc2 --dim="2 2 2" --mesh="9 9 9" --br --read_gamma
 ```
diff --git a/docs.it4i/salomon/software/compilers.md b/docs.it4i/salomon/software/compilers.md
index 8e62965ff71b3afbd4e178c5019a0101597401b5..a49aa8eb4dfa2d832572e8c225b6ceccdd84bc82 100644
--- a/docs.it4i/salomon/software/compilers.md
+++ b/docs.it4i/salomon/software/compilers.md
@@ -29,25 +29,25 @@ For information about the usage of Intel Compilers and other Intel products, ple
 
 The Portland Group Cluster Development Kit (PGI CDK) is available.
 
-```bash
-    $ module load PGI
-    $ pgcc -v
-    $ pgc++ -v
-    $ pgf77 -v
-    $ pgf90 -v
-    $ pgf95 -v
-    $ pghpf -v
+```console
+$ module load PGI
+$ pgcc -v
+$ pgc++ -v
+$ pgf77 -v
+$ pgf90 -v
+$ pgf95 -v
+$ pghpf -v
 ```
 
 The PGI CDK also incudes tools for debugging and profiling.
 
 PGDBG OpenMP/MPI debugger and PGPROF OpenMP/MPI profiler are available
 
-```bash
-    $ module load PGI
-    $ module load Java
-    $ pgdbg &
-    $ pgprof &
+```console
+$ module load PGI
+$ module load Java
+$ pgdbg &
+$ pgprof &
 ```
 
 For more information, see the [PGI page](http://www.pgroup.com/products/pgicdk.htm).
@@ -58,21 +58,21 @@ For compatibility reasons there are still available the original (old 4.4.7-11)
 
 It is strongly recommended to use the up to date version which comes with the module GCC:
 
-```bash
-    $ module load GCC
-    $ gcc -v
-    $ g++ -v
-    $ gfortran -v
+```console
+$ module load GCC
+$ gcc -v
+$ g++ -v
+$ gfortran -v
 ```
 
 With the module loaded two environment variables are predefined. One for maximum optimizations on the cluster's architecture, and the other for debugging purposes:
 
-```bash
-    $ echo $OPTFLAGS
-    -O3 -march=native
+```console
+$ echo $OPTFLAGS
+-O3 -march=native
 
-    $ echo $DEBUGFLAGS
-    -O0 -g
+$ echo $DEBUGFLAGS
+-O0 -g
 ```
 
 For more information about the possibilities of the compilers, please see the man pages.
@@ -88,41 +88,41 @@ UPC is supported by two compiler/runtime implementations:
 
 To use the GNU UPC compiler and run the compiled binaries use the module gupc
 
-```bash
-    $ module add gupc
-    $ gupc -v
-    $ g++ -v
+```console
+$ module add gupc
+$ gupc -v
+$ g++ -v
 ```
 
 Simple program to test the compiler
 
-```bash
-    $ cat count.upc
-
-    /* hello.upc - a simple UPC example */
-    #include <upc.h>
-    #include <stdio.h>
-
-    int main() {
-      if (MYTHREAD == 0) {
-        printf("Welcome to GNU UPC!!!n");
-      }
-      upc_barrier;
-      printf(" - Hello from thread %in", MYTHREAD);
-      return 0;
-    }
+```cpp
+$ cat count.upc
+
+/* hello.upc - a simple UPC example */
+#include <upc.h>
+#include <stdio.h>
+
+int main() {
+  if (MYTHREAD == 0) {
+    printf("Welcome to GNU UPC!!!\n");
+  }
+  upc_barrier;
+  printf(" - Hello from thread %i\n", MYTHREAD);
+  return 0;
+}
 ```
 
 To compile the example use
 
-```bash
-    $ gupc -o count.upc.x count.upc
+```console
+$ gupc -o count.upc.x count.upc
 ```
 
 To run the example with 5 threads issue
 
-```bash
-    $ ./count.upc.x -fupc-threads-5
+```console
+$ ./count.upc.x -fupc-threads-5
 ```
 
 For more information see the man pages.
@@ -131,9 +131,9 @@ For more information see the man pages.
 
 To use the Berkley UPC compiler and runtime environment to run the binaries use the module bupc
 
-```bash
-    $ module add BerkeleyUPC/2.16.2-gompi-2015b
-    $ upcc -version
+```console
+$ module add BerkeleyUPC/2.16.2-gompi-2015b
+$ upcc -version
 ```
 
 As default UPC network the "smp" is used. This is very quick and easy way for testing/debugging, but limited to one node only.
@@ -145,41 +145,41 @@ For production runs, it is recommended to use the native InfiniBand implementati
 
 Example UPC code:
 
-```bash
-    $ cat hello.upc
-
-    /* hello.upc - a simple UPC example */
-    #include <upc.h>
-    #include <stdio.h>
-
-    int main() {
-      if (MYTHREAD == 0) {
-        printf("Welcome to Berkeley UPC!!!n");
-      }
-      upc_barrier;
-      printf(" - Hello from thread %in", MYTHREAD);
-      return 0;
-    }
+```cpp
+$ cat hello.upc
+
+/* hello.upc - a simple UPC example */
+#include <upc.h>
+#include <stdio.h>
+
+int main() {
+  if (MYTHREAD == 0) {
+    printf("Welcome to Berkeley UPC!!!\n");
+  }
+  upc_barrier;
+  printf(" - Hello from thread %i\n", MYTHREAD);
+  return 0;
+}
 ```
 
 To compile the example with the "ibv" UPC network use
 
-```bash
-    $ upcc -network=ibv -o hello.upc.x hello.upc
+```console
+$ upcc -network=ibv -o hello.upc.x hello.upc
 ```
 
 To run the example with 5 threads issue
 
-```bash
-    $ upcrun -n 5 ./hello.upc.x
+```console
+$ upcrun -n 5 ./hello.upc.x
 ```
 
 To run the example on two compute nodes using all 48 cores, with 48 threads, issue
 
-```bash
-    $ qsub -I -q qprod -A PROJECT_ID -l select=2:ncpus=24
-    $ module add bupc
-    $ upcrun -n 48 ./hello.upc.x
+```console
+$ qsub -I -q qprod -A PROJECT_ID -l select=2:ncpus=24
+$ module add bupc
+$ upcrun -n 48 ./hello.upc.x
 ```
 
 For more information see the man pages.
diff --git a/docs.it4i/salomon/software/comsol/comsol-multiphysics.md b/docs.it4i/salomon/software/comsol/comsol-multiphysics.md
index 05a6d2944b2e8db354e134c8f506f87b70f0531a..431294469311b408c9e023c17347cae239037622 100644
--- a/docs.it4i/salomon/software/comsol/comsol-multiphysics.md
+++ b/docs.it4i/salomon/software/comsol/comsol-multiphysics.md
@@ -22,22 +22,22 @@ On the clusters COMSOL is available in the latest stable version. There are two
 
 To load the of COMSOL load the module
 
-```bash
-$ module load COMSOL/51-EDU
+```console
+$ ml COMSOL/51-EDU
 ```
 
 By default the **EDU variant** will be loaded. If user needs other version or variant, load the particular version. To obtain the list of available versions use
 
-```bash
-$ module avail COMSOL
+```console
+$ ml av COMSOL
 ```
 
 If user needs to prepare COMSOL jobs in the interactive mode it is recommend to use COMSOL on the compute nodes via PBS Pro scheduler. In order run the COMSOL Desktop GUI on Windows is recommended to use the [Virtual Network Computing (VNC)](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/).
 
-```bash
+```console
 $ xhost +
 $ qsub -I -X -A PROJECT_ID -q qprod -l select=1:ppn=24
-$ module load COMSOL
+$ ml COMSOL
 $ comsol
 ```
 
@@ -74,13 +74,13 @@ Working directory has to be created before sending the (comsol.pbs) job script i
 
 COMSOL is the software package for the numerical solution of the partial differential equations. LiveLink for MATLAB allows connection to the COMSOL API (Application Programming Interface) with the benefits of the programming language and computing environment of the MATLAB.
 
-LiveLink for MATLAB is available in both **EDU** and **COM** **variant** of the COMSOL release. On the clusters 1 commercial (**COM**) license and the 5 educational (**EDU**) licenses of LiveLink for MATLAB (please see the [ISV Licenses](../isv_licenses/)) are available. Following example shows how to start COMSOL model from MATLAB via LiveLink in the interactive mode.
+LiveLink for MATLAB is available in both **EDU** and **COM** **variant** of the COMSOL release. On the clusters 1 commercial (**COM**) license and the 5 educational (**EDU**) licenses of LiveLink for MATLAB (please see the [ISV Licenses](../../../anselm/software/isv_licenses/)) are available. Following example shows how to start COMSOL model from MATLAB via LiveLink in the interactive mode.
 
-```bash
+```console
 $ xhost +
 $ qsub -I -X -A PROJECT_ID -q qexp -l select=1:ppn=24
-$ module load MATLAB
-$ module load COMSOL
+$ ml MATLAB
+$ ml COMSOL
 $ comsol server MATLAB
 ```
 
diff --git a/docs.it4i/salomon/software/debuggers/Introduction.md b/docs.it4i/salomon/software/debuggers/Introduction.md
index a5c9cfb60154fbaf13faebaf15a508597b40703f..4ce2fc77b013659f5b128408e4ec5f0e78c9c686 100644
--- a/docs.it4i/salomon/software/debuggers/Introduction.md
+++ b/docs.it4i/salomon/software/debuggers/Introduction.md
@@ -10,9 +10,9 @@ Intel debugger is no longer available since Parallel Studio version 2015
 
 The intel debugger version 13.0 is available, via module intel. The debugger works for applications compiled with C and C++ compiler and the ifort fortran 77/90/95 compiler. The debugger provides java GUI environment.
 
-```bash
-    $ module load intel
-    $ idb
+```console
+$ ml intel
+$ idb
 ```
 
 Read more at the [Intel Debugger](../intel-suite/intel-debugger/) page.
@@ -21,9 +21,9 @@ Read more at the [Intel Debugger](../intel-suite/intel-debugger/) page.
 
 Allinea DDT, is a commercial debugger primarily for debugging parallel MPI or OpenMP programs. It also has a support for GPU (CUDA) and Intel Xeon Phi accelerators. DDT provides all the standard debugging features (stack trace, breakpoints, watches, view variables, threads etc.) for every thread running as part of your program, or for every process - even if these processes are distributed across a cluster using an MPI implementation.
 
-```bash
-    $ module load Forge
-    $ forge
+```console
+$ ml Forge
+$ forge
 ```
 
 Read more at the [Allinea DDT](allinea-ddt/) page.
@@ -32,9 +32,9 @@ Read more at the [Allinea DDT](allinea-ddt/) page.
 
 Allinea Performance Reports characterize the performance of HPC application runs. After executing your application through the tool, a synthetic HTML report is generated automatically, containing information about several metrics along with clear behavior statements and hints to help you improve the efficiency of your runs. Our license is limited to 64 MPI processes.
 
-```bash
-    $ module load PerformanceReports/6.0
-    $ perf-report mpirun -n 64 ./my_application argument01 argument02
+```console
+$ ml PerformanceReports/6.0
+$ perf-report mpirun -n 64 ./my_application argument01 argument02
 ```
 
 Read more at the [Allinea Performance Reports](allinea-performance-reports/) page.
@@ -43,9 +43,9 @@ Read more at the [Allinea Performance Reports](allinea-performance-reports/) pag
 
 TotalView is a source- and machine-level debugger for multi-process, multi-threaded programs. Its wide range of tools provides ways to analyze, organize, and test programs, making it easy to isolate and identify problems in individual threads and processes in programs of great complexity.
 
-```bash
-    $ module load TotalView/8.15.4-6-linux-x86-64
-    $ totalview
+```console
+$ ml TotalView/8.15.4-6-linux-x86-64
+$ totalview
 ```
 
 Read more at the [Totalview](total-view/) page.
@@ -54,8 +54,8 @@ Read more at the [Totalview](total-view/) page.
 
 Vampir is a GUI trace analyzer for traces in OTF format.
 
-```bash
-    $ module load Vampir/8.5.0
+```console
+    $ ml Vampir/8.5.0
     $ vampir
 ```
 
diff --git a/docs.it4i/salomon/software/debuggers/aislinn.md b/docs.it4i/salomon/software/debuggers/aislinn.md
index e1dee28b8d6d78ef7be2371afb2f8884f2b5f364..89cf7538016c004b1ba9058bcf148bbf0761eb50 100644
--- a/docs.it4i/salomon/software/debuggers/aislinn.md
+++ b/docs.it4i/salomon/software/debuggers/aislinn.md
@@ -49,13 +49,13 @@ The program does the following: process 0 receives two messages from anyone and
 
 To verify this program by Aislinn, we first load Aislinn itself:
 
-```bash
-$ module load aislinn
+```console
+$ ml aislinn
 ```
 
 Now we compile the program by Aislinn implementation of MPI. There are `mpicc` for C programs and `mpicxx` for C++ programs. Only MPI parts of the verified application has to be recompiled; non-MPI parts may remain untouched. Let us assume that our program is in `test.cpp`.
 
-```bash
+```console
 $ mpicc -g test.cpp -o test
 ```
 
@@ -63,7 +63,7 @@ The `-g` flag is not necessary, but it puts more debugging information into the
 
 Now we run the Aislinn itself. The argument `-p 3` specifies that we want to verify our program for the case of three MPI processes
 
-```bash
+```console
 $ aislinn -p 3 ./test
 ==AN== INFO: Aislinn v0.3.0
 ==AN== INFO: Found error 'Invalid write'
@@ -73,8 +73,8 @@ $ aislinn -p 3 ./test
 
 Aislinn found an error and produced HTML report. To view it, we can use any browser, e.g.:
 
-```bash
- $ firefox report.html
+```console
+$ firefox report.html
 ```
 
 At the beginning of the report there are some basic summaries of the verification. In the second part (depicted in the following picture), the error is described.
diff --git a/docs.it4i/salomon/software/debuggers/allinea-ddt.md b/docs.it4i/salomon/software/debuggers/allinea-ddt.md
index 41dd4c6e8266e257a425c0e7a8b54330c38ccf04..6e1f046f10fd2d521343a995cb59580440080a73 100644
--- a/docs.it4i/salomon/software/debuggers/allinea-ddt.md
+++ b/docs.it4i/salomon/software/debuggers/allinea-ddt.md
@@ -24,22 +24,21 @@ In case of debugging on accelerators:
 
 Load all necessary modules to compile the code. For example:
 
-```bash
-    $ module load intel
-    $ module load impi   ... or ... module load openmpi/X.X.X-icc
+```console
+$ ml intel
+$ ml impi   # or: ml OpenMPI/X.X.X-icc
 ```
 
 Load the Allinea DDT module:
 
-```bash
-    $ module load Forge
+```console
+$ ml Forge
 ```
 
 Compile the code:
 
-```bash
+```console
 $ mpicc -g -O0 -o test_debug test.c
-
 $ mpif90 -g -O0 -o test_debug test.f
 ```
 
@@ -56,22 +55,22 @@ Before debugging, you need to compile your code with theses flags:
 
 Be sure to log in with an X window forwarding enabled. This could mean using the -X in the ssh:
 
-```bash
-    $ ssh -X username@anselm.it4i.cz
+```console
+$ ssh -X username@anselm.it4i.cz
 ```
 
 Other options is to access login node using VNC. Please see the detailed information on how to [use graphic user interface on Anselm](/general/accessing-the-clusters/graphical-user-interface/x-window-system/)
 
 From the login node an interactive session **with X windows forwarding** (-X option) can be started by following command:
 
-```bash
-    $ qsub -I -X -A NONE-0-0 -q qexp -lselect=1:ncpus=16:mpiprocs=16,walltime=01:00:00
+```console
+$ qsub -I -X -A NONE-0-0 -q qexp -lselect=1:ncpus=16:mpiprocs=16,walltime=01:00:00
 ```
 
 Then launch the debugger with the ddt command followed by the name of the executable to debug:
 
-```bash
-    $ ddt test_debug
+```console
+$ ddt test_debug
 ```
 
 A submission window that appears have a prefilled path to the executable to debug. You can select the number of MPI processors and/or OpenMP threads on which to run and press run. Command line arguments to a program can be entered to the "Arguments " box.
@@ -80,16 +79,16 @@ A submission window that appears have a prefilled path to the executable to debu
 
 To start the debugging directly without the submission window, user can specify the debugging and execution parameters from the command line. For example the number of MPI processes is set by option "-np 4". Skipping the dialog is done by "-start" option. To see the list of the "ddt" command line parameters, run "ddt --help".
 
-```bash
-    ddt -start -np 4 ./hello_debug_impi
+```console
+ddt -start -np 4 ./hello_debug_impi
 ```
 
 ## Documentation
 
 Users can find original User Guide after loading the DDT module:
 
-```bash
-    $DDTPATH/doc/userguide.pdf
+```console
+$DDTPATH/doc/userguide.pdf
 ```
 
 [1] Discipline, Magic, Inspiration and Science: Best Practice Debugging with Allinea DDT, Workshop conducted at LLNL by Allinea on May 10, 2013, [link](https://computing.llnl.gov/tutorials/allineaDDT/index.html)
diff --git a/docs.it4i/salomon/software/debuggers/allinea-performance-reports.md b/docs.it4i/salomon/software/debuggers/allinea-performance-reports.md
index 3d0826e994bb6434b9cd0cd100249393191c03d3..ead91a093c83ba9503f2be7ba702e698d7bca0df 100644
--- a/docs.it4i/salomon/software/debuggers/allinea-performance-reports.md
+++ b/docs.it4i/salomon/software/debuggers/allinea-performance-reports.md
@@ -12,8 +12,8 @@ Our license is limited to 64 MPI processes.
 
 Allinea Performance Reports version 6.0 is available
 
-```bash
-    $ module load PerformanceReports/6.0
+```console
+$ ml PerformanceReports/6.0
 ```
 
 The module sets up environment variables, required for using the Allinea Performance Reports.
@@ -24,8 +24,8 @@ Use the the perf-report wrapper on your (MPI) program.
 
 Instead of [running your MPI program the usual way](../mpi/mpi/), use the the perf report wrapper:
 
-```bash
-    $ perf-report mpirun ./mympiprog.x
+```console
+$ perf-report mpirun ./mympiprog.x
 ```
 
 The mpi program will run as usual. The perf-report creates two additional files, in \*.txt and \*.html format, containing the performance report. Note that demanding MPI codes should be run within [the queue system](../../job-submission-and-execution/).
@@ -36,23 +36,24 @@ In this example, we will be profiling the mympiprog.x MPI program, using Allinea
 
 First, we allocate some nodes via the express queue:
 
-```bash
-    $ qsub -q qexp -l select=2:ppn=24:mpiprocs=24:ompthreads=1 -I
+```console
+$ qsub -q qexp -l select=2:ppn=24:mpiprocs=24:ompthreads=1 -I
     qsub: waiting for job 262197.dm2 to start
     qsub: job 262197.dm2 ready
 ```
 
 Then we load the modules and run the program the usual way:
 
-```bash
-    $ module load intel impi PerfReports/6.0
-    $ mpirun ./mympiprog.x
+```console
+$ ml intel
+$ ml PerfReports/6.0
+$ mpirun ./mympiprog.x
 ```
 
 Now lets profile the code:
 
-```bash
-    $ perf-report mpirun ./mympiprog.x
+```console
+$ perf-report mpirun ./mympiprog.x
 ```
 
 Performance report files [mympiprog_32p\*.txt](mympiprog_32p_2014-10-15_16-56.txt) and [mympiprog_32p\*.html](mympiprog_32p_2014-10-15_16-56.html) were created. We can see that the code is very efficient on MPI and is CPU bounded.
diff --git a/docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md b/docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md
index 2fdbd18e166d3e553a8ad5719f7945f902cbd73c..192aece7e250dfb9b2938daebe83606a1f002b06 100644
--- a/docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md
+++ b/docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md
@@ -15,14 +15,14 @@ Intel *®* VTune™ Amplifier, part of Intel Parallel studio, is a GUI profiling
 
 To profile an application with VTune Amplifier, special kernel modules need to be loaded. The modules are not loaded on the login nodes, thus direct profiling on login nodes is not possible. By default, the kernel modules ale not loaded on compute nodes neither. In order to have the modules loaded, you need to specify vtune=version PBS resource at job submit. The version is the same as for environment module. For example to use VTune/2016_update1:
 
-```bash
-    $ qsub -q qexp -A OPEN-0-0 -I -l select=1,vtune=2016_update1
+```console
+$ qsub -q qexp -A OPEN-0-0 -I -l select=1,vtune=2016_update1
 ```
 
 After that, you can verify the modules sep\*, pax and vtsspp are present in the kernel :
 
-```bash
-    $ lsmod | grep -e sep -e pax -e vtsspp
+```console
+$ lsmod | grep -e sep -e pax -e vtsspp
     vtsspp 362000 0
     sep3_15 546657 0
     pax 4312 0
@@ -30,14 +30,14 @@ After that, you can verify the modules sep\*, pax and vtsspp are present in the
 
 To launch the GUI, first load the module:
 
-```bash
-    $ module add VTune/2016_update1
+```console
+$ module add VTune/2016_update1
 ```
 
 and launch the GUI :
 
-```bash
-    $ amplxe-gui
+```console
+$ amplxe-gui
 ```
 
 The GUI will open in new window. Click on "New Project..." to create a new project. After clicking OK, a new window with project properties will appear.  At "Application:", select the bath to your binary you want to profile (the binary should be compiled with -g flag). Some additional options such as command line arguments can be selected. At "Managed code profiling mode:" select "Native" (unless you want to profile managed mode .NET/Mono applications). After clicking OK, your project is created.
@@ -50,8 +50,8 @@ VTune Amplifier also allows a form of remote analysis. In this mode, data for an
 
 The command line will look like this:
 
-```bash
-    /apps/all/VTune/2016_update1/vtune_amplifier_xe_2016.1.1.434111/bin64/amplxe-cl -collect advanced-hotspots -app-working-dir /home/sta545/tmp -- /home/sta545/tmp/sgemm
+```console
+/apps/all/VTune/2016_update1/vtune_amplifier_xe_2016.1.1.434111/bin64/amplxe-cl -collect advanced-hotspots -app-working-dir /home/sta545/tmp -- /home/sta545/tmp/sgemm
 ```
 
 Copy the line to clipboard and then you can paste it in your jobscript or in command line. After the collection is run, open the GUI once again, click the menu button in the upper right corner, and select "Open > Result...". The GUI will load the results from the run.
@@ -75,14 +75,14 @@ You may also use remote analysis to collect data from the MIC and then analyze i
 
 Native launch:
 
-```bash
-    $ /apps/all/VTune/2016_update1/vtune_amplifier_xe_2016.1.1.434111/bin64/amplxe-cl -target-system mic-native:0 -collect advanced-hotspots -- /home/sta545/tmp/vect-add-mic
+```console
+$ /apps/all/VTune/2016_update1/vtune_amplifier_xe_2016.1.1.434111/bin64/amplxe-cl -target-system mic-native:0 -collect advanced-hotspots -- /home/sta545/tmp/vect-add-mic
 ```
 
 Host launch:
 
-```bash
-    $ /apps/all/VTune/2016_update1/vtune_amplifier_xe_2016.1.1.434111/bin64/amplxe-cl -target-system mic-host-launch:0 -collect advanced-hotspots -- /home/sta545/tmp/sgemm
+```console
+$ /apps/all/VTune/2016_update1/vtune_amplifier_xe_2016.1.1.434111/bin64/amplxe-cl -target-system mic-host-launch:0 -collect advanced-hotspots -- /home/sta545/tmp/sgemm
 ```
 
 You can obtain this command line by pressing the "Command line..." button on Analysis Type screen.
diff --git a/docs.it4i/salomon/software/debuggers/total-view.md b/docs.it4i/salomon/software/debuggers/total-view.md
index f4f69278ff59e8f2cd35aad8b5c79bf78a4a0171..0235c845d012f4c0f5245e7ae2c5f8d96b6efe3c 100644
--- a/docs.it4i/salomon/software/debuggers/total-view.md
+++ b/docs.it4i/salomon/software/debuggers/total-view.md
@@ -6,7 +6,7 @@ TotalView is a GUI-based source code multi-process, multi-thread debugger.
 
 On the cluster users can debug OpenMP or MPI code that runs up to 64 parallel processes. These limitation means that:
 
-```bash
+```console
     1 user can debug up 64 processes, or
     32 users can debug 2 processes, etc.
 ```
@@ -21,23 +21,20 @@ You can check the status of the licenses [here](https://extranet.it4i.cz/rsweb/a
 
 Load all necessary modules to compile the code. For example:
 
-```bash
-    module load intel
-
-    module load impi   ... or ... module load OpenMPI/X.X.X-icc
+```console
+    ml intel
 ```
 
 Load the TotalView module:
 
-```bash
-    module load TotalView/8.15.4-6-linux-x86-64
+```console
+    ml TotalView/8.15.4-6-linux-x86-64
 ```
 
 Compile the code:
 
-```bash
+```console
     mpicc -g -O0 -o test_debug test.c
-
     mpif90 -g -O0 -o test_debug test.f
 ```
 
@@ -54,16 +51,16 @@ Before debugging, you need to compile your code with theses flags:
 
 Be sure to log in with an X window forwarding enabled. This could mean using the -X in the ssh:
 
-```bash
-    ssh -X username@salomon.it4i.cz
+```console
+ssh -X username@salomon.it4i.cz
 ```
 
 Other options is to access login node using VNC. Please see the detailed information on how to use graphic user interface on Anselm.
 
 From the login node an interactive session with X windows forwarding (-X option) can be started by following command:
 
-```bash
-    qsub -I -X -A NONE-0-0 -q qexp -lselect=1:ncpus=24:mpiprocs=24,walltime=01:00:00
+```console
+$ qsub -I -X -A NONE-0-0 -q qexp -lselect=1:ncpus=24:mpiprocs=24,walltime=01:00:00
 ```
 
 Then launch the debugger with the totalview command followed by the name of the executable to debug.
@@ -72,8 +69,8 @@ Then launch the debugger with the totalview command followed by the name of the
 
 To debug a serial code use:
 
-```bash
-    totalview test_debug
+```console
+totalview test_debug
 ```
 
 ### Debugging a Parallel Code - Option 1
@@ -83,7 +80,7 @@ To debug a parallel code compiled with **OpenMPI** you need to setup your TotalV
 !!! hint
     To be able to run parallel debugging procedure from the command line without stopping the debugger in the mpiexec source code you have to add the following function to your **~/.tvdrc** file.
 
-```bash
+```console
     proc mpi_auto_run_starter {loaded_id} {
         set starter_programs {mpirun mpiexec orterun}
         set executable_name [TV::symbol get $loaded_id full_pathname]
@@ -105,23 +102,23 @@ To debug a parallel code compiled with **OpenMPI** you need to setup your TotalV
 
 The source code of this function can be also found in
 
-```bash
-    /apps/all/OpenMPI/1.10.1-GNU-4.9.3-2.25/etc/openmpi-totalview.tcl
+```console
+$ /apps/all/OpenMPI/1.10.1-GNU-4.9.3-2.25/etc/openmpi-totalview.tcl
 ```
 
 You can also add only following line to you ~/.tvdrc file instead of
 the entire function:
 
-```bash
-source /apps/all/OpenMPI/1.10.1-GNU-4.9.3-2.25/etc/openmpi-totalview.tcl
+```console
+$ source /apps/all/OpenMPI/1.10.1-GNU-4.9.3-2.25/etc/openmpi-totalview.tcl
 ```
 
 You need to do this step only once. See also [OpenMPI FAQ entry](https://www.open-mpi.org/faq/?category=running#run-with-tv)
 
 Now you can run the parallel debugger using:
 
-```bash
-    mpirun -tv -n 5 ./test_debug
+```console
+$ mpirun -tv -n 5 ./test_debug
 ```
 
 When following dialog appears click on "Yes"
@@ -138,10 +135,10 @@ Other option to start new parallel debugging session from a command line is to l
 
 The following example shows how to start debugging session with Intel MPI:
 
-```bash
-    module load intel/2015b-intel-2015b impi/5.0.3.048-iccifort-2015.3.187-GNU-5.1.0-2.25 TotalView/8.15.4-6-linux-x86-64
-
-    totalview -mpi "Intel MPI-Hydra" -np 8 ./hello_debug_impi
+```console
+$ ml intel
+$ ml TotalView/8.15.4-6-linux-x86-64
+$ totalview -mpi "Intel MPI-Hydra" -np 8 ./hello_debug_impi
 ```
 
 After running previous command you will see the same window as shown in the screenshot above.
diff --git a/docs.it4i/salomon/software/debuggers/valgrind.md b/docs.it4i/salomon/software/debuggers/valgrind.md
index 430118785a08bc43e67a4711396f9ac6b63c4afb..188f98502862effe90495934c6288aa64b042318 100644
--- a/docs.it4i/salomon/software/debuggers/valgrind.md
+++ b/docs.it4i/salomon/software/debuggers/valgrind.md
@@ -47,9 +47,9 @@ For example, lets look at this C code, which has two problems:
 
 Now, compile it with Intel compiler:
 
-```bash
-    $ module add intel
-    $ icc -g valgrind-example.c -o valgrind-example
+```console
+$ module add intel
+$ icc -g valgrind-example.c -o valgrind-example
 ```
 
 Now, lets run it with Valgrind. The syntax is:
@@ -58,8 +58,8 @@ valgrind [valgrind options] < your program binary > [your program options]
 
 If no Valgrind options are specified, Valgrind defaults to running Memcheck tool. Please refer to the Valgrind documentation for a full description of command line options.
 
-```bash
-    $ valgrind ./valgrind-example
+```console
+$ valgrind ./valgrind-example
     ==12652== Memcheck, a memory error detector
     ==12652== Copyright (C) 2002-2013, and GNU GPL'd, by Julian Seward et al.
     ==12652== Using Valgrind-3.9.0 and LibVEX; rerun with -h for copyright info
@@ -92,8 +92,8 @@ If no Valgrind options are specified, Valgrind defaults to running Memcheck tool
 
 In the output we can see that Valgrind has detected both errors - the off-by-one memory access at line 5 and a memory leak of 40 bytes. If we want a detailed analysis of the memory leak, we need to run Valgrind with  --leak-check=full option:
 
-```bash
-    $ valgrind --leak-check=full ./valgrind-example
+```console
+$ valgrind --leak-check=full ./valgrind-example
     ==23856== Memcheck, a memory error detector
     ==23856== Copyright (C) 2002-2010, and GNU GPL'd, by Julian Seward et al.
     ==23856== Using Valgrind-3.6.0 and LibVEX; rerun with -h for copyright info
@@ -134,13 +134,13 @@ Now we can see that the memory leak is due to the malloc() at line 6.
 
 Although Valgrind is not primarily a parallel debugger, it can be used to debug parallel applications as well. When launching your parallel applications, prepend the valgrind command. For example:
 
-```bash
-    $ mpirun -np 4 valgrind myapplication
+```console
+$ mpirun -np 4 valgrind myapplication
 ```
 
 The default version without MPI support will however report a large number of false errors in the MPI library, such as:
 
-```bash
+```console
     ==30166== Conditional jump or move depends on uninitialised value(s)
     ==30166== at 0x4C287E8: strlen (mc_replace_strmem.c:282)
     ==30166== by 0x55443BD: I_MPI_Processor_model_number (init_interface.c:427)
@@ -181,16 +181,16 @@ Lets look at this MPI example:
 
 There are two errors - use of uninitialized memory and invalid length of the buffer. Lets debug it with valgrind :
 
-```bash
-    $ module add intel impi
-    $ mpiicc -g valgrind-example-mpi.c -o valgrind-example-mpi
-    $ module add Valgrind/3.11.0-intel-2015b
-    $ mpirun -np 2 -env LD_PRELOAD $EBROOTVALGRIND/lib/valgrind/libmpiwrap-amd64-linux.so valgrind ./valgrind-example-mpi
+```console
+$ module add intel impi
+$ mpiicc -g valgrind-example-mpi.c -o valgrind-example-mpi
+$ module add Valgrind/3.11.0-intel-2015b
+$ mpirun -np 2 -env LD_PRELOAD $EBROOTVALGRIND/lib/valgrind/libmpiwrap-amd64-linux.so valgrind ./valgrind-example-mpi
 ```
 
 Prints this output : (note that there is output printed for every launched MPI process)
 
-```bash
+```console
     ==31318== Memcheck, a memory error detector
     ==31318== Copyright (C) 2002-2013, and GNU GPL'd, by Julian Seward et al.
     ==31318== Using Valgrind-3.9.0 and LibVEX; rerun with -h for copyright info
diff --git a/docs.it4i/salomon/software/debuggers/vampir.md b/docs.it4i/salomon/software/debuggers/vampir.md
index 99053546c14b43c51d5ab7728dfa3824f2016170..852374d229d2c4f4a2e4c612c85d25b1c121faf0 100644
--- a/docs.it4i/salomon/software/debuggers/vampir.md
+++ b/docs.it4i/salomon/software/debuggers/vampir.md
@@ -6,11 +6,13 @@ Vampir is a commercial trace analysis and visualisation tool. It can work with t
 
 ## Installed Versions
 
-Version 8.5.0 is currently installed as module Vampir/8.5.0 :
+```console
+$ ml av Vampir
+```
 
-```bash
-    $ module load Vampir/8.5.0
-    $ vampir &
+```console
+$ ml Vampir
+$ vampir &
 ```
 
 ## User Manual
diff --git a/docs.it4i/salomon/software/intel-suite/intel-advisor.md b/docs.it4i/salomon/software/intel-suite/intel-advisor.md
index 427f5c98cfccf29de4870043c08074ac1a246135..688deda17708cc23578fd50dc6063fb7716c5858 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-advisor.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-advisor.md
@@ -16,8 +16,8 @@ Profiling is possible either directly from the GUI, or from command line.
 
 To profile from GUI, launch Advisor:
 
-```bash
-    $ advixe-gui
+```console
+$ advixe-gui
 ```
 
 Then select menu File -> New -> Project. Choose a directory to save project data to. After clicking OK, Project properties window will appear, where you can configure path to your binary, launch arguments, working directory etc. After clicking OK, the project is ready.
diff --git a/docs.it4i/salomon/software/intel-suite/intel-compilers.md b/docs.it4i/salomon/software/intel-suite/intel-compilers.md
index 63a05bd91e15c04afa6a3cc8d21231ba030437bc..8e2ee714f6e5c61ec8b4e3b4522a3a06fdd11f46 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-compilers.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-compilers.md
@@ -2,28 +2,28 @@
 
 The Intel compilers in multiple versions are available, via module intel. The compilers include the icc C and C++ compiler and the ifort fortran 77/90/95 compiler.
 
-```bash
-    $ module load intel
-    $ icc -v
-    $ ifort -v
+```console
+$ ml intel
+$ icc -v
+$ ifort -v
 ```
 
 The intel compilers provide for vectorization of the code, via the AVX2 instructions and support threading parallelization via OpenMP
 
 For maximum performance on the Salomon cluster compute nodes, compile your programs using the AVX2 instructions, with reporting where the vectorization was used. We recommend following compilation options for high performance
 
-```bash
-    $ icc   -ipo -O3 -xCORE-AVX2 -qopt-report1 -qopt-report-phase=vec myprog.c mysubroutines.c -o myprog.x
-    $ ifort -ipo -O3 -xCORE-AVX2 -qopt-report1 -qopt-report-phase=vec myprog.f mysubroutines.f -o myprog.x
+```console
+$ icc   -ipo -O3 -xCORE-AVX2 -qopt-report1 -qopt-report-phase=vec myprog.c mysubroutines.c -o myprog.x
+$ ifort -ipo -O3 -xCORE-AVX2 -qopt-report1 -qopt-report-phase=vec myprog.f mysubroutines.f -o myprog.x
 ```
 
 In this example, we compile the program enabling interprocedural optimizations between source files (-ipo), aggresive loop optimizations (-O3) and vectorization (-xCORE-AVX2)
 
 The compiler recognizes the omp, simd, vector and ivdep pragmas for OpenMP parallelization and AVX2 vectorization. Enable the OpenMP parallelization by the **-openmp** compiler switch.
 
-```bash
-    $ icc -ipo -O3 -xCORE-AVX2 -qopt-report1 -qopt-report-phase=vec -openmp myprog.c mysubroutines.c -o myprog.x
-    $ ifort -ipo -O3 -xCORE-AVX2 -qopt-report1 -qopt-report-phase=vec -openmp myprog.f mysubroutines.f -o myprog.x
+```console
+$ icc -ipo -O3 -xCORE-AVX2 -qopt-report1 -qopt-report-phase=vec -openmp myprog.c mysubroutines.c -o myprog.x
+$ ifort -ipo -O3 -xCORE-AVX2 -qopt-report1 -qopt-report-phase=vec -openmp myprog.f mysubroutines.f -o myprog.x
 ```
 
 Read more at <https://software.intel.com/en-us/intel-cplusplus-compiler-16.0-user-and-reference-guide>
diff --git a/docs.it4i/salomon/software/intel-suite/intel-debugger.md b/docs.it4i/salomon/software/intel-suite/intel-debugger.md
index d0fef6ab7fbe2e50e8e7f8238585521bb5cb9695..15788c798785390777016856b8ffcc111227c1d2 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-debugger.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-debugger.md
@@ -6,31 +6,30 @@ IDB is no longer available since Intel Parallel Studio 2015
 
 The intel debugger version 13.0 is available, via module intel. The debugger works for applications compiled with C and C++ compiler and the ifort fortran 77/90/95 compiler. The debugger provides java GUI environment. Use [X display](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/) for running the GUI.
 
-```bash
-    $ module load intel/2014.06
-    $ module load Java
-    $ idb
+```console
+$ ml intel
+$ ml Java
+$ idb
 ```
 
 The debugger may run in text mode. To debug in text mode, use
 
-```bash
-    $ idbc
+```console
+$ idbc
 ```
 
 To debug on the compute nodes, module intel must be loaded. The GUI on compute nodes may be accessed using the same way as in [the GUI section](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/)
 
 Example:
 
-```bash
-    $ qsub -q qexp -l select=1:ncpus=24 -X -I
+```console
+$ qsub -q qexp -l select=1:ncpus=24 -X -I
     qsub: waiting for job 19654.srv11 to start
     qsub: job 19654.srv11 ready
-
-    $ module load intel
-    $ module load Java
-    $ icc -O0 -g myprog.c -o myprog.x
-    $ idb ./myprog.x
+$ ml intel
+$ ml Java
+$ icc -O0 -g myprog.c -o myprog.x
+$ idb ./myprog.x
 ```
 
 In this example, we allocate 1 full compute node, compile program myprog.c with debugging options -O0 -g and run the idb debugger interactively on the myprog.x executable. The GUI access is via X11 port forwarding provided by the PBS workload manager.
@@ -43,13 +42,12 @@ In this example, we allocate 1 full compute node, compile program myprog.c with
 
 For debugging small number of MPI ranks, you may execute and debug each rank in separate xterm terminal (do not forget the [X display](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/)). Using Intel MPI, this may be done in following way:
 
-```bash
-    $ qsub -q qexp -l select=2:ncpus=24 -X -I
+```console
+$ qsub -q qexp -l select=2:ncpus=24 -X -I
     qsub: waiting for job 19654.srv11 to start
     qsub: job 19655.srv11 ready
-
-    $ module load intel impi
-    $ mpirun -ppn 1 -hostfile $PBS_NODEFILE --enable-x xterm -e idbc ./mympiprog.x
+$ ml intel
+$ mpirun -ppn 1 -hostfile $PBS_NODEFILE --enable-x xterm -e idbc ./mympiprog.x
 ```
 
 In this example, we allocate 2 full compute node, run xterm on each node and start idb debugger in command line mode, debugging two ranks of mympiprog.x application. The xterm will pop up for each rank, with idb prompt ready. The example is not limited to use of Intel MPI
@@ -58,13 +56,12 @@ In this example, we allocate 2 full compute node, run xterm on each node and sta
 
 Run the idb debugger from within the MPI debug option. This will cause the debugger to bind to all ranks and provide aggregated outputs across the ranks, pausing execution automatically just after startup. You may then set break points and step the execution manually. Using Intel MPI:
 
-```bash
-    $ qsub -q qexp -l select=2:ncpus=24 -X -I
+```console
+$ qsub -q qexp -l select=2:ncpus=24 -X -I
     qsub: waiting for job 19654.srv11 to start
     qsub: job 19655.srv11 ready
-
-    $ module load intel impi
-    $ mpirun -n 48 -idb ./mympiprog.x
+$ ml intel
+$ mpirun -n 48 -idb ./mympiprog.x
 ```
 
 ### Debugging Multithreaded Application
diff --git a/docs.it4i/salomon/software/intel-suite/intel-inspector.md b/docs.it4i/salomon/software/intel-suite/intel-inspector.md
index 6231a65347abc13d442aea0586d6003ac7d3c798..bd298923813d786c7620c751a3c267983bb2a48d 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-inspector.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-inspector.md
@@ -18,8 +18,8 @@ Debugging is possible either directly from the GUI, or from command line.
 
 To debug from GUI, launch Inspector:
 
-```bash
-    $ inspxe-gui &
+```console
+$ inspxe-gui &
 ```
 
 Then select menu File -> New -> Project. Choose a directory to save project data to. After clicking OK, Project properties window will appear, where you can configure path to your binary, launch arguments, working directory etc. After clicking OK, the project is ready.
diff --git a/docs.it4i/salomon/software/intel-suite/intel-integrated-performance-primitives.md b/docs.it4i/salomon/software/intel-suite/intel-integrated-performance-primitives.md
index ead2008dc115bd5b8d7d76a623e9fe22b9161d56..60628eed0744d4305f79f4b77ff2f4de8e11c10d 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-integrated-performance-primitives.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-integrated-performance-primitives.md
@@ -6,8 +6,8 @@ Intel Integrated Performance Primitives, version 9.0.1, compiled for AVX2 vector
 
 Check out IPP before implementing own math functions for data processing, it is likely already there.
 
-```bash
-    $ module load ipp
+```console
+$ ml ipp
 ```
 
 The module sets up environment variables, required for linking and running ipp enabled applications.
@@ -57,20 +57,18 @@ The module sets up environment variables, required for linking and running ipp e
 
 Compile above example, using any compiler and the ipp module.
 
-```bash
-    $ module load intel
-    $ module load ipp
-
-    $ icc testipp.c -o testipp.x -lippi -lipps -lippcore
+```console
+$ ml intel
+$ ml ipp
+$ icc testipp.c -o testipp.x -lippi -lipps -lippcore
 ```
 
 You will need the ipp module loaded to run the ipp enabled executable. This may be avoided, by compiling library search paths into the executable
 
-```bash
-    $ module load intel
-    $ module load ipp
-
-    $ icc testipp.c -o testipp.x -Wl,-rpath=$LIBRARY_PATH -lippi -lipps -lippcore
+```console
+$ ml intel
+$ ml ipp
+$ icc testipp.c -o testipp.x -Wl,-rpath=$LIBRARY_PATH -lippi -lipps -lippcore
 ```
 
 ## Code Samples and Documentation
diff --git a/docs.it4i/salomon/software/intel-suite/intel-mkl.md b/docs.it4i/salomon/software/intel-suite/intel-mkl.md
index 322492010827e5dc2cc63d6ccd7cb3452f1a4214..6b54e0890202f817dd42c04eabf886489bd695d0 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-mkl.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-mkl.md
@@ -17,8 +17,8 @@ For details see the [Intel MKL Reference Manual](http://software.intel.com/sites
 
 Intel MKL version 11.2.3.187 is available on the cluster
 
-```bash
-    $ module load imkl
+```console
+$ ml imkl
 ```
 
 The module sets up environment variables, required for linking and running mkl enabled applications. The most important variables are the $MKLROOT, $CPATH, $LD_LIBRARY_PATH and $MKL_EXAMPLES
@@ -40,8 +40,8 @@ Linking Intel MKL libraries may be complex. Intel [mkl link line advisor](http:/
 
 You will need the mkl module loaded to run the mkl enabled executable. This may be avoided, by compiling library search paths into the executable. Include rpath on the compile line:
 
-```bash
-    $ icc .... -Wl,-rpath=$LIBRARY_PATH ...
+```console
+$ icc .... -Wl,-rpath=$LIBRARY_PATH ...
 ```
 
 ### Threading
@@ -50,9 +50,9 @@ Advantage in using Intel MKL library is that it brings threaded parallelization
 
 For this to work, the application must link the threaded MKL library (default). Number and behaviour of MKL threads may be controlled via the OpenMP environment variables, such as OMP_NUM_THREADS and KMP_AFFINITY. MKL_NUM_THREADS takes precedence over OMP_NUM_THREADS
 
-```bash
-    $ export OMP_NUM_THREADS=24
-    $ export KMP_AFFINITY=granularity=fine,compact,1,0
+```console
+$ export OMP_NUM_THREADS=24
+$ export KMP_AFFINITY=granularity=fine,compact,1,0
 ```
 
 The application will run with 24 threads with affinity optimized for fine grain parallelization.
@@ -63,50 +63,45 @@ Number of examples, demonstrating use of the Intel MKL library and its linking i
 
 ### Working With Examples
 
-```bash
-    $ module load intel
-    $ module load imkl
-    $ cp -a $MKL_EXAMPLES/cblas /tmp/
-    $ cd /tmp/cblas
-
-    $ make sointel64 function=cblas_dgemm
+```console
+$ ml intel
+$ ml imkl
+$ cp -a $MKL_EXAMPLES/cblas /tmp/
+$ cd /tmp/cblas
+$ make sointel64 function=cblas_dgemm
 ```
 
 In this example, we compile, link and run the cblas_dgemm example, demonstrating use of MKL example suite installed on clusters.
 
 ### Example: MKL and Intel Compiler
 
-```bash
-    $ module load intel
-    $ module load imkl
-    $ cp -a $MKL_EXAMPLES/cblas /tmp/
-    $ cd /tmp/cblas
-    $
-    $ icc -w source/cblas_dgemmx.c source/common_func.c -mkl -o cblas_dgemmx.x
-    $ ./cblas_dgemmx.x data/cblas_dgemmx.d
+```console
+$ ml intel
+$ ml imkl
+$ cp -a $MKL_EXAMPLES/cblas /tmp/
+$ cd /tmp/cblas
+$
+$ icc -w source/cblas_dgemmx.c source/common_func.c -mkl -o cblas_dgemmx.x
+$ ./cblas_dgemmx.x data/cblas_dgemmx.d
 ```
 
 In this example, we compile, link and run the cblas_dgemm example, demonstrating use of MKL with icc -mkl option. Using the -mkl option is equivalent to:
 
-```bash
-    $ icc -w source/cblas_dgemmx.c source/common_func.c -o cblas_dgemmx.x
-    -I$MKL_INC_DIR -L$MKL_LIB_DIR -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5
+```console
+$ icc -w source/cblas_dgemmx.c source/common_func.c -o cblas_dgemmx.x -I$MKL_INC_DIR -L$MKL_LIB_DIR -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5
 ```
 
 In this example, we compile and link the cblas_dgemm example, using LP64 interface to threaded MKL and Intel OMP threads implementation.
 
 ### Example: Intel MKL and GNU Compiler
 
-```bash
-    $ module load GCC
-    $ module load imkl
-    $ cp -a $MKL_EXAMPLES/cblas /tmp/
-    $ cd /tmp/cblas
-
-    $ gcc -w source/cblas_dgemmx.c source/common_func.c -o cblas_dgemmx.x
-    -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core -lgomp -lm
-
-    $ ./cblas_dgemmx.x data/cblas_dgemmx.d
+```console
+$ ml GCC
+$ ml imkl
+$ cp -a $MKL_EXAMPLES/cblas /tmp/
+$ cd /tmp/cblas
+$ gcc -w source/cblas_dgemmx.c source/common_func.c -o cblas_dgemmx.x -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core -lgomp -lm
+$ ./cblas_dgemmx.x data/cblas_dgemmx.d
 ```
 
 In this example, we compile, link and run the cblas_dgemm example, using LP64 interface to threaded MKL and gnu OMP threads implementation.
diff --git a/docs.it4i/salomon/software/intel-suite/intel-parallel-studio-introduction.md b/docs.it4i/salomon/software/intel-suite/intel-parallel-studio-introduction.md
index 4b1c9308957a43fafafb8f5c1280c11ba2bf81a1..b22274a0e0a4c32942b15ba90244621eba21aa54 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-parallel-studio-introduction.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-parallel-studio-introduction.md
@@ -17,10 +17,10 @@ Intel Parallel Studio XE
 
 The Intel compilers version 131.3 are available, via module iccifort/2013.5.192-GCC-4.8.3. The compilers include the icc C and C++ compiler and the ifort fortran 77/90/95 compiler.
 
-```bash
-    $ module load intel
-    $ icc -v
-    $ ifort -v
+```console
+$ ml intel
+$ icc -v
+$ ifort -v
 ```
 
 Read more at the [Intel Compilers](intel-compilers/) page.
@@ -31,9 +31,9 @@ IDB is no longer available since Parallel Studio 2015.
 
 The intel debugger version 13.0 is available, via module intel. The debugger works for applications compiled with C and C++ compiler and the ifort fortran 77/90/95 compiler. The debugger provides java GUI environment.
 
-```bash
-    $ module load intel
-    $ idb
+```console
+$ ml intel
+$ idb
 ```
 
 Read more at the [Intel Debugger](intel-debugger/) page.
@@ -42,8 +42,8 @@ Read more at the [Intel Debugger](intel-debugger/) page.
 
 Intel Math Kernel Library (Intel MKL) is a library of math kernel subroutines, extensively threaded and optimized for maximum performance. Intel MKL unites and provides these basic components: BLAS, LAPACK, ScaLapack, PARDISO, FFT, VML, VSL, Data fitting, Feast Eigensolver and many more.
 
-```bash
-    $ module load imkl
+```console
+$ ml imkl
 ```
 
 Read more at the [Intel MKL](intel-mkl/) page.
@@ -52,8 +52,8 @@ Read more at the [Intel MKL](intel-mkl/) page.
 
 Intel Integrated Performance Primitives, version 7.1.1, compiled for AVX is available, via module ipp. The IPP is a library of highly optimized algorithmic building blocks for media and data applications. This includes signal, image and frame processing algorithms, such as FFT, FIR, Convolution, Optical Flow, Hough transform, Sum, MinMax and many more.
 
-```bash
-    $ module load ipp
+```console
+$ ml ipp
 ```
 
 Read more at the [Intel IPP](intel-integrated-performance-primitives/) page.
@@ -62,8 +62,8 @@ Read more at the [Intel IPP](intel-integrated-performance-primitives/) page.
 
 Intel Threading Building Blocks (Intel TBB) is a library that supports scalable parallel programming using standard ISO C++ code. It does not require special languages or compilers. It is designed to promote scalable data parallel programming. Additionally, it fully supports nested parallelism, so you can build larger parallel components from smaller parallel components. To use the library, you specify tasks, not threads, and let the library map tasks onto threads in an efficient manner.
 
-```bash
-    $ module load tbb
+```console
+$ ml tbb
 ```
 
 Read more at the [Intel TBB](intel-tbb/) page.
diff --git a/docs.it4i/salomon/software/intel-suite/intel-tbb.md b/docs.it4i/salomon/software/intel-suite/intel-tbb.md
index 94e32f39073b41801f20391b04cc5081f99649f7..59976aa7ef31d2e97e9799ced80578be11a2d8ab 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-tbb.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-tbb.md
@@ -4,10 +4,10 @@
 
 Intel Threading Building Blocks (Intel TBB) is a library that supports scalable parallel programming using standard ISO C++ code. It does not require special languages or compilers.  To use the library, you specify tasks, not threads, and let the library map tasks onto threads in an efficient manner. The tasks are executed by a runtime scheduler and may be offloaded to [MIC accelerator](../intel-xeon-phi/).
 
-Intel TBB version 4.3.5.187 is available on the cluster.
+Intel TBB is available on the cluster.
 
-```bash
-    $ module load tbb
+```console
+$ ml av tbb
 ```
 
 The module sets up environment variables, required for linking and running tbb enabled applications.
@@ -18,21 +18,21 @@ Link the tbb library, using -ltbb
 
 Number of examples, demonstrating use of TBB and its built-in scheduler is available on Anselm, in the $TBB_EXAMPLES directory.
 
-```bash
-    $ module load intel
-    $ module load tbb
-    $ cp -a $TBB_EXAMPLES/common $TBB_EXAMPLES/parallel_reduce /tmp/
-    $ cd /tmp/parallel_reduce/primes
-    $ icc -O2 -DNDEBUG -o primes.x main.cpp primes.cpp -ltbb
-    $ ./primes.x
+```console
+$ ml intel
+$ ml tbb
+$ cp -a $TBB_EXAMPLES/common $TBB_EXAMPLES/parallel_reduce /tmp/
+$ cd /tmp/parallel_reduce/primes
+$ icc -O2 -DNDEBUG -o primes.x main.cpp primes.cpp -ltbb
+$ ./primes.x
 ```
 
 In this example, we compile, link and run the primes example, demonstrating use of parallel task-based reduce in computation of prime numbers.
 
 You will need the tbb module loaded to run the tbb enabled executable. This may be avoided, by compiling library search paths into the executable.
 
-```bash
-    $ icc -O2 -o primes.x main.cpp primes.cpp -Wl,-rpath=$LIBRARY_PATH -ltbb
+```console
+$ icc -O2 -o primes.x main.cpp primes.cpp -Wl,-rpath=$LIBRARY_PATH -ltbb
 ```
 
 ## Further Reading
diff --git a/docs.it4i/salomon/software/intel-suite/intel-trace-analyzer-and-collector.md b/docs.it4i/salomon/software/intel-suite/intel-trace-analyzer-and-collector.md
index 5d4513d306d1b9a4bf159c71231c9677cc2b8165..9cae361ca43dccb382bd5b09f5c5a9d270e0414c 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-trace-analyzer-and-collector.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-trace-analyzer-and-collector.md
@@ -12,9 +12,9 @@ Currently on Salomon is version 9.1.2.024 available as module itac/9.1.2.024
 
 ITAC can collect traces from applications that are using Intel MPI. To generate a trace, simply add -trace option to your mpirun command :
 
-```bash
-    $ module load itac/9.1.2.024
-    $ mpirun -trace myapp
+```console
+$ ml itac/9.1.2.024
+$ mpirun -trace myapp
 ```
 
 The trace will be saved in file myapp.stf in the current directory.
@@ -23,9 +23,9 @@ The trace will be saved in file myapp.stf in the current directory.
 
 To view and analyze the trace, open ITAC GUI in a [graphical environment](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/):
 
-```bash
-    $ module load itac/9.1.2.024
-    $ traceanalyzer
+```console
+$ ml itac/9.1.2.024
+$ traceanalyzer
 ```
 
 The GUI will launch and you can open the produced `*`.stf file.
diff --git a/docs.it4i/salomon/software/intel-xeon-phi.md b/docs.it4i/salomon/software/intel-xeon-phi.md
index 26c87cb2aab21e606d205161f2a3b62bf4058d2c..6d161439b7871e097ae095a4103a1f37ab490a0e 100644
--- a/docs.it4i/salomon/software/intel-xeon-phi.md
+++ b/docs.it4i/salomon/software/intel-xeon-phi.md
@@ -2,150 +2,196 @@
 
 ## Guide to Intel Xeon Phi Usage
 
-Intel Xeon Phi can be programmed in several modes. The default mode on Anselm is offload mode, but all modes described in this document are supported.
+Intel Xeon Phi accelerator can be programmed in several modes. The default mode on the cluster is offload mode, but all modes described in this document are supported.
 
 ## Intel Utilities for Xeon Phi
 
 To get access to a compute node with Intel Xeon Phi accelerator, use the PBS interactive session
 
-```bash
-    $ qsub -I -q qmic -A NONE-0-0
+```console
+$ qsub -I -q qprod -l select=1:ncpus=24:accelerator=True:naccelerators=2:accelerator_model=phi7120 -A NONE-0-0
 ```
 
-To set up the environment module "Intel" has to be loaded
+To set up the environment, the module "intel" has to be loaded. When no version is specified, the default version is loaded (at the time of writing, it is 2015b).
 
-```bash
-    $ module load intel/13.5.192
+```console
+$ ml intel
 ```
 
 Information about the hardware can be obtained by running the micinfo program on the host.
 
-```bash
-    $ /usr/bin/micinfo
-```
-
-The output of the "micinfo" utility executed on one of the Anselm node is as follows. (note: to get PCIe related details the command has to be run with root privileges)
-
-```bash
-    MicInfo Utility Log
-
-    Created Mon Jul 22 00:23:50 2013
-
-            System Info
-                    HOST OS                 : Linux
-                    OS Version              : 2.6.32-279.5.2.bl6.Bull.33.x86_64
-                    Driver Version          : 6720-15
-                    MPSS Version            : 2.1.6720-15
-                    Host Physical Memory    : 98843 MB
-
-    Device No: 0, Device Name: mic0
-
-            Version
-                    Flash Version            : 2.1.03.0386
-                    SMC Firmware Version     : 1.15.4830
-                    SMC Boot Loader Version  : 1.8.4326
-                    uOS Version              : 2.6.38.8-g2593b11
-                    Device Serial Number     : ADKC30102482
-
-            Board
-                    Vendor ID                : 0x8086
-                    Device ID                : 0x2250
-                    Subsystem ID             : 0x2500
-                    Coprocessor Stepping ID  : 3
-                    PCIe Width               : x16
-                    PCIe Speed               : 5 GT/s
-                    PCIe Max payload size    : 256 bytes
-                    PCIe Max read req size   : 512 bytes
-                    Coprocessor Model        : 0x01
-                    Coprocessor Model Ext    : 0x00
-                    Coprocessor Type         : 0x00
-                    Coprocessor Family       : 0x0b
-                    Coprocessor Family Ext   : 0x00
-                    Coprocessor Stepping     : B1
-                    Board SKU                : B1PRQ-5110P/5120D
-                    ECC Mode                 : Enabled
-                    SMC HW Revision          : Product 225W Passive CS
-
-            Cores
-                    Total No of Active Cores : 60
-                    Voltage                  : 1032000 uV
-                    Frequency                : 1052631 kHz
-
-            Thermal
-                    Fan Speed Control        : N/A
-                    Fan RPM                  : N/A
-                    Fan PWM                  : N/A
-                    Die Temp                 : 49 C
-
-            GDDR
-                    GDDR Vendor              : Elpida
-                    GDDR Version             : 0x1
-                    GDDR Density             : 2048 Mb
-                    GDDR Size                : 7936 MB
-                    GDDR Technology          : GDDR5
-                    GDDR Speed               : 5.000000 GT/s
-                    GDDR Frequency           : 2500000 kHz
-                    GDDR Voltage             : 1501000 uV
+```console
+$ /usr/bin/micinfo
+```
+
+The output of the "micinfo" utility executed on one of the cluster nodes is as follows. (note: to get PCIe related details the command has to be run with root privileges)
+
+```console
+MicInfo Utility Log
+Created Mon Aug 17 13:55:59 2015
+
+
+	System Info
+		HOST OS			: Linux
+		OS Version		: 2.6.32-504.16.2.el6.x86_64
+		Driver Version		: 3.4.1-1
+		MPSS Version		: 3.4.1
+		Host Physical Memory	: 131930 MB
+
+Device No: 0, Device Name: mic0
+
+	Version
+		Flash Version 		 : 2.1.02.0390
+		SMC Firmware Version	 : 1.16.5078
+		SMC Boot Loader Version	 : 1.8.4326
+		uOS Version 		 : 2.6.38.8+mpss3.4.1
+		Device Serial Number 	 : ADKC44601414
+
+	Board
+		Vendor ID 		 : 0x8086
+		Device ID 		 : 0x225c
+		Subsystem ID 		 : 0x7d95
+		Coprocessor Stepping ID	 : 2
+		PCIe Width 		 : x16
+		PCIe Speed 		 : 5 GT/s
+		PCIe Max payload size	 : 256 bytes
+		PCIe Max read req size	 : 512 bytes
+		Coprocessor Model	 : 0x01
+		Coprocessor Model Ext	 : 0x00
+		Coprocessor Type	 : 0x00
+		Coprocessor Family	 : 0x0b
+		Coprocessor Family Ext	 : 0x00
+		Coprocessor Stepping 	 : C0
+		Board SKU 		 : C0PRQ-7120 P/A/X/D
+		ECC Mode 		 : Enabled
+		SMC HW Revision 	 : Product 300W Passive CS
+
+	Cores
+		Total No of Active Cores : 61
+		Voltage 		 : 1007000 uV
+		Frequency		 : 1238095 kHz
+
+	Thermal
+		Fan Speed Control 	 : N/A
+		Fan RPM 		 : N/A
+		Fan PWM 		 : N/A
+		Die Temp		 : 60 C
+
+	GDDR
+		GDDR Vendor		 : Samsung
+		GDDR Version		 : 0x6
+		GDDR Density		 : 4096 Mb
+		GDDR Size		 : 15872 MB
+		GDDR Technology		 : GDDR5 
+		GDDR Speed		 : 5.500000 GT/s 
+		GDDR Frequency		 : 2750000 kHz
+		GDDR Voltage		 : 1501000 uV
+
+Device No: 1, Device Name: mic1
+
+	Version
+		Flash Version 		 : 2.1.02.0390
+		SMC Firmware Version	 : 1.16.5078
+		SMC Boot Loader Version	 : 1.8.4326
+		uOS Version 		 : 2.6.38.8+mpss3.4.1
+		Device Serial Number 	 : ADKC44500454
+
+	Board
+		Vendor ID 		 : 0x8086
+		Device ID 		 : 0x225c
+		Subsystem ID 		 : 0x7d95
+		Coprocessor Stepping ID	 : 2
+		PCIe Width 		 : x16
+		PCIe Speed 		 : 5 GT/s
+		PCIe Max payload size	 : 256 bytes
+		PCIe Max read req size	 : 512 bytes
+		Coprocessor Model	 : 0x01
+		Coprocessor Model Ext	 : 0x00
+		Coprocessor Type	 : 0x00
+		Coprocessor Family	 : 0x0b
+		Coprocessor Family Ext	 : 0x00
+		Coprocessor Stepping 	 : C0
+		Board SKU 		 : C0PRQ-7120 P/A/X/D
+		ECC Mode 		 : Enabled
+		SMC HW Revision 	 : Product 300W Passive CS
+
+	Cores
+		Total No of Active Cores : 61
+		Voltage 		 : 998000 uV
+		Frequency		 : 1238095 kHz
+
+	Thermal
+		Fan Speed Control 	 : N/A
+		Fan RPM 		 : N/A
+		Fan PWM 		 : N/A
+		Die Temp		 : 59 C
+
+	GDDR
+		GDDR Vendor		 : Samsung
+		GDDR Version		 : 0x6
+		GDDR Density		 : 4096 Mb
+		GDDR Size		 : 15872 MB
+		GDDR Technology		 : GDDR5 
+		GDDR Speed		 : 5.500000 GT/s 
+		GDDR Frequency		 : 2750000 kHz
+		GDDR Voltage		 : 1501000 uV
 ```
 
 ## Offload Mode
 
 To compile a code for Intel Xeon Phi a MPSS stack has to be installed on the machine where compilation is executed. Currently the MPSS stack is only installed on compute nodes equipped with accelerators.
 
-```bash
-    $ qsub -I -q qmic -A NONE-0-0
-    $ module load intel/13.5.192
+```console
+$ qsub -I -q qprod -l select=1:ncpus=24:accelerator=True:naccelerators=2:accelerator_model=phi7120 -A NONE-0-0
+$ ml intel
 ```
 
 For debugging purposes it is also recommended to set environment variable "OFFLOAD_REPORT". Value can be set from 0 to 3, where higher number means more debugging information.
 
-```bash
-    export OFFLOAD_REPORT=3
+```console
+export OFFLOAD_REPORT=3
 ```
 
-A very basic example of code that employs offload programming technique is shown in the next listing.
+A very basic example of code that employs the offload programming technique is shown in the next listing. Please note that this code is sequential and utilizes only a single core of the accelerator.
 
-!!! note
-    This code is sequential and utilizes only single core of the accelerator.
-
-```bash
-    $ vim source-offload.cpp
+```console
+$ cat source-offload.cpp
 
-    #include <iostream>
+#include <iostream>
 
-    int main(int argc, char* argv[])
-    {
-        const int niter = 100000;
-        double result = 0;
+int main(int argc, char* argv[])
+{
+    const int niter = 100000;
+    double result = 0;
 
-     #pragma offload target(mic)
-        for (int i = 0; i < niter; ++i) {
-            const double t = (i + 0.5) / niter;
-            result += 4.0 / (t * t + 1.0);
-        }
-        result /= niter;
-        std::cout << "Pi ~ " << result << 'n';
+ #pragma offload target(mic)
+    for (int i = 0; i < niter; ++i) {
+        const double t = (i + 0.5) / niter;
+        result += 4.0 / (t * t + 1.0);
     }
+    result /= niter;
+    std::cout << "Pi ~ " << result << '\n';
+}
 ```
 
 To compile a code using Intel compiler run
 
-```bash
-    $ icc source-offload.cpp -o bin-offload
+```console
+$ icc source-offload.cpp -o bin-offload
 ```
 
 To execute the code, run the following command on the host
 
-```bash
-    ./bin-offload
+```console
+$ ./bin-offload
 ```
 
 ### Parallelization in Offload Mode Using OpenMP
 
 One way of paralelization a code for Xeon Phi is using OpenMP directives. The following example shows code for parallel vector addition.
 
-```bash
-    $ vim ./vect-add
+```console
+$ cat ./vect-add
 
     #include <stdio.h>
 
@@ -224,10 +270,9 @@ One way of paralelization a code for Xeon Phi is using OpenMP directives. The fo
 
 During the compilation Intel compiler shows which loops have been vectorized in both host and accelerator. This can be enabled with compiler option "-vec-report2". To compile and execute the code run
 
-```bash
-    $ icc vect-add.c -openmp_report2 -vec-report2 -o vect-add
-
-    $ ./vect-add
+```console
+$ icc vect-add.c -openmp_report2 -vec-report2 -o vect-add
+$ ./vect-add
 ```
 
 Some interesting compiler flags useful not only for code debugging are:
@@ -244,18 +289,19 @@ Some interesting compiler flags useful not only for code debugging are:
 
 Intel MKL includes an Automatic Offload (AO) feature that enables computationally intensive MKL functions called in user code to benefit from attached Intel Xeon Phi coprocessors automatically and transparently.
 
-Behavioral of automatic offload mode is controlled by functions called within the program or by environmental variables. Complete list of controls is listed [here](http://software.intel.com/sites/products/documentation/doclib/mkl_sa/11/mkl_userguide_lnx/GUID-3DC4FC7D-A1E4-423D-9C0C-06AB265FFA86.htm).
+!!! note
+    Behavior of automatic offload mode is controlled by functions called within the program or by environment variables. A complete list of controls is listed [here](http://software.intel.com/sites/products/documentation/doclib/mkl_sa/11/mkl_userguide_lnx/GUID-3DC4FC7D-A1E4-423D-9C0C-06AB265FFA86.htm).
 
 The Automatic Offload may be enabled by either an MKL function call within the code:
 
 ```cpp
-    mkl_mic_enable();
+mkl_mic_enable();
 ```
 
 or by setting environment variable
 
-```bash
-    $ export MKL_MIC_ENABLE=1
+```console
+$ export MKL_MIC_ENABLE=1
 ```
 
 To get more information about automatic offload please refer to "[Using Intel® MKL Automatic Offload on Intel ® Xeon Phi™ Coprocessors](http://software.intel.com/sites/default/files/11MIC42_How_to_Use_MKL_Automatic_Offload_0.pdf)" white paper or [Intel MKL documentation](https://software.intel.com/en-us/articles/intel-math-kernel-library-documentation).
@@ -264,68 +310,68 @@ To get more information about automatic offload please refer to "[Using Intel®
 
 At first get an interactive PBS session on a node with MIC accelerator and load "intel" module that automatically loads "mkl" module as well.
 
-```bash
-    $ qsub -I -q qmic -A OPEN-0-0 -l select=1:ncpus=16
-    $ module load intel
+```console
+$ qsub -I -q qprod -l select=1:ncpus=24:accelerator=True:naccelerators=2:accelerator_model=phi7120 -A NONE-0-0
+$ ml intel
 ```
 
-Following example show how to automatically offload an SGEMM (single precision - g dir="auto">eneral matrix multiply) function to MIC coprocessor. The code can be copied to a file and compiled without any necessary modification.
-
-```bash
-    $ vim sgemm-ao-short.c
+The following example shows how to automatically offload an SGEMM (single precision - general matrix multiply) function to the MIC coprocessor. The code can be copied to a file and compiled without any necessary modification.
 
-    #include <stdio.h>
-    #include <stdlib.h>
-    #include <malloc.h>
-    #include <stdint.h>
+```console
+$ vim sgemm-ao-short.c
 
-    #include "mkl.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <malloc.h>
+#include <stdint.h>
 
-    int main(int argc, char **argv)
-    {
-            float *A, *B, *C; /* Matrices */
+#include "mkl.h"
 
-            MKL_INT N = 2560; /* Matrix dimensions */
-            MKL_INT LD = N; /* Leading dimension */
-            int matrix_bytes; /* Matrix size in bytes */
-            int matrix_elements; /* Matrix size in elements */
+int main(int argc, char **argv)
+{
+        float *A, *B, *C; /* Matrices */
 
-            float alpha = 1.0, beta = 1.0; /* Scaling factors */
-            char transa = 'N', transb = 'N'; /* Transposition options */
+        MKL_INT N = 2560; /* Matrix dimensions */
+        MKL_INT LD = N; /* Leading dimension */
+        int matrix_bytes; /* Matrix size in bytes */
+        int matrix_elements; /* Matrix size in elements */
 
-            int i, j; /* Counters */
+        float alpha = 1.0, beta = 1.0; /* Scaling factors */
+        char transa = 'N', transb = 'N'; /* Transposition options */
 
-            matrix_elements = N * N;
-            matrix_bytes = sizeof(float) * matrix_elements;
+        int i, j; /* Counters */
 
-            /* Allocate the matrices */
-            A = malloc(matrix_bytes); B = malloc(matrix_bytes); C = malloc(matrix_bytes);
+        matrix_elements = N * N;
+        matrix_bytes = sizeof(float) * matrix_elements;
 
-            /* Initialize the matrices */
-            for (i = 0; i < matrix_elements; i++) {
-                    A[i] = 1.0; B[i] = 2.0; C[i] = 0.0;
-            }
+        /* Allocate the matrices */
+        A = malloc(matrix_bytes); B = malloc(matrix_bytes); C = malloc(matrix_bytes);
 
-            printf("Computing SGEMM on the hostn");
-            sgemm(&transa, &transb, &N, &N, &N, &alpha, A, &N, B, &N, &beta, C, &N);
+        /* Initialize the matrices */
+        for (i = 0; i < matrix_elements; i++) {
+                A[i] = 1.0; B[i] = 2.0; C[i] = 0.0;
+        }
 
-            printf("Enabling Automatic Offloadn");
-            /* Alternatively, set environment variable MKL_MIC_ENABLE=1 */
-            mkl_mic_enable();
+        printf("Computing SGEMM on the host\n");
+        sgemm(&transa, &transb, &N, &N, &N, &alpha, A, &N, B, &N, &beta, C, &N);
 
-            int ndevices = mkl_mic_get_device_count(); /* Number of MIC devices */
-            printf("Automatic Offload enabled: %d MIC devices presentn",   ndevices);
+        printf("Enabling Automatic Offload\n");
+        /* Alternatively, set environment variable MKL_MIC_ENABLE=1 */
+        mkl_mic_enable();
+        
+        int ndevices = mkl_mic_get_device_count(); /* Number of MIC devices */
+        printf("Automatic Offload enabled: %d MIC devices present\n",   ndevices);
 
-            printf("Computing SGEMM with automatic workdivisionn");
-            sgemm(&transa, &transb, &N, &N, &N, &alpha, A, &N, B, &N, &beta, C, &N);
+        printf("Computing SGEMM with automatic workdivision\n");
+        sgemm(&transa, &transb, &N, &N, &N, &alpha, A, &N, B, &N, &beta, C, &N);
 
-            /* Free the matrix memory */
-            free(A); free(B); free(C);
+        /* Free the matrix memory */
+        free(A); free(B); free(C);
 
-            printf("Donen");
+        printf("Done\n");
 
-        return 0;
-    }
+    return 0;
+}
 ```
 
 !!! note
@@ -333,31 +379,74 @@ Following example show how to automatically offload an SGEMM (single precision -
 
 To compile a code using Intel compiler use:
 
-```bash
-    $ icc -mkl sgemm-ao-short.c -o sgemm
+```console
+$ icc -mkl sgemm-ao-short.c -o sgemm
 ```
 
 For debugging purposes enable the offload report to see more information about automatic offloading.
 
-```bash
-    $ export OFFLOAD_REPORT=2
+```console
+$ export OFFLOAD_REPORT=2
 ```
 
 The output of a code should look similar to following listing, where lines starting with [MKL] are generated by offload reporting:
 
-```bash
-    Computing SGEMM on the host
-    Enabling Automatic Offload
-    Automatic Offload enabled: 1 MIC devices present
-    Computing SGEMM with automatic workdivision
-    [MKL] [MIC --] [AO Function]    SGEMM
-    [MKL] [MIC --] [AO SGEMM Workdivision]  0.00 1.00
-    [MKL] [MIC 00] [AO SGEMM CPU Time]      0.463351 seconds
-    [MKL] [MIC 00] [AO SGEMM MIC Time]      0.179608 seconds
-    [MKL] [MIC 00] [AO SGEMM CPU->MIC Data] 52428800 bytes
-    [MKL] [MIC 00] [AO SGEMM MIC->CPU Data] 26214400 bytes
-    Done
-```
+```console
+[user@r31u03n799 ~]$ ./sgemm 
+Computing SGEMM on the host
+Enabling Automatic Offload
+Automatic Offload enabled: 2 MIC devices present
+Computing SGEMM with automatic workdivision
+[MKL] [MIC --] [AO Function]    SGEMM
+[MKL] [MIC --] [AO SGEMM Workdivision]    0.44 0.28 0.28
+[MKL] [MIC 00] [AO SGEMM CPU Time]    0.252427 seconds
+[MKL] [MIC 00] [AO SGEMM MIC Time]    0.091001 seconds
+[MKL] [MIC 00] [AO SGEMM CPU->MIC Data]    34078720 bytes
+[MKL] [MIC 00] [AO SGEMM MIC->CPU Data]    7864320 bytes
+[MKL] [MIC 01] [AO SGEMM CPU Time]    0.252427 seconds
+[MKL] [MIC 01] [AO SGEMM MIC Time]    0.094758 seconds
+[MKL] [MIC 01] [AO SGEMM CPU->MIC Data]    34078720 bytes
+[MKL] [MIC 01] [AO SGEMM MIC->CPU Data]    7864320 bytes
+Done
+```
+
+!!! note ""
+    Behavior of automatic offload mode is controlled by functions called within the program or by environment variables. A complete list of controls is listed [here](http://software.intel.com/sites/products/documentation/doclib/mkl_sa/11/mkl_userguide_lnx/GUID-3DC4FC7D-A1E4-423D-9C0C-06AB265FFA86.htm).
+
+### Automatic Offload Example #2
+
+In this example, we will demonstrate automatic offload control via the environment variable MKL_MIC_ENABLE. The function DGEMM will be offloaded.
+
+At first get an interactive PBS session on a node with MIC accelerator.
+
+```console
+$ qsub -I -q qprod -l select=1:ncpus=24:accelerator=True:naccelerators=2:accelerator_model=phi7120 -A NONE-0-0
+```
+
+Once in, we enable the offload and run the Octave software. In Octave, we generate two large random matrices and multiply them together.
+
+```console
+$ export MKL_MIC_ENABLE=1
+$ export OFFLOAD_REPORT=2
+$ ml Octave/3.8.2-intel-2015b
+$ octave -q
+octave:1> A=rand(10000);
+octave:2> B=rand(10000);
+octave:3> C=A*B;
+[MKL] [MIC --] [AO Function]    DGEMM
+[MKL] [MIC --] [AO DGEMM Workdivision]    0.14 0.43 0.43
+[MKL] [MIC 00] [AO DGEMM CPU Time]    3.814714 seconds
+[MKL] [MIC 00] [AO DGEMM MIC Time]    2.781595 seconds
+[MKL] [MIC 00] [AO DGEMM CPU->MIC Data]    1145600000 bytes
+[MKL] [MIC 00] [AO DGEMM MIC->CPU Data]    1382400000 bytes
+[MKL] [MIC 01] [AO DGEMM CPU Time]    3.814714 seconds
+[MKL] [MIC 01] [AO DGEMM MIC Time]    2.843016 seconds
+[MKL] [MIC 01] [AO DGEMM CPU->MIC Data]    1145600000 bytes
+[MKL] [MIC 01] [AO DGEMM MIC->CPU Data]    1382400000 bytes
+octave:4> exit
+```
+
+In the example above we observe that the DGEMM function workload was split over CPU, MIC 0 and MIC 1, in the ratio 0.14 0.43 0.43. The matrix multiplication was done on the CPU, accelerated by two Xeon Phi accelerators.
 
 ## Native Mode
 
@@ -365,10 +454,9 @@ In the native mode a program is executed directly on Intel Xeon Phi without invo
 
 To compile a code user has to be connected to a compute with MIC and load Intel compilers module. To get an interactive session on a compute node with an Intel Xeon Phi and load the module use following commands:
 
-```bash
-    $ qsub -I -q qmic -A NONE-0-0
-
-    $ module load intel/13.5.192
+```console
+$ qsub -I -q qprod -l select=1:ncpus=24:accelerator=True:naccelerators=2:accelerator_model=phi7120 -A NONE-0-0
+$ ml intel
 ```
 
 !!! note
@@ -376,105 +464,108 @@ To compile a code user has to be connected to a compute with MIC and load Intel
 
 To produce a binary compatible with Intel Xeon Phi architecture user has to specify "-mmic" compiler flag. Two compilation examples are shown below. The first example shows how to compile OpenMP parallel code "vect-add.c" for host only:
 
-```bash
-    $ icc -xhost -no-offload -fopenmp vect-add.c -o vect-add-host
+```console
+$ icc -xhost -no-offload -fopenmp vect-add.c -o vect-add-host
 ```
 
 To run this code on host, use:
 
-```bash
-    $ ./vect-add-host
+```console
+$ ./vect-add-host
 ```
 
 The second example shows how to compile the same code for Intel Xeon Phi:
 
-```bash
-    $ icc -mmic -fopenmp vect-add.c -o vect-add-mic
+```console
+$ icc -mmic -fopenmp vect-add.c -o vect-add-mic
 ```
 
 ### Execution of the Program in Native Mode on Intel Xeon Phi
 
 The user access to the Intel Xeon Phi is through the SSH. Since user home directories are mounted using NFS on the accelerator, users do not have to copy binary files or libraries between the host and accelerator.
 
+Get the PATH of MIC enabled libraries for currently used Intel Compiler (here was icc/2015.3.187-GNU-5.1.0-2.25 used):
+
+```console
+$ echo $MIC_LD_LIBRARY_PATH
+/apps/all/icc/2015.3.187-GNU-5.1.0-2.25/composer_xe_2015.3.187/compiler/lib/mic
+```
+
 To connect to the accelerator run:
 
-```bash
-    $ ssh mic0
+```console
+$ ssh mic0
 ```
 
 If the code is sequential, it can be executed directly:
 
-```bash
-    mic0 $ ~/path_to_binary/vect-add-seq-mic
+```console
+mic0 $ ~/path_to_binary/vect-add-seq-mic
 ```
 
 If the code is parallelized using OpenMP a set of additional libraries is required for execution. To locate these libraries new path has to be added to the LD_LIBRARY_PATH environment variable prior to the execution:
 
-```bash
-    mic0 $ export LD_LIBRARY_PATH=/apps/intel/composer_xe_2013.5.192/compiler/lib/mic:$LD_LIBRARY_PATH
+```console
+mic0 $ export LD_LIBRARY_PATH=/apps/all/icc/2015.3.187-GNU-5.1.0-2.25/composer_xe_2015.3.187/compiler/lib/mic:$LD_LIBRARY_PATH
 ```
 
 !!! note
-    The path exported contains path to a specific compiler (here the version is 5.192). This version number has to match with the version number of the Intel compiler module that was used to compile the code on the host computer.
+    Please note that the path exported in the previous example contains a path to a specific compiler (here the version is 2015.3.187-GNU-5.1.0-2.25). This version number has to match the version number of the Intel compiler module that was used to compile the code on the host computer.
 
 For your information the list of libraries and their location required for execution of an OpenMP parallel code on Intel Xeon Phi is:
 
 !!! note
-    /apps/intel/composer_xe_2013.5.192/compiler/lib/mic
+    /apps/all/icc/2015.3.187-GNU-5.1.0-2.25/composer_xe_2015.3.187/compiler/lib/mic
 
-    - libiomp5.so
-    - libimf.so
-    - libsvml.so
-    - libirng.so
-    - libintlc.so.5
+    libiomp5.so 
+    libimf.so 
+    libsvml.so 
+    libirng.so 
+    libintlc.so.5
 
 Finally, to run the compiled code use:
 
-```bash
-    $ ~/path_to_binary/vect-add-mic
-```
+```console
+$ ~/path_to_binary/vect-add-mic
+```
 
 ## OpenCL
 
 OpenCL (Open Computing Language) is an open standard for general-purpose parallel programming for diverse mix of multi-core CPUs, GPU coprocessors, and other parallel processors. OpenCL provides a flexible execution model and uniform programming environment for software developers to write portable code for systems running on both the CPU and graphics processors or accelerators like the Intel® Xeon Phi.
 
-On Anselm OpenCL is installed only on compute nodes with MIC accelerator, therefore OpenCL code can be compiled only on these nodes.
+On Salomon OpenCL is installed only on compute nodes with MIC accelerator, therefore OpenCL code can be compiled only on these nodes.
 
-```bash
-    module load opencl-sdk opencl-rt
+```console
+module load opencl-sdk opencl-rt
 ```
 
 Always load "opencl-sdk" (providing devel files like headers) and "opencl-rt" (providing dynamic library libOpenCL.so) modules to compile and link OpenCL code. Load "opencl-rt" for running your compiled code.
 
 There are two basic examples of OpenCL code in the following directory:
 
-```bash
-    /apps/intel/opencl-examples/
+```console
+/apps/intel/opencl-examples/
 ```
 
 First example "CapsBasic" detects OpenCL compatible hardware, here CPU and MIC, and prints basic information about the capabilities of it.
 
-```bash
-    /apps/intel/opencl-examples/CapsBasic/capsbasic
+```console
+/apps/intel/opencl-examples/CapsBasic/capsbasic
 ```
 
-To compile and run the example copy it to your home directory, get a PBS interactive session on of the nodes with MIC and run make for compilation. Make files are very basic and shows how the OpenCL code can be compiled on Anselm.
+To compile and run the example copy it to your home directory, get a PBS interactive session on of the nodes with MIC and run make for compilation. Make files are very basic and shows how the OpenCL code can be compiled on Salomon.
 
-```bash
-    $ cp /apps/intel/opencl-examples/CapsBasic/* .
-    $ qsub -I -q qmic -A NONE-0-0
-    $ make
+```console
+$ cp /apps/intel/opencl-examples/CapsBasic/* .
+$ qsub -I -q qmic -A NONE-0-0
+$ make
 ```
 
 The compilation command for this example is:
 
-```bash
-    $ g++ capsbasic.cpp -lOpenCL -o capsbasic -I/apps/intel/opencl/include/
+```console
+$ g++ capsbasic.cpp -lOpenCL -o capsbasic -I/apps/intel/opencl/include/
 ```
 
 After executing the complied binary file, following output should be displayed.
 
-```bash
+```console
     ./capsbasic
 
     Number of available platforms: 1
@@ -505,22 +596,22 @@ After executing the complied binary file, following output should be displayed.
 
 The second example that can be found in "/apps/intel/opencl-examples" directory is General Matrix Multiply. You can follow the the same procedure to download the example to your directory and compile it.
 
-```bash
-    $ cp -r /apps/intel/opencl-examples/* .
-    $ qsub -I -q qmic -A NONE-0-0
-    $ cd GEMM
-    $ make
+```console
+$ cp -r /apps/intel/opencl-examples/* .
+$ qsub -I -q qmic -A NONE-0-0
+$ cd GEMM
+$ make
 ```
 
 The compilation command for this example is:
 
-```bash
-    $ g++ cmdoptions.cpp gemm.cpp ../common/basic.cpp ../common/cmdparser.cpp ../common/oclobject.cpp -I../common -lOpenCL -o gemm -I/apps/intel/opencl/include/
+```console
+$ g++ cmdoptions.cpp gemm.cpp ../common/basic.cpp ../common/cmdparser.cpp ../common/oclobject.cpp -I../common -lOpenCL -o gemm -I/apps/intel/opencl/include/
 ```
 
 To see the performance of Intel Xeon Phi performing the DGEMM run the example as follows:
 
-```bash
+```console
     ./gemm -d 1
     Platforms (1):
      [0] Intel(R) OpenCL [Selected]
@@ -547,28 +638,48 @@ To see the performance of Intel Xeon Phi performing the DGEMM run the example as
 
 ### Environment Setup and Compilation
 
+To achieve best MPI performance always use the following setup for Intel MPI on Xeon Phi accelerated nodes:
+
+```console
+$ export I_MPI_FABRICS=shm:dapl
+$ export I_MPI_DAPL_PROVIDER_LIST=ofa-v2-mlx4_0-1u,ofa-v2-scif0,ofa-v2-mcm-1
+```
+
+This ensures that MPI inside a node will use SHMEM communication, between HOST and Phi the IB SCIF will be used, and between different nodes or Phi's on different nodes a CCL-Direct proxy will be used.
+
+!!! note
+    Other FABRICS like tcp,ofa may be used (even combined with shm) but there's a severe loss of performance (by an order of magnitude).
+    Usage of a single DAPL PROVIDER (e.g. I_MPI_DAPL_PROVIDER=ofa-v2-mlx4_0-1u) will cause failure of Host<->Phi and/or Phi<->Phi communication.
+    Usage of the I_MPI_DAPL_PROVIDER_LIST on a non-accelerated node will cause failure of any MPI communication, since those nodes don't have a SCIF device and there's no CCL-Direct proxy running.
+
 Again an MPI code for Intel Xeon Phi has to be compiled on a compute node with accelerator and MPSS software stack installed. To get to a compute node with accelerator use:
 
-```bash
-    $ qsub -I -q qmic -A NONE-0-0
+```console
+$ qsub -I -q qprod -l select=1:ncpus=24:accelerator=True:naccelerators=2:accelerator_model=phi7120 -A NONE-0-0
 ```
 
 The only supported implementation of MPI standard for Intel Xeon Phi is Intel MPI. To setup a fully functional development environment a combination of Intel compiler and Intel MPI has to be used. On a host load following modules before compilation:
 
-```bash
-    $ module load intel/13.5.192 impi/4.1.1.036
+```console
+$ module load intel
 ```
 
 To compile an MPI code for host use:
 
-```bash
-    $ mpiicc -xhost -o mpi-test mpi-test.c
+```console
+$ mpiicc -xhost -o mpi-test mpi-test.c
 ```
 
 To compile the same code for Intel Xeon Phi architecture use:
 
-```bash
-    $ mpiicc -mmic -o mpi-test-mic mpi-test.c
+```console
+$ mpiicc -mmic -o mpi-test-mic mpi-test.c
+```
+
+Or, if you are using Fortran :
+
+```console
+$ mpiifort -mmic -o mpi-test-mic mpi-test.f90
 ```
 
 An example of basic MPI version of "hello-world" example in C language, that can be executed on both host and Xeon Phi is (can be directly copy and pasted to a .c file)
@@ -613,17 +724,17 @@ Intel MPI for the Xeon Phi coprocessors offers different MPI programming models:
 
 In this case all environment variables are set by modules, so to execute the compiled MPI program on a single node, use:
 
-```bash
-    $ mpirun -np 4 ./mpi-test
+```console
+$ mpirun -np 4 ./mpi-test
 ```
 
 The output should be similar to:
 
-```bash
-    Hello world from process 1 of 4 on host cn207
-    Hello world from process 3 of 4 on host cn207
-    Hello world from process 2 of 4 on host cn207
-    Hello world from process 0 of 4 on host cn207
+```console
+Hello world from process 1 of 4 on host r38u31n1000
+Hello world from process 3 of 4 on host r38u31n1000
+Hello world from process 2 of 4 on host r38u31n1000
+Hello world from process 0 of 4 on host r38u31n1000
 ```
 
 ### Coprocessor-Only Model
@@ -635,18 +746,27 @@ coprocessor; or 2.) lunch the task using "**mpiexec.hydra**" from a host.
 
 Similarly to execution of OpenMP programs in native mode, since the environmental module are not supported on MIC, user has to setup paths to Intel MPI libraries and binaries manually. One time setup can be done by creating a "**.profile**" file in user's home directory. This file sets up the environment on the MIC automatically once user access to the accelerator through the SSH.
 
-```bash
-    $ vim ~/.profile
+At first get the LD_LIBRARY_PATH for the currently used Intel Compiler and Intel MPI:
+
+```console
+$ echo $MIC_LD_LIBRARY_PATH
+/apps/all/imkl/11.2.3.187-iimpi-7.3.5-GNU-5.1.0-2.25/mkl/lib/mic:/apps/all/imkl/11.2.3.187-iimpi-7.3.5-GNU-5.1.0-2.25/lib/mic:/apps/all/icc/2015.3.187-GNU-5.1.0-2.25/composer_xe_2015.3.187/compiler/lib/mic/
+```
+
+Use it in your ~/.profile:
+
+```console
+$ cat ~/.profile
 
-    PS1='[u@h W]$ '
-    export PATH=/usr/bin:/usr/sbin:/bin:/sbin
+PS1='[\u@\h \W]\$ '
+export PATH=/usr/bin:/usr/sbin:/bin:/sbin
 
-    #OpenMP
-    export LD_LIBRARY_PATH=/apps/intel/composer_xe_2013.5.192/compiler/lib/mic:$LD_LIBRARY_PATH
+#IMPI
+export PATH=/apps/all/impi/5.0.3.048-iccifort-2015.3.187-GNU-5.1.0-2.25/mic/bin/:$PATH
+
+#OpenMP (ICC, IFORT), IMKL and IMPI
+export LD_LIBRARY_PATH=/apps/all/imkl/11.2.3.187-iimpi-7.3.5-GNU-5.1.0-2.25/mkl/lib/mic:/apps/all/imkl/11.2.3.187-iimpi-7.3.5-GNU-5.1.0-2.25/lib/mic:/apps/all/icc/2015.3.187-GNU-5.1.0-2.25/composer_xe_2015.3.187/compiler/lib/mic:$LD_LIBRARY_PATH
 
-    #Intel MPI
-    export LD_LIBRARY_PATH=/apps/intel/impi/4.1.1.036/mic/lib/:$LD_LIBRARY_PATH
-    export PATH=/apps/intel/impi/4.1.1.036/mic/bin/:$PATH
 ```
 
 !!! note
@@ -655,29 +775,29 @@ Similarly to execution of OpenMP programs in native mode, since the environmenta
 
 To access a MIC accelerator located on a node that user is currently connected to, use:
 
-```bash
-    $ ssh mic0
+```console
+$ ssh mic0
 ```
 
 or in case you need specify a MIC accelerator on a particular node, use:
 
-```bash
-    $ ssh cn207-mic0
+```console
+$ ssh r38u31n1000-mic0
 ```
 
 To run the MPI code in parallel on multiple core of the accelerator, use:
 
-```bash
-    $ mpirun -np 4 ./mpi-test-mic
+```console
+$ mpirun -np 4 ./mpi-test-mic
 ```
 
 The output should be similar to:
 
-```bash
-    Hello world from process 1 of 4 on host cn207-mic0
-    Hello world from process 2 of 4 on host cn207-mic0
-    Hello world from process 3 of 4 on host cn207-mic0
-    Hello world from process 0 of 4 on host cn207-mic0
+```console
+Hello world from process 1 of 4 on host r38u31n1000-mic0
+Hello world from process 2 of 4 on host r38u31n1000-mic0
+Hello world from process 3 of 4 on host r38u31n1000-mic0
+Hello world from process 0 of 4 on host r38u31n1000-mic0
 ```
 
 #### Execution on Host
@@ -686,20 +806,20 @@ If the MPI program is launched from host instead of the coprocessor, the environ
 
 First step is to tell mpiexec that the MPI should be executed on a local accelerator by setting up the environmental variable "I_MPI_MIC"
 
-```bash
-    $ export I_MPI_MIC=1
+```console
+$ export I_MPI_MIC=1
 ```
 
 Now the MPI program can be executed as:
 
-```bash
-    $ mpiexec.hydra -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/ -host mic0 -n 4 ~/mpi-test-mic
+```console
+$ mpirun -genv LD_LIBRARY_PATH $MIC_LD_LIBRARY_PATH -host mic0 -n 4 ~/mpi-test-mic
 ```
 
 or using mpirun
 
-```bash
-    $ mpirun -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/ -host mic0 -n 4 ~/mpi-test-mic
+```console
+$ mpirun -genv LD_LIBRARY_PATH $MIC_LD_LIBRARY_PATH -host mic0 -n 4 ~/mpi-test-mic
 ```
 
 !!! note
@@ -708,11 +828,11 @@ or using mpirun
 
 The output should be again similar to:
 
-```bash
-    Hello world from process 1 of 4 on host cn207-mic0
-    Hello world from process 2 of 4 on host cn207-mic0
-    Hello world from process 3 of 4 on host cn207-mic0
-    Hello world from process 0 of 4 on host cn207-mic0
+```console
+Hello world from process 1 of 4 on host r38u31n1000-mic0
+Hello world from process 2 of 4 on host r38u31n1000-mic0
+Hello world from process 3 of 4 on host r38u31n1000-mic0
+Hello world from process 0 of 4 on host r38u31n1000-mic0
 ```
 
 !!! hint
@@ -720,183 +840,171 @@ The output should be again similar to:
 
 A simple test to see if the file is present is to execute:
 
-```bash
-      $ ssh mic0 ls /bin/pmi_proxy
-      /bin/pmi_proxy
+```console
+$ ssh mic0 ls /bin/pmi_proxy
+  /bin/pmi_proxy
 ```
 
 #### Execution on Host - MPI Processes Distributed Over Multiple Accelerators on Multiple Nodes
 
 To get access to multiple nodes with MIC accelerator, user has to use PBS to allocate the resources. To start interactive session, that allocates 2 compute nodes = 2 MIC accelerators run qsub command with following parameters:
 
-```bash
-    $ qsub -I -q qmic -A NONE-0-0 -l select=2:ncpus=16
-
-    $ module load intel/13.5.192 impi/4.1.1.036
+```console
+$ qsub -I -q qprod -l select=2:ncpus=24:accelerator=True:naccelerators=2:accelerator_model=phi7120 -A NONE-0-0
+$ module load intel impi
 ```
 
 This command connects user through ssh to one of the nodes immediately. To see the other nodes that have been allocated use:
 
-```bash
-    $ cat $PBS_NODEFILE
+```console
+$ cat $PBS_NODEFILE
 ```
 
 For example:
 
-```bash
-    cn204.bullx
-    cn205.bullx
+```console
+r25u25n710.ib0.smc.salomon.it4i.cz
+r25u26n711.ib0.smc.salomon.it4i.cz
 ```
 
-This output means that the PBS allocated nodes cn204 and cn205, which means that user has direct access to "**cn204-mic0**" and "**cn-205-mic0**" accelerators.
+This output means that the PBS allocated nodes r25u25n710 and r25u26n711, which means that user has direct access to "**r25u25n710-mic0**" and "**r25u26n711-mic0**" accelerators.
 
 !!! note
     At this point user can connect to any of the allocated nodes or any of the allocated MIC accelerators using ssh:
-    - to connect to the second node : `$ ssh cn205`
-    - to connect to the accelerator on the first node from the first node:  `$ ssh cn204-mic0` or `$ ssh mic0`
-    - to connect to the accelerator on the second node from the first node: `$ ssh cn205-mic0`
+    - to connect to the second node : `$ ssh r25u26n711`
+    - to connect to the accelerator on the first node from the first node:  `$ ssh r25u25n710-mic0` or `$ ssh mic0`
+    - to connect to the accelerator on the second node from the first node: `$ ssh r25u26n711-mic0`
 
-At this point we expect that correct modules are loaded and binary is compiled. For parallel execution the mpiexec.hydra is used. Again the first step is to tell mpiexec that the MPI can be executed on MIC accelerators by setting up the environmental variable "I_MPI_MIC"
+At this point we expect that correct modules are loaded and binary is compiled. For parallel execution the mpiexec.hydra is used. Again the first step is to tell mpiexec that the MPI can be executed on MIC accelerators by setting up the environmental variable "I_MPI_MIC", don't forget to have correct FABRIC and PROVIDER defined.
 
-```bash
-    $ export I_MPI_MIC=1
+```console
+$ export I_MPI_MIC=1
+$ export I_MPI_FABRICS=shm:dapl
+$ export I_MPI_DAPL_PROVIDER_LIST=ofa-v2-mlx4_0-1u,ofa-v2-scif0,ofa-v2-mcm-1
 ```
 
 The launch the MPI program use:
 
-```bash
-    $ mpiexec.hydra -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/
-     -genv I_MPI_FABRICS_LIST tcp
-     -genv I_MPI_FABRICS shm:tcp
-     -genv I_MPI_TCP_NETMASK=10.1.0.0/16
-     -host cn204-mic0 -n 4 ~/mpi-test-mic
-    : -host cn205-mic0 -n 6 ~/mpi-test-mic
+```console
+$ mpirun -genv LD_LIBRARY_PATH $MIC_LD_LIBRARY_PATH \
+ -host r25u25n710-mic0 -n 4 ~/mpi-test-mic \
+: -host r25u26n711-mic0 -n 6 ~/mpi-test-mic
 ```
 
 or using mpirun:
 
-```bash
-    $ mpirun -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/
-     -genv I_MPI_FABRICS_LIST tcp
-     -genv I_MPI_FABRICS shm:tcp
-     -genv I_MPI_TCP_NETMASK=10.1.0.0/16
-     -host cn204-mic0 -n 4 ~/mpi-test-mic
-    : -host cn205-mic0 -n 6 ~/mpi-test-mic
+```console
+$ mpirun -genv LD_LIBRARY_PATH $MIC_LD_LIBRARY_PATH \
+ -host r25u25n710-mic0 -n 4 ~/mpi-test-mic \
+: -host r25u26n711-mic0 -n 6 ~/mpi-test-mic
 ```
 
 In this case four MPI processes are executed on accelerator cn204-mic and six processes are executed on accelerator cn205-mic0. The sample output (sorted after execution) is:
 
-```bash
-    Hello world from process 0 of 10 on host cn204-mic0
-    Hello world from process 1 of 10 on host cn204-mic0
-    Hello world from process 2 of 10 on host cn204-mic0
-    Hello world from process 3 of 10 on host cn204-mic0
-    Hello world from process 4 of 10 on host cn205-mic0
-    Hello world from process 5 of 10 on host cn205-mic0
-    Hello world from process 6 of 10 on host cn205-mic0
-    Hello world from process 7 of 10 on host cn205-mic0
-    Hello world from process 8 of 10 on host cn205-mic0
-    Hello world from process 9 of 10 on host cn205-mic0
+```console
+Hello world from process 0 of 10 on host r25u25n710-mic0
+Hello world from process 1 of 10 on host r25u25n710-mic0
+Hello world from process 2 of 10 on host r25u25n710-mic0
+Hello world from process 3 of 10 on host r25u25n710-mic0
+Hello world from process 4 of 10 on host r25u26n711-mic0
+Hello world from process 5 of 10 on host r25u26n711-mic0
+Hello world from process 6 of 10 on host r25u26n711-mic0
+Hello world from process 7 of 10 on host r25u26n711-mic0
+Hello world from process 8 of 10 on host r25u26n711-mic0
+Hello world from process 9 of 10 on host r25u26n711-mic0
 ```
 
 The same way MPI program can be executed on multiple hosts:
 
-```bash
-    $ mpiexec.hydra -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/
-     -genv I_MPI_FABRICS_LIST tcp
-     -genv I_MPI_FABRICS shm:tcp
-     -genv I_MPI_TCP_NETMASK=10.1.0.0/16
-     -host cn204 -n 4 ~/mpi-test
-    : -host cn205 -n 6 ~/mpi-test
+```console
+$ mpirun -genv LD_LIBRARY_PATH $MIC_LD_LIBRARY_PATH \
+ -host r25u25n710 -n 4 ~/mpi-test \
+: -host r25u26n711 -n 6 ~/mpi-test
 ```
 
-\###Symmetric model
+### Symmetric model
 
 In a symmetric mode MPI programs are executed on both host computer(s) and MIC accelerator(s). Since MIC has a different
 architecture and requires different binary file produced by the Intel compiler two different files has to be compiled before MPI program is executed.
 
 In the previous section we have compiled two binary files, one for hosts "**mpi-test**" and one for MIC accelerators "**mpi-test-mic**". These two binaries can be executed at once using mpiexec.hydra:
 
-```bash
-    $ mpiexec.hydra
-     -genv I_MPI_FABRICS_LIST tcp
-     -genv I_MPI_FABRICS shm:tcp
-     -genv I_MPI_TCP_NETMASK=10.1.0.0/16
-     -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/
-     -host cn205 -n 2 ~/mpi-test
-    : -host cn205-mic0 -n 2 ~/mpi-test-mic
+```console
+$ mpirun \
+ -genv LD_LIBRARY_PATH $MIC_LD_LIBRARY_PATH \
+ -host r38u32n1001 -n 2 ~/mpi-test \
+: -host r38u32n1001-mic0 -n 2 ~/mpi-test-mic
 ```
 
-In this example the first two parameters (line 2 and 3) sets up required environment variables for execution. The third line specifies binary that is executed on host (here cn205) and the last line specifies the binary that is execute on the accelerator (here cn205-mic0).
+In this example the first parameter (line 2) sets up the required environment variables for execution. The third line specifies the binary that is executed on the host (here r38u32n1001) and the last line specifies the binary that is executed on the accelerator (here r38u32n1001-mic0).
 
 The output of the program is:
 
-```bash
-    Hello world from process 0 of 4 on host cn205
-    Hello world from process 1 of 4 on host cn205
-    Hello world from process 2 of 4 on host cn205-mic0
-    Hello world from process 3 of 4 on host cn205-mic0
+```console
+Hello world from process 0 of 4 on host r38u32n1001
+Hello world from process 1 of 4 on host r38u32n1001
+Hello world from process 2 of 4 on host r38u32n1001-mic0
+Hello world from process 3 of 4 on host r38u32n1001-mic0
 ```
 
 The execution procedure can be simplified by using the mpirun command with the machine file as a parameter. The machine file contains a list of all nodes and accelerators that should be used to execute MPI processes.
 
-An example of a machine file that uses 2 >hosts (**cn205** and **cn206**) and 2 accelerators **(cn205-mic0** and **cn206-mic0**) to run 2 MPI processes on each of them:
+An example of a machine file that uses 2 hosts (**r38u32n1001** and **r38u33n1002**) and 2 accelerators (**r38u32n1001-mic0** and **r38u33n1002-mic0**) to run 2 MPI processes on each of them:
 
-```bash
-    $ cat hosts_file_mix
-    cn205:2
-    cn205-mic0:2
-    cn206:2
-    cn206-mic0:2
+```console
+$ cat hosts_file_mix
+r38u32n1001:2
+r38u32n1001-mic0:2
+r38u33n1002:2
+r38u33n1002-mic0:2
 ```
 
 In addition, if a naming convention is set in a way that the name of the binary for the host is **"bin_name"** and the name of the binary for the accelerator is **"bin_name-mic"**, then by setting up the environment variable **I_MPI_MIC_POSTFIX** to **"-mic"** the user does not have to specify the names of both binaries. In this case mpirun needs just the name of the host binary file (i.e. "mpi-test") and uses the suffix to get the name of the binary for the accelerator (i.e. "mpi-test-mic").
 
-```bash
-    $ export I_MPI_MIC_POSTFIX=-mic
+```console
+$ export I_MPI_MIC_POSTFIX=-mic
 ```
 
 To run the MPI code using mpirun and the machine file "hosts_file_mix" use:
 
-```bash
-    $ mpirun
-     -genv I_MPI_FABRICS shm:tcp
-     -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/
-     -genv I_MPI_FABRICS_LIST tcp
-     -genv I_MPI_FABRICS shm:tcp
-     -genv I_MPI_TCP_NETMASK=10.1.0.0/16
-     -machinefile hosts_file_mix
-     ~/mpi-test
+```console
+$ mpirun \
+ -genv LD_LIBRARY_PATH $MIC_LD_LIBRARY_PATH \
+ -machinefile hosts_file_mix \
+ ~/mpi-test
 ```
 
 A possible output of the MPI "hello-world" example executed on two hosts and two accelerators is:
 
-```bash
-    Hello world from process 0 of 8 on host cn204
-    Hello world from process 1 of 8 on host cn204
-    Hello world from process 2 of 8 on host cn204-mic0
-    Hello world from process 3 of 8 on host cn204-mic0
-    Hello world from process 4 of 8 on host cn205
-    Hello world from process 5 of 8 on host cn205
-    Hello world from process 6 of 8 on host cn205-mic0
-    Hello world from process 7 of 8 on host cn205-mic0
+```console
+Hello world from process 0 of 8 on host r38u31n1000
+Hello world from process 1 of 8 on host r38u31n1000
+Hello world from process 2 of 8 on host r38u31n1000-mic0
+Hello world from process 3 of 8 on host r38u31n1000-mic0
+Hello world from process 4 of 8 on host r38u32n1001
+Hello world from process 5 of 8 on host r38u32n1001
+Hello world from process 6 of 8 on host r38u32n1001-mic0
+Hello world from process 7 of 8 on host r38u32n1001-mic0
 ```
 
 !!! note
     At this point the MPI communication between MIC accelerators on different nodes uses 1Gb Ethernet only.
 
-#### Using the PBS Automatically Generated Node-Files
+### Using Automatically Generated Node-Files
 
-PBS also generates a set of node-files that can be used instead of manually creating a new one every time. Three node-files are genereated:
+A set of node-files, which can be used instead of manually creating a new one every time, is generated for user convenience. Six node-files are generated:
 
 !!! note
-    **Host only node-file:**
+    **Node-files:**
 
-     - /lscratch/${PBS_JOBID}/nodefile-cn MIC only node-file:
-     - /lscratch/${PBS_JOBID}/nodefile-mic Host and MIC node-file:
-     - /lscratch/${PBS_JOBID}/nodefile-mix
+     - /lscratch/${PBS_JOBID}/nodefile-cn Hosts only node-file
+     - /lscratch/${PBS_JOBID}/nodefile-mic MICs only node-file
+     - /lscratch/${PBS_JOBID}/nodefile-mix Hosts and MICs node-file
+     - /lscratch/${PBS_JOBID}/nodefile-cn-sn Hosts only node-file, using short names
+     - /lscratch/${PBS_JOBID}/nodefile-mic-sn MICs only node-file, using short names
+     - /lscratch/${PBS_JOBID}/nodefile-mix-sn Hosts and MICs node-file, using short names
 
-Each host or accelerator is listed only per files. User has to specify how many jobs should be executed per node using "-n" parameter of the mpirun command.
+Each host or accelerator is listed only once per file. User has to specify how many jobs should be executed per node using `-n` parameter of the mpirun command.
 
 ## Optimization
 
diff --git a/docs.it4i/salomon/software/java.md b/docs.it4i/salomon/software/java.md
index 703e53fc1093cf28aeb5c80b985174784e54ad90..83c3738c0802e612ba84c25868771c44fa51a1ab 100644
--- a/docs.it4i/salomon/software/java.md
+++ b/docs.it4i/salomon/software/java.md
@@ -2,24 +2,24 @@
 
 Java is available on the cluster. Activate java by loading the Java module
 
-```bash
-    $ module load Java
+```console
+$ ml Java
 ```
 
 Note that the Java module must be loaded on the compute nodes as well, in order to run java on compute nodes.
 
 Check for java version and path
 
-```bash
-    $ java -version
-    $ which java
+```console
+$ java -version
+$ which java
 ```
 
 With the module loaded, not only the runtime environment (JRE), but also the development environment (JDK) with the compiler is available.
 
-```bash
-    $ javac -version
-    $ which javac
+```console
+$ javac -version
+$ which javac
 ```
 
 Java applications may use MPI for inter-process communication, in conjunction with Open MPI. Read more on <http://www.open-mpi.org/faq/?category=java>. This functionality is currently not supported on Anselm cluster. In case you require the java interface to MPI, please contact [cluster support](https://support.it4i.cz/rt/).
diff --git a/docs.it4i/salomon/software/mpi/Running_OpenMPI.md b/docs.it4i/salomon/software/mpi/Running_OpenMPI.md
index 9aa54f09aa07ccde2daa1bfc5c6ff4daeab2b78b..e2633236ac6624c7a41ed56496bacb9795158901 100644
--- a/docs.it4i/salomon/software/mpi/Running_OpenMPI.md
+++ b/docs.it4i/salomon/software/mpi/Running_OpenMPI.md
@@ -10,16 +10,14 @@ Use the mpiexec to run the OpenMPI code.
 
 Example:
 
-```bash
-    $ qsub -q qexp -l select=4:ncpus=24 -I
+```console
+$ qsub -q qexp -l select=4:ncpus=24 -I
     qsub: waiting for job 15210.isrv5 to start
     qsub: job 15210.isrv5 ready
-
-    $ pwd
+$ pwd
     /home/username
-
-    $ module load OpenMPI
-    $ mpiexec -pernode ./helloworld_mpi.x
+$ ml OpenMPI
+$ mpiexec -pernode ./helloworld_mpi.x
     Hello world! from rank 0 of 4 on host r1i0n17
     Hello world! from rank 1 of 4 on host r1i0n5
     Hello world! from rank 2 of 4 on host r1i0n6
@@ -33,11 +31,10 @@ Note that the executable helloworld_mpi.x must be available within the same path
 
 You need to preload the executable, if running on the local ramdisk /tmp filesystem
 
-```bash
-    $ pwd
+```console
+$ pwd
     /tmp/pbs.15210.isrv5
-
-    $ mpiexec -pernode --preload-binary ./helloworld_mpi.x
+$ mpiexec -pernode --preload-binary ./helloworld_mpi.x
     Hello world! from rank 0 of 4 on host r1i0n17
     Hello world! from rank 1 of 4 on host r1i0n5
     Hello world! from rank 2 of 4 on host r1i0n6
@@ -54,12 +51,10 @@ The mpiprocs and ompthreads parameters allow for selection of number of running
 
 Follow this example to run one MPI process per node, 24 threads per process.
 
-```bash
-    $ qsub -q qexp -l select=4:ncpus=24:mpiprocs=1:ompthreads=24 -I
-
-    $ module load OpenMPI
-
-    $ mpiexec --bind-to-none ./helloworld_mpi.x
+```console
+$ qsub -q qexp -l select=4:ncpus=24:mpiprocs=1:ompthreads=24 -I
+$ ml OpenMPI
+$ mpiexec --bind-to-none ./helloworld_mpi.x
 ```
 
 In this example, we demonstrate recommended way to run an MPI application, using 1 MPI processes per node and 24 threads per socket, on 4 nodes.
@@ -68,12 +63,10 @@ In this example, we demonstrate recommended way to run an MPI application, using
 
 Follow this example to run two MPI processes per node, 8 threads per process. Note the options to mpiexec.
 
-```bash
-    $ qsub -q qexp -l select=4:ncpus=24:mpiprocs=2:ompthreads=12 -I
-
-    $ module load OpenMPI
-
-    $ mpiexec -bysocket -bind-to-socket ./helloworld_mpi.x
+```console
+$ qsub -q qexp -l select=4:ncpus=24:mpiprocs=2:ompthreads=12 -I
+$ ml OpenMPI
+$ mpiexec -bysocket -bind-to-socket ./helloworld_mpi.x
 ```
 
 In this example, we demonstrate recommended way to run an MPI application, using 2 MPI processes per node and 12 threads per socket, each process and its threads bound to a separate processor socket of the node, on 4 nodes
@@ -82,12 +75,10 @@ In this example, we demonstrate recommended way to run an MPI application, using
 
 Follow this example to run 24 MPI processes per node, 1 thread per process. Note the options to mpiexec.
 
-```bash
-    $ qsub -q qexp -l select=4:ncpus=24:mpiprocs=24:ompthreads=1 -I
-
-    $ module load OpenMPI
-
-    $ mpiexec -bycore -bind-to-core ./helloworld_mpi.x
+```console
+$ qsub -q qexp -l select=4:ncpus=24:mpiprocs=24:ompthreads=1 -I
+$ ml OpenMPI
+$ mpiexec -bycore -bind-to-core ./helloworld_mpi.x
 ```
 
 In this example, we demonstrate recommended way to run an MPI application, using 24 MPI processes per node, single threaded. Each process is bound to separate processor core, on 4 nodes.
@@ -99,21 +90,21 @@ In this example, we demonstrate recommended way to run an MPI application, using
 
 In the previous two examples with one or two MPI processes per node, the operating system might still migrate OpenMP threads between cores. You might want to avoid this by setting these environment variable for GCC OpenMP:
 
-```bash
-    $ export GOMP_CPU_AFFINITY="0-23"
+```console
+$ export GOMP_CPU_AFFINITY="0-23"
 ```
 
 or this one for Intel OpenMP:
 
-```bash
-    $ export KMP_AFFINITY=granularity=fine,compact,1,0
+```console
+$ export KMP_AFFINITY=granularity=fine,compact,1,0
 ```
 
 As of OpenMP 4.0 (supported by GCC 4.9 and later and Intel 14.0 and later) the following variables may be used for Intel or GCC:
 
-```bash
-    $ export OMP_PROC_BIND=true
-    $ export OMP_PLACES=cores
+```console
+$ export OMP_PROC_BIND=true
+$ export OMP_PLACES=cores
 ```
 
 ## OpenMPI Process Mapping and Binding
@@ -126,7 +117,7 @@ MPI process mapping may be specified by a hostfile or rankfile input to the mpie
 
 Example hostfile
 
-```bash
+```console
     r1i0n17.smc.salomon.it4i.cz
     r1i0n5.smc.salomon.it4i.cz
     r1i0n6.smc.salomon.it4i.cz
@@ -135,8 +126,8 @@ Example hostfile
 
 Use the hostfile to control process placement
 
-```bash
-    $ mpiexec -hostfile hostfile ./helloworld_mpi.x
+```console
+$ mpiexec -hostfile hostfile ./helloworld_mpi.x
     Hello world! from rank 0 of 4 on host r1i0n17
     Hello world! from rank 1 of 4 on host r1i0n5
     Hello world! from rank 2 of 4 on host r1i0n6
@@ -153,7 +144,7 @@ Appropriate binding may boost performance of your application.
 
 Example rankfile
 
-```bash
+```console
     rank 0=r1i0n7.smc.salomon.it4i.cz slot=1:0,1
     rank 1=r1i0n6.smc.salomon.it4i.cz slot=0:*
     rank 2=r1i0n5.smc.salomon.it4i.cz slot=1:1-2
@@ -170,7 +161,7 @@ rank 2 will be bounded to r1i0n5, socket1, core1 and core2
 rank 3 will be bounded to r1i0n17, socket0 core1, socket1 core0, core1, core2
 rank 4 will be bounded to r1i0n6, all cores on both sockets
 
-```bash
+```console
     $ mpiexec -n 5 -rf rankfile --report-bindings ./helloworld_mpi.x
     [r1i0n17:11180]  MCW rank 3 bound to socket 0[core 1] socket 1[core 0-2]: [. B . . . . . . . . . .][B B B . . . . . . . . .] (slot list 0:1,1:0-2)
     [r1i0n7:09928] MCW rank 0 bound to socket 1[core 0-1]: [. . . . . . . . . . . .][B B . . . . . . . . . .] (slot list 1:0,1)
@@ -192,10 +183,10 @@ It is users responsibility to provide correct number of ranks, sockets and cores
 
 In all cases, binding and threading may be verified by executing for example:
 
-```bash
-    $ mpiexec -bysocket -bind-to-socket --report-bindings echo
-    $ mpiexec -bysocket -bind-to-socket numactl --show
-    $ mpiexec -bysocket -bind-to-socket echo $OMP_NUM_THREADS
+```console
+$ mpiexec -bysocket -bind-to-socket --report-bindings echo
+$ mpiexec -bysocket -bind-to-socket numactl --show
+$ mpiexec -bysocket -bind-to-socket echo $OMP_NUM_THREADS
 ```
 
 ## Changes in OpenMPI 1.8
diff --git a/docs.it4i/salomon/software/mpi/mpi.md b/docs.it4i/salomon/software/mpi/mpi.md
index 411d54ddabae7b32ef32f894f2cc466e93eeb866..99f8745aca779ad71a3ab5322499aa9e8bc9fd25 100644
--- a/docs.it4i/salomon/software/mpi/mpi.md
+++ b/docs.it4i/salomon/software/mpi/mpi.md
@@ -15,8 +15,8 @@ MPI libraries are activated via the environment modules.
 
 Look up section modulefiles/mpi in module avail
 
-```bash
-    $ module avail
+```console
+$ ml av
     ------------------------------ /apps/modules/mpi -------------------------------
     impi/4.1.1.036-iccifort-2013.5.192
     impi/4.1.1.036-iccifort-2013.5.192-GCC-4.8.3
@@ -35,16 +35,16 @@ There are default compilers associated with any particular MPI implementation. T
 
 Examples:
 
-```bash
-    $ module load gompi/2015b
+```console
+$ ml gompi/2015b
 ```
 
 In this example, we activate the latest OpenMPI with latest GNU compilers (OpenMPI 1.8.6 and GCC 5.1). Please see more information about toolchains in section [Environment and Modules](../../environment-and-modules/) .
 
 To use OpenMPI with the intel compiler suite, use
 
-```bash
-    $ module load iompi/2015.03
+```console
+$ ml iompi/2015.03
 ```
 
 In this example, the openmpi 1.8.6 using intel compilers is activated. It's used "iompi" toolchain.
@@ -53,17 +53,17 @@ In this example, the openmpi 1.8.6 using intel compilers is activated. It's used
 
 After setting up your MPI environment, compile your program using one of the mpi wrappers
 
-```bash
-    $ mpicc -v
-    $ mpif77 -v
-    $ mpif90 -v
+```console
+$ mpicc -v
+$ mpif77 -v
+$ mpif90 -v
 ```
 
 When using Intel MPI, use the following MPI wrappers:
 
-```bash
-    $ mpicc
-    $ mpiifort
+```console
+$ mpicc
+$ mpiifort
 ```
 
 Wrappers mpif90, mpif77 that are provided by Intel MPI are designed for gcc and gfortran. You might be able to compile MPI code by them even with Intel compilers, but you might run into problems (for example, native MIC compilation with -mmic does not work with mpif90).
@@ -100,8 +100,8 @@ Example program:
 
 Compile the above example with
 
-```bash
-    $ mpicc helloworld_mpi.c -o helloworld_mpi.x
+```console
+$ mpicc helloworld_mpi.c -o helloworld_mpi.x
 ```
 
 ## Running MPI Programs
diff --git a/docs.it4i/salomon/software/mpi/mpi4py-mpi-for-python.md b/docs.it4i/salomon/software/mpi/mpi4py-mpi-for-python.md
index 160478b6ed3c4dbfaf7226759fab0fd8fb9ddc67..f957f5c1439272e3b65d069e68d055e52c4cc0b8 100644
--- a/docs.it4i/salomon/software/mpi/mpi4py-mpi-for-python.md
+++ b/docs.it4i/salomon/software/mpi/mpi4py-mpi-for-python.md
@@ -8,34 +8,51 @@ MPI for Python provides bindings of the Message Passing Interface (MPI) standard
 
 This package is constructed on top of the MPI-1/2 specifications and provides an object oriented interface which closely follows MPI-2 C++ bindings. It supports point-to-point (sends, receives) and collective (broadcasts, scatters, gathers) communications of any picklable Python object, as well as optimized communications of Python object exposing the single-segment buffer interface (NumPy arrays, builtin bytes/string/array objects).
 
-On Anselm MPI4Py is available in standard Python modules.
+On Salomon MPI4Py is available in standard Python modules.
 
 ## Modules
 
 MPI4Py is built for OpenMPI. Before you start with MPI4Py you need to load the Python and OpenMPI modules. You can use a toolchain that loads Python and OpenMPI at once.
 
-```bash
-    $ module load Python/2.7.9-foss-2015g
+```console
+$ ml av Python/
+--------------------------------------- /apps/modules/lang -------------------------
+   Python/2.7.8-intel-2015b    Python/2.7.11-intel-2016a  Python/3.5.1-intel-2017.00
+   Python/2.7.11-intel-2017a   Python/2.7.9-foss-2015b    Python/2.7.9-intel-2015b
+   Python/2.7.11-foss-2016a    Python/3.5.2-foss-2016a    Python/3.5.1
+   Python/2.7.9-foss-2015g     Python/3.4.3-intel-2015b   Python/2.7.9
+   Python/2.7.11-intel-2015b   Python/3.5.2
+   
+$ ml av OpenMPI/
+--------------------------------------- /apps/modules/mpi --------------------------
+OpenMPI/1.8.6-GCC-4.4.7-system   OpenMPI/1.8.8-GNU-4.9.3-2.25  OpenMPI/1.10.1-GCC-4.9.3-2.25
+OpenMPI/1.8.6-GNU-5.1.0-2.25     OpenMPI/1.8.8-GNU-5.1.0-2.25  OpenMPI/1.10.1-GNU-4.9.3-2.25
+    OpenMPI/1.8.8-iccifort-2015.3.187-GNU-4.9.3-2.25   OpenMPI/2.0.2-GCC-6.3.0-2.27
 ```
 
+!!! warning ""
+    * modules Python/x.x.x-intel... - intel MPI
+    * modules Python/x.x.x-foss...  - OpenMPI
+    * modules Python/x.x.x - without MPI
+
 ## Execution
 
 You need to import MPI to your python program. Include the following line to the python script:
 
-```bash
+```python
     from mpi4py import MPI
 ```
 
 The MPI4Py enabled python programs [execute as any other OpenMPI](Running_OpenMPI/) code. The simplest way is to run
 
-```bash
-    $ mpiexec python <script>.py
+```console
+$ mpiexec python <script>.py
 ```
 
 For example
 
-```bash
-    $ mpiexec python hello_world.py
+```console
+$ mpiexec python hello_world.py
 ```
 
 ## Examples
@@ -83,12 +100,10 @@ For example
 
 Execute the above code as:
 
-```bash
-    $ qsub -q qexp -l select=4:ncpus=24:mpiprocs=24:ompthreads=1 -I
-
-    $ module load Python/2.7.9-foss-2015g
-
-    $ mpiexec --map-by core --bind-to core python hello_world.py
+```console
+$ qsub -q qexp -l select=4:ncpus=24:mpiprocs=24:ompthreads=1 -I
+$ ml Python/2.7.9-foss-2015g
+$ mpiexec --map-by core --bind-to core python hello_world.py
 ```
 
 In this example, we run MPI4Py enabled code on 4 nodes, 24 cores per node (total of 96 processes), each python process is bound to a different core. More examples and documentation can be found on [MPI for Python webpage](https://pypi.python.org/pypi/mpi4py).
diff --git a/docs.it4i/salomon/software/numerical-languages/introduction.md b/docs.it4i/salomon/software/numerical-languages/introduction.md
index 50f083a91c52acc731fcbd0abe849904df757221..13ba67071a136612568b6772104f0c8c5430ba40 100644
--- a/docs.it4i/salomon/software/numerical-languages/introduction.md
+++ b/docs.it4i/salomon/software/numerical-languages/introduction.md
@@ -10,9 +10,9 @@ This section contains a collection of high-level interpreted languages, primaril
 
 MATLAB®^ is a high-level language and interactive environment for numerical computation, visualization, and programming.
 
-```bash
-    $ module load MATLAB
-    $ matlab
+```console
+$ ml MATLAB
+$ matlab
 ```
 
 Read more at the [Matlab page](matlab/).
@@ -21,9 +21,9 @@ Read more at the [Matlab page](matlab/).
 
 GNU Octave is a high-level interpreted language, primarily intended for numerical computations. The Octave language is quite similar to Matlab so that most programs are easily portable.
 
-```bash
-    $ module load Octave
-    $ octave
+```console
+$ ml Octave
+$ octave
 ```
 
 Read more at the [Octave page](octave/).
@@ -32,9 +32,9 @@ Read more at the [Octave page](octave/).
 
 R is an interpreted language and environment for statistical computing and graphics.
 
-```bash
-    $ module load R
-    $ R
+```console
+$ ml R
+$ R
 ```
 
 Read more at the [R page](r/).
diff --git a/docs.it4i/salomon/software/numerical-languages/matlab.md b/docs.it4i/salomon/software/numerical-languages/matlab.md
index aec28baaedbec6491cfe8ba14a7442368dbdec17..e08bf9099ee9d5175a8579afe2fc9d6d32b1aa8f 100644
--- a/docs.it4i/salomon/software/numerical-languages/matlab.md
+++ b/docs.it4i/salomon/software/numerical-languages/matlab.md
@@ -9,14 +9,14 @@ Matlab is available in versions R2015a and R2015b. There are always two variants
 
 To load the latest version of Matlab load the module
 
-```bash
-    $ module load MATLAB
+```console
+$ ml MATLAB
 ```
 
 The EDU variant is marked as default. If you need another version or variant, load the particular version. To obtain the list of available versions use
 
-```bash
-    $ module avail MATLAB
+```console
+$ module avail MATLAB
 ```
 
 If you need to use the Matlab GUI to prepare your Matlab programs, you can use Matlab directly on the login nodes. But for all computations use Matlab on the compute nodes via PBS Pro scheduler.
@@ -27,14 +27,14 @@ Matlab GUI is quite slow using the X forwarding built in the PBS (qsub -X), so u
 
 To run Matlab with GUI, use
 
-```bash
-    $ matlab
+```console
+$ matlab
 ```
 
 To run Matlab in text mode, without the Matlab Desktop GUI environment, use
 
-```bash
-    $ matlab -nodesktop -nosplash
+```console
+$ matlab -nodesktop -nosplash
 ```
 
 plots, images, etc. will still be available.
@@ -49,7 +49,7 @@ Delete previously used file mpiLibConf.m, we have observed crashes when using In
 
 To use Distributed Computing, you first need to setup a parallel profile. We have provided the profile for you, you can either import it in MATLAB command line:
 
-```bash
+```console
     > parallel.importProfile('/apps/all/MATLAB/2015b-EDU/SalomonPBSPro.settings')
 
     ans =
@@ -67,10 +67,9 @@ With the new mode, MATLAB itself launches the workers via PBS, so you can either
 
 Following example shows how to start interactive session with support for Matlab GUI. For more information about GUI based applications on Anselm see [this page](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/).
 
-```bash
-    $ xhost +
-    $ qsub -I -v DISPLAY=$(uname -n):$(echo $DISPLAY | cut -d ':' -f 2) -A NONE-0-0 -q qexp -l select=1 -l walltime=00:30:00
-    -l feature__matlab__MATLAB=1
+```console
+$ xhost +
+$ qsub -I -v DISPLAY=$(uname -n):$(echo $DISPLAY | cut -d ':' -f 2) -A NONE-0-0 -q qexp -l select=1 -l walltime=00:30:00 -l feature__matlab__MATLAB=1
 ```
 
 This qsub command example shows how to run Matlab on a single node.
@@ -79,8 +78,8 @@ The second part of the command shows how to request all necessary licenses. In t
 
 Once the access to compute nodes is granted by PBS, user can load following modules and start Matlab:
 
-```bash
-    r1i0n17$ module load MATLAB/2015a-EDU
+```console
+    r1i0n17$ ml MATLAB/2015a-EDU
     r1i0n17$ matlab &
 ```
 
@@ -115,15 +114,15 @@ This script may be submitted directly to the PBS workload manager via the qsub c
 
 Submit the jobscript using qsub
 
-```bash
-    $ qsub ./jobscript
+```console
+$ qsub ./jobscript
 ```
 
 ### Parallel Matlab Local Mode Program Example
 
 The last part of the configuration is done directly in the user Matlab script before Distributed Computing Toolbox is started.
 
-```bash
+```console
     cluster = parcluster('local')
 ```
 
@@ -134,7 +133,7 @@ This script creates scheduler object "cluster" of type "local" that starts worke
 
 The last step is to start matlabpool with "cluster" object and correct number of workers. We have 24 cores per node, so we start 24 workers.
 
-```bash
+```console
     parpool(cluster,24);
 
 
@@ -146,7 +145,7 @@ The last step is to start matlabpool with "cluster" object and correct number of
 
 The complete example showing how to use Distributed Computing Toolbox in local mode is shown here.
 
-```bash
+```console
     cluster = parcluster('local');
     cluster
 
@@ -179,7 +178,7 @@ This mode uses PBS scheduler to launch the parallel pool. It uses the SalomonPBS
 
 This is an example of m-script using PBS mode:
 
-```bash
+```console
     cluster = parcluster('SalomonPBSPro');
     set(cluster, 'SubmitArguments', '-A OPEN-0-0');
     set(cluster, 'ResourceTemplate', '-q qprod -l select=10:ncpus=24');
@@ -220,7 +219,7 @@ For this method, you need to use SalomonDirect profile, import it using [the sam
 
 This is an example of m-script using direct mode:
 
-```bash
+```console
     parallel.importProfile('/apps/all/MATLAB/2015b-EDU/SalomonDirect.settings')
     cluster = parcluster('SalomonDirect');
     set(cluster, 'NumWorkers', 48);
diff --git a/docs.it4i/salomon/software/numerical-languages/octave.md b/docs.it4i/salomon/software/numerical-languages/octave.md
index 6461bc4cc003b806d0f75320d58d5c9009ab5b8b..5c679dd1b87e587965d802f2845997b755254fa2 100644
--- a/docs.it4i/salomon/software/numerical-languages/octave.md
+++ b/docs.it4i/salomon/software/numerical-languages/octave.md
@@ -8,16 +8,16 @@ Two versions of octave are available on the cluster, via module
 | ---------- | ------------ | ------ |
 | **Stable** | Octave 3.8.2 | Octave |
 
-```bash
-    $ module load Octave
+```console
+$ ml Octave
 ```
 
 The octave on the cluster is linked to highly optimized MKL mathematical library. This provides threaded parallelization to many octave kernels, notably the linear algebra subroutines. Octave runs these heavy calculation kernels without any penalty. By default, octave would parallelize to 24 threads. You may control the threads by setting the OMP_NUM_THREADS environment variable.
 
 To run octave interactively, log in with ssh -X parameter for X11 forwarding. Run octave:
 
-```bash
-    $ octave
+```console
+$ octave
 ```
 
 To run octave in batch mode, write an octave script, then write a bash jobscript and execute via the qsub command. By default, octave will use 24 threads when running MKL kernels.
@@ -49,8 +49,8 @@ This script may be submitted directly to the PBS workload manager via the qsub c
 
 The octave c compiler mkoctfile calls the GNU gcc 4.8.1 for compiling native c code. This is very useful for running native c subroutines in octave environment.
 
-```bash
-    $ mkoctfile -v
+```console
+$ mkoctfile -v
 ```
 
 Octave may use MPI for interprocess communication. This functionality is currently not supported on the cluster. In case you require the octave interface to MPI, please contact our [cluster support](https://support.it4i.cz/rt/).
diff --git a/docs.it4i/salomon/software/numerical-languages/r.md b/docs.it4i/salomon/software/numerical-languages/r.md
index 6a01926e1b69bdd97d695d19b7a056419408acde..6df515adad043a581ce3da7855737194b1c250ae 100644
--- a/docs.it4i/salomon/software/numerical-languages/r.md
+++ b/docs.it4i/salomon/software/numerical-languages/r.md
@@ -21,8 +21,8 @@ The R version 3.1.1 is available on the cluster, along with GUI interface Rstudi
 | **R**       | R 3.1.1           | R/3.1.1-intel-2015b |
 | **Rstudio** | Rstudio 0.98.1103 | Rstudio             |
 
-```bash
-    $ module load R
+```console
+$ ml R
 ```
 
 ## Execution
@@ -33,9 +33,9 @@ The R on Anselm is linked to highly optimized MKL mathematical library. This pro
 
 To run R interactively, using Rstudio GUI, log in with ssh -X parameter for X11 forwarding. Run rstudio:
 
-```bash
-    $ module load Rstudio
-    $ rstudio
+```console
+$ ml Rstudio
+$ rstudio
 ```
 
 ### Batch Execution
@@ -45,25 +45,25 @@ To run R in batch mode, write an R script, then write a bash jobscript and execu
 Example jobscript:
 
 ```bash
-    #!/bin/bash
+#!/bin/bash
 
-    # change to local scratch directory
-    cd /lscratch/$PBS_JOBID || exit
+# change to local scratch directory
+cd /lscratch/$PBS_JOBID || exit
 
-    # copy input file to scratch
-    cp $PBS_O_WORKDIR/rscript.R .
+# copy input file to scratch
+cp $PBS_O_WORKDIR/rscript.R .
 
-    # load R module
-    module load R
+# load R module
+module load R
 
-    # execute the calculation
-    R CMD BATCH rscript.R routput.out
+# execute the calculation
+R CMD BATCH rscript.R routput.out
 
-    # copy output file to home
-    cp routput.out $PBS_O_WORKDIR/.
+# copy output file to home
+cp routput.out $PBS_O_WORKDIR/.
 
-    #exit
-    exit
+#exit
+exit
 ```
 
 This script may be submitted directly to the PBS workload manager via the qsub command.  The inputs are in rscript.R file, outputs in routput.out file. See the single node jobscript example in the [Job execution section](../../job-submission-and-execution/).
@@ -78,17 +78,17 @@ The package parallel provides support for parallel computation, including by for
 
 The package is activated this way:
 
-```bash
-    $ R
-    > library(parallel)
+```console
+$ R
+> library(parallel)
 ```
 
 More information and examples may be obtained directly by reading the documentation available in R
 
-```bash
-    > ?parallel
-    > library(help = "parallel")
-    > vignette("parallel")
+```r
+> ?parallel
+> library(help = "parallel")
+> vignette("parallel")
 ```
 
 Download the package [parallell](package-parallel-vignette.pdf) vignette.
@@ -102,42 +102,42 @@ The forking is the most simple to use. Forking family of functions provide paral
 
 Forking example:
 
-```cpp
-    library(parallel)
+```r
+library(parallel)
 
-    #integrand function
-    f <- function(i,h) {
-    x <- h*(i-0.5)
-    return (4/(1 + x*x))
-    }
+#integrand function
+f <- function(i,h) {
+x <- h*(i-0.5)
+return (4/(1 + x*x))
+}
 
-    #initialize
-    size <- detectCores()
+#initialize
+size <- detectCores()
 
-    while (TRUE)
-    {
-      #read number of intervals
-      cat("Enter the number of intervals: (0 quits) ")
-      fp<-file("stdin"); n<-scan(fp,nmax=1); close(fp)
+while (TRUE)
+{
+  #read number of intervals
+  cat("Enter the number of intervals: (0 quits) ")
+  fp<-file("stdin"); n<-scan(fp,nmax=1); close(fp)
 
-      if(n<=0) break
+  if(n<=0) break
 
-      #run the calculation
-      n <- max(n,size)
-      h <-   1.0/n
+  #run the calculation
+  n <- max(n,size)
+  h <-   1.0/n
 
-      i <- seq(1,n);
-      pi3 <- h*sum(simplify2array(mclapply(i,f,h,mc.cores=size)));
+  i <- seq(1,n);
+  pi3 <- h*sum(simplify2array(mclapply(i,f,h,mc.cores=size)));
 
-      #print results
-      cat(sprintf("Value of PI %16.14f, diff= %16.14fn",pi3,pi3-pi))
-    }
+  #print results
+  cat(sprintf("Value of PI %16.14f, diff= %16.14fn",pi3,pi3-pi))
+}
 ```
 
 The above example is the classic parallel example for calculating the number π. Note the **detectCores()** and **mclapply()** functions. Execute the example as:
 
-```bash
-    $ R --slave --no-save --no-restore -f pi3p.R
+```console
+$ R --slave --no-save --no-restore -f pi3p.R
 ```
 
 Every evaluation of the integrand function runs in parallel on a different process.
@@ -152,9 +152,9 @@ Read more on Rmpi at <http://cran.r-project.org/web/packages/Rmpi/>, reference m
 
 When using package Rmpi, both openmpi and R modules must be loaded
 
-```bash
-    $ module load OpenMPI
-    $ module load R
+```console
+$ ml OpenMPI
+$ ml R
 ```
 
 Rmpi may be used in three basic ways. The static approach is identical to executing any other MPI program. In addition, there is the Rslaves dynamic MPI approach and the mpi.apply approach. In the following section, we will use the number π integration example to illustrate all these concepts.
@@ -165,7 +165,7 @@ Static Rmpi programs are executed via mpiexec, as any other MPI programs. Number
 
 Static Rmpi example:
 
-```cpp
+```r
     library(Rmpi)
 
     #integrand function
@@ -211,8 +211,8 @@ Static Rmpi example:
 
 The above is the static MPI example for calculating the number π. Note the **library(Rmpi)** and **mpi.comm.dup()** function calls. Execute the example as:
 
-```bash
-    $ mpirun R --slave --no-save --no-restore -f pi3.R
+```console
+$ mpirun R --slave --no-save --no-restore -f pi3.R
 ```
 
 ### Dynamic Rmpi
@@ -221,7 +221,7 @@ Dynamic Rmpi programs are executed by calling the R directly. OpenMPI module mus
 
 Dynamic Rmpi example:
 
-```cpp
+```r
     #integrand function
     f <- function(i,h) {
     x <- h*(i-0.5)
@@ -283,8 +283,8 @@ The above example is the dynamic MPI example for calculating the number π. Both
 
 Execute the example as:
 
-```bash
-    $ mpirun -np 1 R --slave --no-save --no-restore -f pi3Rslaves.R
+```console
+$ mpirun -np 1 R --slave --no-save --no-restore -f pi3Rslaves.R
 ```
 
 Note that this method uses MPI_Comm_spawn (Dynamic process feature of MPI-2) to start the slave processes - the master process needs to be launched with MPI. In general, Dynamic processes are not well supported among MPI implementations, some issues might arise. Also, environment variables are not propagated to spawned processes, so they will not see paths from modules.
@@ -299,60 +299,60 @@ Execution is identical to other dynamic Rmpi programs.
 
 mpi.apply Rmpi example:
 
-```cpp
-    #integrand function
-    f <- function(i,h) {
-    x <- h*(i-0.5)
-    return (4/(1 + x*x))
-    }
+```r
+#integrand function
+f <- function(i,h) {
+x <- h*(i-0.5)
+return (4/(1 + x*x))
+}
 
-    #the worker function
-    workerpi <- function(rank,size,n)
-    {
-      #run the calculation
-      n <- max(n,size)
-      h <- 1.0/n
+#the worker function
+workerpi <- function(rank,size,n)
+{
+  #run the calculation
+  n <- max(n,size)
+  h <- 1.0/n
 
-      i <- seq(rank,n,size);
-      mypi <- h*sum(sapply(i,f,h));
+  i <- seq(rank,n,size);
+  mypi <- h*sum(sapply(i,f,h));
 
-      return(mypi)
-    }
+  return(mypi)
+}
 
-    #main
-    library(Rmpi)
+#main
+library(Rmpi)
 
-    cat("Enter the number of slaves: ")
-    fp<-file("stdin"); ns<-scan(fp,nmax=1); close(fp)
+cat("Enter the number of slaves: ")
+fp<-file("stdin"); ns<-scan(fp,nmax=1); close(fp)
 
-    mpi.spawn.Rslaves(nslaves=ns)
-    mpi.bcast.Robj2slave(f)
-    mpi.bcast.Robj2slave(workerpi)
+mpi.spawn.Rslaves(nslaves=ns)
+mpi.bcast.Robj2slave(f)
+mpi.bcast.Robj2slave(workerpi)
 
-    while (TRUE)
-    {
-      #read number of intervals
-      cat("Enter the number of intervals: (0 quits) ")
-      fp<-file("stdin"); n<-scan(fp,nmax=1); close(fp)
-      if(n<=0) break
+while (TRUE)
+{
+  #read number of intervals
+  cat("Enter the number of intervals: (0 quits) ")
+  fp<-file("stdin"); n<-scan(fp,nmax=1); close(fp)
+  if(n<=0) break
 
-      #run workerpi
-      i=seq(1,2*ns)
-      pi3=sum(mpi.parSapply(i,workerpi,2*ns,n))
+  #run workerpi
+  i=seq(1,2*ns)
+  pi3=sum(mpi.parSapply(i,workerpi,2*ns,n))
 
-      #print results
-      cat(sprintf("Value of PI %16.14f, diff= %16.14fn",pi3,pi3-pi))
-    }
+  #print results
+  cat(sprintf("Value of PI %16.14f, diff= %16.14f\n",pi3,pi3-pi))
+}
 
-    mpi.quit()
+mpi.quit()
 ```
 
 The above is the mpi.apply MPI example for calculating the number π. Only the slave processes carry out the calculation. Note the **mpi.parSapply()** function call. The package parallel [example](r/#package-parallel) [above](r/#package-parallel) may be trivially adapted (for much better performance) to this structure using the mclapply() in place of mpi.parSapply().
 
 Execute the example as:
 
-```bash
-    $ mpirun -np 1 R --slave --no-save --no-restore -f pi3parSapply.R
+```console
+$ mpirun -np 1 R --slave --no-save --no-restore -f pi3parSapply.R
 ```
 
 ## Combining Parallel and Rmpi
@@ -366,30 +366,30 @@ The R parallel jobs are executed via the PBS queue system exactly as any other p
 Example jobscript for [static Rmpi](r/#static-rmpi) parallel R execution, running 1 process per core:
 
 ```bash
-    #!/bin/bash
-    #PBS -q qprod
-    #PBS -N Rjob
-    #PBS -l select=100:ncpus=24:mpiprocs=24:ompthreads=1
+#!/bin/bash
+#PBS -q qprod
+#PBS -N Rjob
+#PBS -l select=100:ncpus=24:mpiprocs=24:ompthreads=1
 
-    # change to scratch directory
-    SCRDIR=/scratch/work/user/$USER/myjob
-    cd $SCRDIR || exit
+# change to scratch directory
+SCRDIR=/scratch/work/user/$USER/myjob
+cd $SCRDIR || exit
 
-    # copy input file to scratch
-    cp $PBS_O_WORKDIR/rscript.R .
+# copy input file to scratch
+cp $PBS_O_WORKDIR/rscript.R .
 
-    # load R and openmpi module
-    module load R
-    module load OpenMPI
+# load R and openmpi module
+module load R
+module load OpenMPI
 
-    # execute the calculation
-    mpiexec -bycore -bind-to-core R --slave --no-save --no-restore -f rscript.R
+# execute the calculation
+mpiexec -bycore -bind-to-core R --slave --no-save --no-restore -f rscript.R
 
-    # copy output file to home
-    cp routput.out $PBS_O_WORKDIR/.
+# copy output file to home
+cp routput.out $PBS_O_WORKDIR/.
 
-    #exit
-    exit
+#exit
+exit
 ```
 
 For more information about jobscripts and MPI execution refer to the [Job submission](../../job-submission-and-execution/) and general [MPI](../mpi/mpi/) sections.
@@ -398,8 +398,8 @@ For more information about jobscripts and MPI execution refer to the [Job submis
 
 By leveraging MKL, R can accelerate certain computations, most notably linear algebra operations on the Xeon Phi accelerator by using Automated Offload. To use MKL Automated Offload, you need to first set this environment variable before R execution:
 
-```bash
-    $ export MKL_MIC_ENABLE=1
+```console
+$ export MKL_MIC_ENABLE=1
 ```
 
 [Read more about automatic offload](../intel-xeon-phi/)
diff --git a/docs.it4i/salomon/software/paraview.md b/docs.it4i/salomon/software/paraview.md
new file mode 100644
index 0000000000000000000000000000000000000000..ce52d69cfa423588bc43361249a19261197e34e5
--- /dev/null
+++ b/docs.it4i/salomon/software/paraview.md
@@ -0,0 +1,81 @@
+# ParaView
+
+Open-Source, Multi-Platform Data Analysis and Visualization Application
+
+## Introduction
+
+**ParaView** is an open-source, multi-platform data analysis and visualization application. ParaView users can quickly build visualizations to analyze their data using qualitative and quantitative techniques. The data exploration can be done interactively in 3D or programmatically using ParaView's batch processing capabilities.
+
+ParaView was developed to analyze extremely large datasets using distributed memory computing resources. It can be run on supercomputers to analyze datasets of exascale size as well as on laptops for smaller data.
+
+Homepage: <http://www.paraview.org/>
+
+## Installed Version
+
+Currently, version 5.1.2 compiled with intel/2017a against intel MPI library and OSMesa 12.0.2 is installed on Salomon.
+
+## Usage
+
+On Salomon, ParaView is to be used in client-server mode. A parallel ParaView server is launched on compute nodes by the user, and client is launched on your desktop PC to control and view the visualization. Download ParaView client application for your OS here: <http://paraview.org/paraview/resources/software.php>.
+
+!!! warning
+    Your version must match the version number installed on Salomon.
+
+### Launching Server
+
+To launch the server, you must first allocate compute nodes, for example
+
+```console
+$ qsub -I -q qprod -A OPEN-0-0 -l select=2
+```
+
+to launch an interactive session on 2 nodes. Refer to [Resource Allocation and Job Execution](../job-submission-and-execution/) for details.
+
+After the interactive session is opened, load the ParaView module:
+
+```console
+$ ml ParaView/5.1.2-intel-2017a-mpi
+```
+
+Now launch the parallel server, with number of nodes times 24 processes:
+
+```console
+$ mpirun -np 48 pvserver --use-offscreen-rendering
+    Waiting for client...
+    Connection URL: cs://r37u29n1006:11111
+    Accepting connection(s): r37u29n1006:11111
+```
+
+Note that the server is listening on compute node r37u29n1006 in this case; we shall use this information later.
+
+### Client Connection
+
+Because a direct connection is not allowed to compute nodes on Salomon, you must establish an SSH tunnel to connect to the server. Choose a port number on your PC to be forwarded to the ParaView server, for example 12345. If your PC is running Linux, use this command to establish an SSH tunnel:
+
+```console
+$ ssh -TN -L 12345:r37u29n1006:11111 username@salomon.it4i.cz
+```
+
+replace username with your login and r37u29n1006 with the name of the compute node your ParaView server is running on (see previous step).
+
+If you use PuTTY on Windows, load Salomon connection configuration, then go to *Connection* -> *SSH* -> *Tunnels* to set up the port forwarding.
+
+Fill the Source port and Destination fields. **Do not forget to click the Add button.**
+
+![](../../img/paraview_ssh_tunnel_salomon.png "SSH Tunnel in PuTTY")
+
+Now launch ParaView client installed on your desktop PC. Select *File* -> *Connect...* and fill in the following :
+
+![](../../img/paraview_connect_salomon.png "ParaView - Connect to server")
+
+The configuration is now saved for later use. Now click Connect to connect to the ParaView server. In your terminal where you have interactive session with ParaView server launched, you should see:
+
+```console
+Client connected.
+```
+
+You can now use Parallel ParaView.
+
+### Close Server
+
+Remember to close the interactive session after you finish working with ParaView server, as it will remain launched even after your client is disconnected and will continue to consume resources.
diff --git a/docs.it4i/salomon/storage.md b/docs.it4i/salomon/storage.md
index 8c3e651bca8dc33cc6fcb6283d6cd9778a4fd7dd..544193826d08525400dd1c87d476cfbc09a2cac5 100644
--- a/docs.it4i/salomon/storage.md
+++ b/docs.it4i/salomon/storage.md
@@ -65,14 +65,14 @@ There is default stripe configuration for Salomon Lustre file systems. However,
 
 Use the lfs getstripe for getting the stripe parameters. Use the lfs setstripe command for setting the stripe parameters to get optimal I/O performance The correct stripe setting depends on your needs and file access patterns.
 
-```bash
+```console
 $ lfs getstripe dir | filename
 $ lfs setstripe -s stripe_size -c stripe_count -o stripe_offset dir | filename
 ```
 
 Example:
 
-```bash
+```console
 $ lfs getstripe /scratch/work/user/username
 /scratch/work/user/username
 stripe_count:   1 stripe_size:    1048576 stripe_offset:  -1
@@ -87,7 +87,7 @@ In this example, we view current stripe setting of the /scratch/username/ direct
 
 Use lfs check OSTs to see the number and status of active OSTs for each file system on Salomon. Learn more by reading the man page
 
-```bash
+```console
 $ lfs check osts
 $ man lfs
 ```
@@ -112,13 +112,13 @@ Read more on <http://wiki.lustre.org/manual/LustreManual20_HTML/ManagingStriping
 
 User quotas on the Lustre file systems (SCRATCH) can be checked and reviewed using following command:
 
-```bash
+```console
 $ lfs quota dir
 ```
 
 Example for Lustre SCRATCH directory:
 
-```bash
+```console
 $ lfs quota /scratch
 Disk quotas for user user001 (uid 1234):
      Filesystem kbytes   quota   limit   grace   files   quota   limit   grace
@@ -132,14 +132,14 @@ In this example, we view current quota size limit of 100TB and 8KB currently use
 
 HOME directory is mounted via NFS, so a different command must be used to obtain quota information:
 
-```bash
-     $ quota
+```console
+$ quota
 ```
 
 Example output:
 
-```bash
-    $ quota
+```console
+$ quota
     Disk quotas for user vop999 (uid 1025):
          Filesystem blocks   quota   limit   grace   files   quota   limit   grace
     home-nfs-ib.salomon.it4i.cz:/home
@@ -148,13 +148,13 @@ Example output:
 
 To have a better understanding of where the space is exactly used, you can use following command to find out.
 
-```bash
+```console
 $ du -hs dir
 ```
 
 Example for your HOME directory:
 
-```bash
+```console
 $ cd /home
 $ du -hs * .[a-zA-z0-9]* | grep -E "[0-9]*G|[0-9]*M" | sort -hr
 258M     cuda-samples
@@ -168,11 +168,11 @@ This will list all directories which are having MegaBytes or GigaBytes of consum
 
 To have a better understanding of previous commands, you can read manpages.
 
-```bash
+```console
 $ man lfs
 ```
 
-```bash
+```console
 $ man du
 ```
 
@@ -182,7 +182,7 @@ Extended ACLs provide another security mechanism beside the standard POSIX ACLs
 
 ACLs on a Lustre file system work exactly like ACLs on any Linux file system. They are manipulated with the standard tools in the standard manner. Below, we create a directory and allow a specific user access.
 
-```bash
+```console
 [vop999@login1.salomon ~]$ umask 027
 [vop999@login1.salomon ~]$ mkdir test
 [vop999@login1.salomon ~]$ ls -ld test
@@ -270,6 +270,7 @@ The TEMP workspace resides on SCRATCH file system. The TEMP workspace accesspoin
 
     Users are advised to save the necessary data from the TEMP workspace to HOME or WORK after the calculations and clean up the scratch files.
 
+!!! warning 
     Files on the TEMP file system that are **not accessed for more than 90 days** will be automatically **deleted**.
 
 The TEMP workspace is hosted on SCRATCH file system. The SCRATCH is realized as Lustre parallel file system and is available from all login and computational nodes. Default stripe size is 1 MB, stripe count is 1. There are 54 OSTs dedicated for the SCRATCH file system.
@@ -343,7 +344,7 @@ The procedure to obtain the CESNET access is quick and trouble-free.
 ### Understanding CESNET Storage
 
 !!! note
-    It is very important to understand the CESNET storage before uploading data. [Please read](<https://du.cesnet.cz/en/navody/home-migrace-plzen/start> first>)
+    It is very important to understand the CESNET storage before uploading data. [Please read](https://du.cesnet.cz/en/navody/home-migrace-plzen/start) first.
 
 Once registered for CESNET Storage, you may [access the storage](https://du.cesnet.cz/en/navody/faq/start) in number of ways. We recommend the SSHFS and RSYNC methods.
 
@@ -356,40 +357,40 @@ The SSHFS provides a very convenient way to access the CESNET Storage. The stora
 
 First, create the mount point
 
-```bash
-    $ mkdir cesnet
+```console
+$ mkdir cesnet
 ```
 
 Mount the storage. Note that you can choose among the ssh.du1.cesnet.cz (Plzen), ssh.du2.cesnet.cz (Jihlava), ssh.du3.cesnet.cz (Brno) Mount tier1_home **(only 5120M !)**:
 
-```bash
-    $ sshfs username@ssh.du1.cesnet.cz:. cesnet/
+```console
+$ sshfs username@ssh.du1.cesnet.cz:. cesnet/
 ```
 
 For easy future access from Anselm, install your public key
 
-```bash
-    $ cp .ssh/id_rsa.pub cesnet/.ssh/authorized_keys
+```console
+$ cp .ssh/id_rsa.pub cesnet/.ssh/authorized_keys
 ```
 
 Mount tier1_cache_tape for the Storage VO:
 
-```bash
-    $ sshfs username@ssh.du1.cesnet.cz:/cache_tape/VO_storage/home/username cesnet/
+```console
+$ sshfs username@ssh.du1.cesnet.cz:/cache_tape/VO_storage/home/username cesnet/
 ```
 
 View the archive, copy the files and directories in and out
 
-```bash
-    $ ls cesnet/
-    $ cp -a mydir cesnet/.
-    $ cp cesnet/myfile .
+```console
+$ ls cesnet/
+$ cp -a mydir cesnet/.
+$ cp cesnet/myfile .
 ```
 
 Once done, please remember to unmount the storage
 
-```bash
-    $ fusermount -u cesnet
+```console
+$ fusermount -u cesnet
 ```
 
 ### Rsync Access
@@ -405,16 +406,16 @@ More about Rsync at [here](https://du.cesnet.cz/en/navody/rsync/start#pro_bezne_
 
 Transfer large files to/from CESNET storage, assuming membership in the Storage VO
 
-```bash
-    $ rsync --progress datafile username@ssh.du1.cesnet.cz:VO_storage-cache_tape/.
-    $ rsync --progress username@ssh.du1.cesnet.cz:VO_storage-cache_tape/datafile .
+```console
+$ rsync --progress datafile username@ssh.du1.cesnet.cz:VO_storage-cache_tape/.
+$ rsync --progress username@ssh.du1.cesnet.cz:VO_storage-cache_tape/datafile .
 ```
 
 Transfer large directories to/from CESNET storage, assuming membership in the Storage VO
 
-```bash
-    $ rsync --progress -av datafolder username@ssh.du1.cesnet.cz:VO_storage-cache_tape/.
-    $ rsync --progress -av username@ssh.du1.cesnet.cz:VO_storage-cache_tape/datafolder .
+```console
+$ rsync --progress -av datafolder username@ssh.du1.cesnet.cz:VO_storage-cache_tape/.
+$ rsync --progress -av username@ssh.du1.cesnet.cz:VO_storage-cache_tape/datafolder .
 ```
 
 Transfer rates of about 28 MB/s can be expected.
diff --git a/docs.it4i/snippets/fairshare_formula.md b/docs.it4i/snippets/fairshare_formula.md
new file mode 100644
index 0000000000000000000000000000000000000000..eb8ee4a882c077de8c19aca6b04dfc1f3abfaf53
--- /dev/null
+++ b/docs.it4i/snippets/fairshare_formula.md
@@ -0,0 +1,3 @@
+$$
+MAX\_FAIRSHARE * ( 1 - \frac{usage_{Project}}{usage_{Total}} )
+$$
diff --git a/docs.it4i/snippets/job_sort_formula.md b/docs.it4i/snippets/job_sort_formula.md
new file mode 100644
index 0000000000000000000000000000000000000000..ef66c56471a348109cdc5b725f840cefc671bd20
--- /dev/null
+++ b/docs.it4i/snippets/job_sort_formula.md
@@ -0,0 +1,3 @@
+$$
+1000*queue\_priority + \frac{fairshare\_priority}{1000} + \frac{eligible\_time}{864000}
+$$
diff --git a/docs.it4i/snippets/mathjax.md b/docs.it4i/snippets/mathjax.md
new file mode 100644
index 0000000000000000000000000000000000000000..4a3c0d865da60711f22c39eaa6e6757337745086
--- /dev/null
+++ b/docs.it4i/snippets/mathjax.md
@@ -0,0 +1,23 @@
+<script type="text/x-mathjax-config">
+MathJax.Hub.Config({
+    config: ["MMLorHTML.js"],
+    jax: ["input/TeX", "output/HTML-CSS", "output/NativeMML"],
+    extensions: ["tex2jax.js", "MathMenu.js", "MathZoom.js"],
+    tex2jax: {
+        inlineMath: [ ["\\(","\\)"] ],
+        displayMath: [ ["\\[","\\]"] ]
+    },
+    TeX: {
+        TagSide: "right",
+        TagIndent: ".8em",
+        MultLineWidth: "85%",
+        equationNumbers: {
+            autoNumber: "AMS",
+        }
+    },
+    displayAlign: 'left',
+    showProcessingMessages: false,
+    messageStyle: 'none'
+});
+</script>
+<script src="https://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
diff --git a/docs.it4i/snippets/modules_matrix_search.md b/docs.it4i/snippets/modules_matrix_search.md
new file mode 100644
index 0000000000000000000000000000000000000000..b936364e15723838e1609be9cfb57df526496e85
--- /dev/null
+++ b/docs.it4i/snippets/modules_matrix_search.md
@@ -0,0 +1,41 @@
+<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.1.1/jquery.slim.min.js" integrity="sha256-/SIrNqv8h6QGKDuNoLGA4iret+kyesCkHGzVUUV0shc=" crossorigin="anonymous"></script>
+<script>
+// override to case insensitive search
+$.expr[":"].contains = $.expr.createPseudo(function(arg) {
+    return function( elem ) {
+        return $(elem).text().toUpperCase().indexOf(arg.toUpperCase()) >= 0;
+    };
+});
+$("#searchInput").keyup(function () {
+    //split the current value of searchInput
+    var data = this.value.split(" ");
+    //create a jquery object of the rows
+    var jo = $("tbody").find("tr");
+    if (this.value == "") {
+        jo.show();
+        return;
+    }
+    //hide all the rows
+    jo.hide();
+
+    //Recusively filter the jquery object to get results.
+    jo.filter(function (i, v) {
+        var $t = $(this);
+        for (var d = 0; d < data.length; ++d) {
+            if ($t.is(":contains('" + data[d] + "')")) {
+                return true;
+            }
+        }
+        return false;
+    })
+    //show the rows that match.
+    .show();
+}).focus(function () {
+    this.value = "";
+    $(this).css({
+        "color": "black"
+    });
+}).css({
+	    "color": "#C0C0C0"
+});
+</script>
diff --git a/docs.it4i/software/bioinformatics.md b/docs.it4i/software/bioinformatics.md
index 76991fe7810ea45fdf7a77ed1cd03adf20a79152..91de9ca9cce57d66c005ee919a0444a852660fac 100644
--- a/docs.it4i/software/bioinformatics.md
+++ b/docs.it4i/software/bioinformatics.md
@@ -6,7 +6,7 @@ In addition to the many applications available through modules (deployed through
 
 ## Starting the Environment
 
-```bash
+```console
 mmokrejs@login2~$ /apps/gentoo/startprefix
 ```
 
@@ -14,7 +14,7 @@ mmokrejs@login2~$ /apps/gentoo/startprefix
 
 Create a template file which can be used and an argument to qsub command. Notably, the 'PBS -S' line specifies full PATH to the Bourne shell of the Gentoo Linux environment.
 
-```bash
+```console
 mmokrejs@login2~$ cat myjob.pbs
 #PBS -S /apps/gentoo/bin/sh
 #PBS -l nodes=1:ppn=16,walltime=12:00:00
@@ -37,14 +37,14 @@ $ qstat
 
 ## Reading Manual Pages for Installed Applications
 
-```bash
+```console
 mmokrejs@login2~$ man -M /apps/gentoo/usr/share/man bwa
 mmokrejs@login2~$ man -M /apps/gentoo/usr/share/man samtools
 ```
 
 ## Listing of Bioinformatics Applications
 
-```bash
+```console
 mmokrejs@login2~$ grep biology /scratch/mmokrejs/gentoo_rap/installed.txt
 sci-biology/ANGLE-bin-20080813-r1
 sci-biology/AlignGraph-9999
@@ -172,7 +172,7 @@ sci-biology/velvetk-20120606
 sci-biology/zmsort-110625
 ```
 
-```bash
+```console
 mmokrejs@login2~$ grep sci-libs /scratch/mmokrejs/gentoo_rap/installed.txt
 sci-libs/amd-2.3.1
 sci-libs/blas-reference-20151113-r1
@@ -228,7 +228,7 @@ sci-libs/umfpack-5.6.2
 
 Gentoo Linux allows compilation of its applications from source code using compiler and optimization flags set to the user's wish. This facilitates the creation of optimized binaries for the host platform. Users may also use several versions of gcc, python and other tools.
 
-```bash
+```console
 mmokrejs@login2~$ gcc-config -l
 mmokrejs@login2~$ java-config -L
 mmokrejs@login2~$ eselect
diff --git a/docs.it4i/software/easybuild.md b/docs.it4i/software/easybuild.md
new file mode 100644
index 0000000000000000000000000000000000000000..344b04ce895052d2b34883b6043d0c9f8cebcdea
--- /dev/null
+++ b/docs.it4i/software/easybuild.md
@@ -0,0 +1,442 @@
+# EasyBuild
+
+The objective of this tutorial is to show how EasyBuild can be used to ease, automate and script the build of software on the IT4Innovations clusters. Two use-cases are considered. First, we are going to build software that is supported by EasyBuild. In a second time, we will see through a simple example how to add support for a new software in EasyBuild.
+
+The benefit of using EasyBuild for your builds is that it allows automated and reproducible builds of software. Once a build has been made, the build script (via the EasyConfig file) or the installed software (via the module file) can be shared with other users.
+
+!!! Warning ""
+    You need to have Lmod activated to have the modules produced in Lua language.
+
+## Short Introduction
+
+EasyBuild is a tool that allows you to perform automated and reproducible compilation and installation of software.
+
+All builds and installations are performed at user level, so you don't need the admin rights. The software is installed in your home directory (by default in `$HOME/.local/easybuild/software/`) and a module file is generated (by default in `$HOME/.local/easybuild/modules/`) to use the software.
+
+EasyBuild relies on two main concepts
+
+ * Toolchains
+ * EasyConfig file (our easyconfigs is [here](https://code.it4i.cz/sccs/easyconfigs-it4i))
+
+Detailed documentation is available [here](http://easybuild.readthedocs.io).
+
+## Toolchains
+
+A toolchain corresponds to a compiler and a set of libraries which are commonly used to build a software. The two main toolchains frequently used on the IT4Innovations clusters are the **foss** and **intel**.
+
+ * **foss** is based on the GCC compiler and on open-source libraries (OpenMPI, OpenBLAS, etc.).
+ * **intel** is based on the Intel compiler and on Intel libraries (Intel MPI, Intel Math Kernel Library, etc.).
+
+Additional details are available [here](https://github.com/hpcugent/easybuild/wiki/Compiler-toolchains).
+
+## EasyConfig File
+
+An EasyConfig file is a simple text file that describes the build process of a software. For most software that uses standard procedure (like configure, make and make install), this file is very simple. Many EasyConfig files are already provided with EasyBuild.
+
+By default, EasyConfig files and generated modules are named using the following convention
+
+`software-name-software-version-toolchain-name-toolchain-version(-suffix).eb`
+
+Additional details are available [here](https://github.com/hpcugent/easybuild-easyconfigs).
+
+## EasyBuild on IT4Innovations Clusters
+
+To use EasyBuild on a compute node, load the EasyBuild module:
+
+```console
+$ ml av EasyBuild
+
+-------------------------- /apps/modules/modulefiles/tools ---------------------
+  EasyBuild/2.8.1    EasyBuild/3.0.0    EasyBuild/3.0.2    EasyBuild/3.1.0 (S,D)
+
+  Where:
+   S:  Module is Sticky, requires --force to unload or purge
+   D:  Default Module
+
+$ ml EasyBuild
+```
+
+The EasyBuild command is eb. Check the version you have loaded:
+
+```console
+$ eb --version
+This is EasyBuild 3.1.0 (framework: 3.1.0, easyblocks: 3.1.0) on host login2
+```
+
+To get help on the EasyBuild options, use the -h or -H option flags:
+
+```console
+$ eb -h
+Usage: eb [options] easyconfig [...]
+
+Builds software based on easyconfig (or parse a directory). Provide one or
+more easyconfigs or directories, use -H or --help more information.
+
+Options:
+  -h                show short help message and exit
+  -H OUTPUT_FORMAT  show full help message and exit
+
+  Debug and logging options (configfile section MAIN):
+    -d              Enable debug log mode (def False)
+
+  Basic options:
+    Basic runtime options for EasyBuild. (configfile section basic)
+...
+```
+
+## Build Software Using Provided EasyConfig File
+
+### Search For Available Easyconfig
+
+Searching for available easyconfig files can be done using the **--search** (long output) and **-S** (short output) command line options. All easyconfig files available in the robot search path are considered and searching is done case-insensitive.
+
+```console
+$ eb -S git
+CFGS1=/apps/easybuild/easyconfigs/easybuild/easyconfigs
+ * $CFGS1/g/git-lfs/git-lfs-1.1.1.eb
+ * $CFGS1/g/git/git-1.7.12-goalf-1.1.0-no-OFED.eb
+ * $CFGS1/g/git/git-1.7.12-goolf-1.4.10.eb
+ * $CFGS1/g/git/git-1.7.12-ictce-4.0.6.eb
+ * $CFGS1/g/git/git-1.7.12-ictce-5.3.0.eb
+ * $CFGS1/g/git/git-1.8.2-cgmpolf-1.1.6.eb
+ * $CFGS1/g/git/git-1.8.2-cgmvolf-1.1.12rc1.eb
+ * $CFGS1/g/git/git-1.8.2-cgmvolf-1.2.7.eb
+ * $CFGS1/g/git/git-1.8.2-cgoolf-1.1.7.eb
+ * $CFGS1/g/git/git-1.8.2-gmvolf-1.7.12.eb
+ * $CFGS1/g/git/git-1.8.2-gmvolf-1.7.12rc1.eb
+ * $CFGS1/g/git/git-1.8.2-goolf-1.4.10.eb
+ * $CFGS1/g/git/git-1.8.3.1-goolf-1.4.10.eb
+ * $CFGS1/g/git/git-1.8.5.6-GCC-4.9.2.eb
+ * $CFGS1/g/git/git-2.10.2.eb
+ * $CFGS1/g/git/git-2.11.0-GNU-4.9.3-2.25.eb
+ * $CFGS1/g/git/git-2.11.0.eb
+ * $CFGS1/g/git/git-2.2.2-GCC-4.9.2.eb
+ * $CFGS1/g/git/git-2.4.1-GCC-4.9.2.eb
+ * $CFGS1/g/git/git-2.7.3-GNU-4.9.3-2.25.eb
+ * $CFGS1/g/git/git-2.7.3-foss-2015g.eb
+ * $CFGS1/g/git/git-2.8.0-GNU-4.9.3-2.25.eb
+ * $CFGS1/g/git/git-2.8.0-foss-2016a.eb
+ * $CFGS1/g/git/git-2.8.0-intel-2017.00.eb
+ * $CFGS1/g/git/git-2.8.0.eb
+```
+
+### Get an Overview of Planned Installations
+
+You can do a “dry-run” overview by supplying **-D**/**--dry-run** (typically combined with **--robot**, in the form of **-Dr**):
+
+```console
+$ eb git-2.8.0.eb -Dr
+eb git-2.8.0.eb -Dr
+== temporary log file in case of crash /tmp/eb-JcU1eA/easybuild-emly2F.log
+Dry run: printing build status of easyconfigs and dependencies
+CFGS=/apps/easybuild/easyconfigs/easybuild/easyconfigs
+ * [x] $CFGS/c/cURL/cURL-7.37.1.eb (module: cURL/7.37.1)
+ * [x] $CFGS/e/expat/expat-2.1.0.eb (module: expat/2.1.0)
+ * [x] $CFGS/g/gettext/gettext-0.19.2.eb (module: gettext/0.19.2)
+ * [x] $CFGS/p/Perl/Perl-5.20.2-bare.eb (module: Perl/5.20.2-bare)
+ * [x] $CFGS/m/M4/M4-1.4.17.eb (module: M4/1.4.17)
+ * [x] $CFGS/a/Autoconf/Autoconf-2.69.eb (module: Autoconf/2.69)
+ * [ ] $CFGS/g/git/git-2.8.0.eb (module: git/2.8.0)
+== Temporary log file(s) /tmp/eb-JcU1eA/easybuild-emly2F.log* have been removed.
+== Temporary directory /tmp/eb-JcU1eA has been removed.
+```
+
+### Compile and Install Module
+
+If we try to build *git-2.8.0.eb*, nothing will be done as it is already installed on the cluster. To enable dependency resolution, use the **--robot** command line option (or **-r** for short):
+
+```console
+$ eb git-2.8.0.eb -r
+== temporary log file in case of crash /tmp/eb-PXe3Zo/easybuild-hEckF4.log
+== git/2.8.0 is already installed (module found), skipping
+== No easyconfigs left to be built.
+== Build succeeded for 0 out of 0
+== Temporary log file(s) /tmp/eb-PXe3Zo/easybuild-hEckF4.log* have been removed.
+== Temporary directory /tmp/eb-PXe3Zo has been removed.
+```
+
+Rebuild *git-2.8.0.eb*. Use eb **--rebuild** to rebuild a given easyconfig/module or use eb **--force**/**-f** to force the reinstallation of a given easyconfig/module. The behavior of **--force** is the same as **--rebuild** and **--ignore-osdeps**.
+
+```console
+$ eb git-2.8.0.eb -r -f
+== temporary log file in case of crash /tmp/eb-JS_Fb5/easybuild-OwJZKn.log
+== resolving dependencies ...
+== processing EasyBuild easyconfig /apps/easybuild/easyconfigs/easybuild/easyconfigs/g/git/git-2.8.0.eb
+== building and installing git/2.8.0...
+== fetching files...
+== creating build dir, resetting environment...
+== unpacking...
+== patching...
+== preparing...
+== configuring...
+== building...
+== testing...
+== installing...
+== taking care of extensions...
+== postprocessing...
+== sanity checking...
+== cleaning up...
+== creating module...
+== permissions...
+== packaging...
+== COMPLETED: Installation ended successfully
+== Results of the build can be found in the log file(s) /apps/all/git/2.8.0/easybuild/easybuild-git-2.8.0-20170221.110059.log
+== Build succeeded for 1 out of 1
+== Temporary log file(s) /tmp/eb-JS_Fb5/easybuild-OwJZKn.log\* have been removed.
+== Temporary directory /tmp/eb-JS_Fb5 has been removed.
+```
+
+If we try to build *git-2.11.0.eb*:
+
+```console
+== temporary log file in case of crash /tmp/eb-JS_Fb5/easybuild-OwXCKn.log
+== resolving dependencies ...
+== processing EasyBuild easyconfig /apps/easybuild/easyconfigs/easybuild/easyconfigs/g/git/git-2.11.0.eb
+== building and installing git/2.11.0...
+== fetching files...
+== creating build dir, resetting environment...
+== unpacking...
+== patching...
+== preparing...
+== configuring...
+== building...
+== testing...
+== installing...
+== taking care of extensions...
+== postprocessing...
+== sanity checking...
+== cleaning up...
+== creating module...
+== permissions...
+== packaging...
+== COMPLETED: Installation ended successfully
+== Results of the build can be found in the log file(s) /apps/all/git/2.11.0/easybuild/easybuild-git-2.11.0-20170221.110059.log
+== Build succeeded for 1 out of 1
+== Temporary log file(s) /tmp/eb-JS_Fb5/easybuild-OwXCKn.log\* have been removed.
+== Temporary directory /tmp/eb-JS_Fb5 has been removed.
+```
+
+If we try to build *git-2.11.1*, but we used easyconfig *git-2.11.0.eb* - change version command **--try-software-version=2.11.1**:
+
+```console
+$ eb git-2.11.0.eb -r --try-software-version=2.11.1
+== temporary log file in case of crash /tmp/eb-oisi0q/easybuild-2rNh7I.log
+== resolving dependencies ...
+== processing EasyBuild easyconfig /tmp/eb-oisi0q/tweaked_easyconfigs/git-2.11.1.eb
+== building and installing git/2.11.1...
+== fetching files...
+== creating build dir, resetting environment...
+== unpacking...
+== patching...
+== preparing...
+== configuring...
+== building...
+== testing...
+== installing...
+== taking care of extensions...
+== postprocessing...
+== sanity checking...
+== cleaning up...
+== creating module...
+== permissions...
+== packaging...
+== COMPLETED: Installation ended successfully
+== Results of the build can be found in the log file(s) /apps/all/git/2.11.1/easybuild/easybuild-git-2.11.1-20170221.111005.log
+== Build succeeded for 1 out of 1
+== Temporary log file(s) /tmp/eb-oisi0q/easybuild-2rNh7I.log\* have been removed.
+== Temporary directory /tmp/eb-oisi0q has been removed.
+```
+
+and try to build *git-2.11.1-intel-2017a*, but we used easyconfig *git-2.11.0.eb* - change toolchains **--try-toolchain-name=intel --try-toolchain-version=2017a** or **--try-toolchain=intel,2017a**:
+
+```console
+$ eb git-2.11.0.eb -r --try-toolchain=intel,2017a
+== temporary log file in case of crash /tmp/eb-oisi0q/easybuild-2Trh7I.log
+== resolving dependencies ...
+== processing EasyBuild easyconfig /tmp/eb-oisi0q/tweaked_easyconfigs/git-2.11.1-intel-2017a.eb
+== building and installing git/2.11.1-intel-2017a...
+== fetching files...
+== creating build dir, resetting environment...
+== unpacking...
+== patching...
+== preparing...
+== configuring...
+== building...
+== testing...
+== installing...
+== taking care of extensions...
+== postprocessing...
+== sanity checking...
+== cleaning up...
+== creating module...
+== permissions...
+== packaging...
+== COMPLETED: Installation ended successfully
+== Results of the build can be found in the log file(s) /apps/all/git/2.11.1-intel-2017a/easybuild/easybuild-git-2.11.1-20170221.111005.log
+== Build succeeded for 1 out of 1
+== Temporary log file(s) /tmp/eb-oisi0q/easybuild-2Trh7I.log\* have been removed.
+== Temporary directory /tmp/eb-oisi0q has been removed.
+```
+
+### MODULEPATH
+
+To see the newly installed modules, you need to add the path where they were installed to the MODULEPATH. On the cluster you have to use the `module use` command:
+
+```console
+$ module use $HOME/.local/easybuild/modules/all/
+```
+
+or modify your `.bash_profile`:
+
+```console
+$ cat ~/.bash_profile
+# .bash_profile
+
+# Get the aliases and functions
+if [ -f ~/.bashrc ]; then
+. ~/.bashrc
+fi
+
+# User specific environment and startup programs
+
+module use $HOME/.local/easybuild/modules/all/
+
+PATH=$PATH:$HOME/bin
+
+export PATH
+```
+
+## Build Software Using Your Own EasyConfig File
+
+For this example, we create an EasyConfig file to build Git 2.11.1 with *foss* toolchain. Open your favorite editor and create a file named *git-2.11.1-foss-2017a.eb* with the following content:
+
+```console
+$ vim git-2.11.1-foss-2017a.eb
+```
+
+```python
+easyblock = 'ConfigureMake'
+
+name = 'git'
+version = '2.11.1'
+
+homepage = 'http://git-scm.com/'
+description = """Git is a free and open source distributed version control system designed
+to handle everything from small to very large projects with speed and efficiency."""
+
+toolchain = {'name': 'foss', 'version': '2017a'}
+
+sources = ['v%(version)s.tar.gz']
+source_urls = ['https://github.com/git/git/archive']
+
+builddependencies = [('Autoconf', '2.69')]
+
+dependencies = [
+    ('cURL', '7.37.1'),
+    ('expat', '2.1.0'),
+    ('gettext', '0.19.2'),
+    ('Perl', '5.20.2'),
+]
+
+preconfigopts = 'make configure && '
+
+# Work around git build system bug.  If LIBS contains -lpthread, then configure
+# will not append -lpthread to LDFLAGS, but Makefile ignores LIBS.
+configopts = "--with-perl=${EBROOTPERL}/bin/perl --enable-pthreads='-lpthread'"
+
+sanity_check_paths = {
+    'files': ['bin/git'],
+    'dirs': [],
+}
+
+moduleclass = 'tools'
+```
+
+This is a simple EasyConfig. Most of the fields are self-descriptive. No build method is explicitly defined, so by default it uses the standard configure/make/make install approach.
+
+Let's build Git with this EasyConfig file:
+
+```console
+$ eb ./git-2.11.1-foss-2017a.eb -r
+== temporary log file in case of crash /tmp/eb-oisi0q/easybuild-2Tii7I.log
+== resolving dependencies ...
+== processing EasyBuild easyconfig /home/username/git-2.11.1-foss-2017a.eb
+== building and installing git/2.11.1-foss-2017a...
+== fetching files...
+== creating build dir, resetting environment...
+== unpacking...
+== patching...
+== preparing...
+== configuring...
+== building...
+== testing...
+== installing...
+== taking care of extensions...
+== postprocessing...
+== sanity checking...
+== cleaning up...
+== creating module...
+== permissions...
+== packaging...
+== COMPLETED: Installation ended successfully
+== Results of the build can be found in the log file(s) /home/username/.local/easybuild/modules/all/git/2.11.1-foss-2017a/easybuild/easybuild-git-2.11.1-20170221.111005.log
+== Build succeeded for 1 out of 1
+== Temporary log file(s) /tmp/eb-oisi0q/easybuild-2Tii7I.log\* have been removed.
+== Temporary directory /tmp/eb-oisi0q has been removed.
+```
+
+We can now check that our version of Git is available via the modules:
+
+```console
+$ ml av git
+
+-------------------------------- /apps/modules/modulefiles/tools -------------------------
+   git/2.8.0-GNU-4.9.3-2.25    git/2.11.0-GNU-4.9.3-2.25    git/2.11.1-GNU-4.9.3-2.25 (D)
+
+-------------------------------- /home/username/.local/easybuild/modules/all -------------
+   git/2.11.1-foss-2017a
+
+  Where:
+   D:  Default Module
+
+  If you need software that is not listed, request it at support@it4i.cz.
+```
+
+## Submitting Build Jobs (Experimental)
+
+Using the **--job** command line option, you can instruct EasyBuild to submit jobs for the installations that should be performed, rather than performing the installations locally on the system you are on.
+
+```console
+$ eb git-2.11.0-GNU-4.9.3-2.25.eb -r --job
+== temporary log file in case of crash /tmp/eb-zeLzBb/easybuild-H_Z0fB.log
+== resolving dependencies ...
+== GC3Pie job overview: 1 submitted (total: 1)
+== GC3Pie job overview: 1 running (total: 1)
+== GC3Pie job overview: 1 running (total: 1)
+== GC3Pie job overview: 1 running (total: 1)
+== GC3Pie job overview: 1 running (total: 1)
+== GC3Pie job overview: 1 running (total: 1)
+== GC3Pie job overview: 1 running (total: 1)
+== GC3Pie job overview: 1 running (total: 1)
+== GC3Pie job overview: 1 running (total: 1)
+== GC3Pie job overview: 1 terminated, 1 ok (total: 1)
+== GC3Pie job overview: 1 terminated, 1 ok (total: 1)
+== Done processing jobs
+== GC3Pie job overview: 1 terminated, 1 ok (total: 1)
+== Submitted parallel build jobs, exiting now
+== Temporary log file(s) /tmp/eb-zeLzBb/easybuild-H_Z0fB.log* have been removed.
+== Temporary directory /tmp/eb-zeLzBb has been removed.
+```
+
+!!! note ""
+    Salomon jobs ... XXXXX.isrv5
+
+    Anselm jobs ... XXXXX.dm2
+
+```console
+$ qstat -u username -w
+                                                                                                    Req'd  Req'd   Elap
+Job ID                         Username        Queue           Jobname         SessID   NDS  TSK   Memory Time  S Time
+------------------------------ --------------- --------------- --------------- -------- ---- ----- ------ ----- - -----
+1319314.dm2                    username        qprod           git-2.11.0-GNU-    85605    1    16    --  24:00 R 00:00:17
+```
diff --git a/docs.it4i/software/eb.md b/docs.it4i/software/eb.md
deleted file mode 100644
index cde35d8897f81963bc1db77a0412c023b6a7c7a0..0000000000000000000000000000000000000000
--- a/docs.it4i/software/eb.md
+++ /dev/null
@@ -1 +0,0 @@
-# EasyBuild
diff --git a/docs.it4i/software/lmod.md b/docs.it4i/software/lmod.md
index 3ddd5cc1d1951de11047ea7cdfca91198d11aa19..24a93cbc8873a822a74950b720973bec6783284e 100644
--- a/docs.it4i/software/lmod.md
+++ b/docs.it4i/software/lmod.md
@@ -2,42 +2,11 @@
 
 Lmod is a modules tool, a modern alternative to the outdated & no longer actively maintained Tcl-based environment modules tool.
 
-Detailed documentation on Lmod is available at [here](http://lmod.readthedocs.io).
+Detailed documentation on Lmod is available [here](http://lmod.readthedocs.io).
 
 !!! warning
     All the new modules will be available in the Lmod environment only.
 
-## Important Dates
-
-| Date       | Action                 |
-| ---------- | ---------------------- |
-| 2017-02-01 | Testing phase          |
-| 2017-03-01 | Global deployment lmod |
-
-## How to Activate Testing Lmod Enviroment?
-
-Create folder or file `.lmod` into your home folder. Logout and login. New Lmod enviroment will be active now.
-
-```bash
-$ mkdir ~/.lmod
-$ logout
-Connection to login4.salomon.it4i.cz closed.
-
-local~$ ssh vop999@login.it4i.cz
-                    _____       _
-                   / ____|     | |
-                  | (___   __ _| | ___  _ __ ___   ___  _ __
-                   \___ \ / _` | |/ _ \| '_ ` _ \ / _ \| '_ \
-                   ____) | (_| | | (_) | | | | | | (_) | | | |
-                  |_____/ \__,_|_|\___/|_| |_| |_|\___/|_| |_|
-
-                        http://www.it4i.cz/?lang=en
-
-$
-$ ml
-No modules loaded
-```
-
 ## Benefits
 
 * significantly more responsive module commands, in particular module avail (ml av)
@@ -65,7 +34,7 @@ Below you will find more details and examples.
 
 To get an overview of the currently loaded modules, use module list or ml (without specifying extra arguments).
 
-```bash
+```console
 $ ml
 Currently Loaded Modules:
    1) EasyBuild/3.0.0 (S)  2) lmod/7.2.2
@@ -80,7 +49,7 @@ Currently Loaded Modules:
 
 To get an overview of all available modules, you can use ml avail or simply ml av:
 
-```bash
+```console
 $ ml av
 ---------------------------------------- /apps/modules/compiler ----------------------------------------------
    GCC/5.2.0    GCCcore/6.2.0 (D)    icc/2013.5.192     ifort/2013.5.192    LLVM/3.9.0-intel-2017.00 (D)
@@ -104,7 +73,7 @@ In the current module naming scheme, each module name consists of two parts:
 
 If you just provide a software name, for example gcc, it prints an overview of all available modules for GCC.
 
-```bash
+```console
 $ ml spider gcc
 ---------------------------------------------------------------------------------
   GCC:
@@ -147,7 +116,7 @@ $ ml spider gcc
 
 If you use spider on a full module name like GCC/6.2.0-2.27, it will tell you on which cluster(s) that module is available:
 
-```bash
+```console
 $ module spider GCC/6.2.0-2.27
 --------------------------------------------------------------------------------------------------------------
   GCC: GCC/6.2.0-2.27
@@ -169,7 +138,7 @@ This tells you what the module contains and a URL to the homepage of the softwar
 To check which modules are available for a particular software package, you can provide the software name to ml av.
 For example, to check which versions of git are available:
 
-```bash
+```console
 $ ml av git
 
 -------------------------------------- /apps/modules/tools ----------------------------------------
@@ -187,7 +156,7 @@ Use "module keyword key1 key2 ..." to search for all possible modules matching a
 
 Lmod does a partial match on the module name, so sometimes you need to use / to indicate the end of the software name you are interested in:
 
-```bash
+```console
 $ ml av GCC/
 
 ------------------------------------------ /apps/modules/compiler -------------------------------------------
@@ -204,7 +173,7 @@ Use "module keyword key1 key2 ..." to search for all possible modules matching a
 
 To see how a module would change the environment, use ml show:
 
-```bash
+```console
 $ ml show Python/3.5.2
 
 help([[Python is a programming language that lets you work more quickly and integrate your systems more effectively. - Homepage: http://python.org/]])
@@ -240,7 +209,7 @@ If you're not sure what all of this means: don't worry, you don't have to know,
 To effectively apply the changes to the environment that are specified by a module, use ml and specify the name of the module.
 For example, to set up your environment to use intel:
 
-```bash
+```console
 $ ml intel/2017.00
 $ ml
 Currently Loaded Modules:
@@ -275,7 +244,7 @@ In addition, only **one single version** of each software package can be loaded
 To revert the changes to the environment that were made by a particular module, you can use ml -<modname>.
 For example:
 
-```bash
+```console
 $ ml
 Currently Loaded Modules:
   1) EasyBuild/3.0.0 (S)   2) lmod/7.2.2
@@ -299,7 +268,7 @@ $ which gcc
 
 To reset your environment back to a clean state, you can use ml purge or ml purge --force:
 
-```bash
+```console
 $ ml
 Currently Loaded Modules:
   1) EasyBuild/3.0.0 (S)   2) lmod/7.2.2   3) GCCcore/6.2.0   4) binutils/2.27-GCCcore-6.2.0 (H)
@@ -323,25 +292,25 @@ If you have a set of modules that you need to load often, you can save these in
 
 First, load all the modules you need, for example:
 
-```bash
-ml intel/2017.00 Python/3.5.2-intel-2017.00
+```console
+$ ml intel/2017.00 Python/3.5.2-intel-2017.00
 ```
 
 Now store them in a collection using ml save:
 
-```bash
+```console
 $ ml save my-collection
 ```
 
 Later, for example in a job script, you can reload all these modules with ml restore:
 
-```bash
+```console
 $ ml restore my-collection
 ```
 
 With ml savelist you can get a list of all saved collections:
 
-```bash
+```console
 $ ml savelist
 Named collection list:
   1) my-collection
diff --git a/docs.it4i/software/orca.md b/docs.it4i/software/orca.md
index 8fcfd69bfb44f9f978b18d8b8ac4e82a71653f36..3f62415459eceea55e4268d3bd2ca301748e0ce2 100644
--- a/docs.it4i/software/orca.md
+++ b/docs.it4i/software/orca.md
@@ -6,13 +6,13 @@ ORCA is a flexible, efficient and easy-to-use general purpose tool for quantum c
 
 The following module command makes the latest version of orca available to your session
 
-```bash
+```console
 $ module load ORCA/3_0_3-linux_x86-64
 ```
 
 ### Dependency
 
-```bash
+```console
 $ module list
 Currently Loaded Modulefiles:
   1) /opt/modules/modulefiles/oscar-modules/1.0.3(default)
@@ -46,7 +46,7 @@ Create a file called orca_serial.inp that contains the following orca commands
 
 Create a Sun Grid Engine submission file called submit_serial.sh that looks like this
 
-```bash
+```console
 !/bin/bash
 
 module load ORCA/3_0_3-linux_x86-64
@@ -55,7 +55,7 @@ orca orca_serial.inp
 
 Submit the job to the queue with the command
 
-```bash
+```console
 $ qsub -q qexp -I -l select=1
 qsub: waiting for job 196821.isrv5 to start
 qsub: job 196821.isrv5 ready
diff --git a/docs.it4i/software/singularity.md b/docs.it4i/software/singularity.md
new file mode 100644
index 0000000000000000000000000000000000000000..39618e32c735f1ef1dd02447014015518f51e342
--- /dev/null
+++ b/docs.it4i/software/singularity.md
@@ -0,0 +1,128 @@
+[Singularity](http://singularity.lbl.gov/) enables users to have full control of their environment. A non-privileged user can "swap out" the operating system on the host for one they control. So if the host system is running RHEL6 but your application runs in Ubuntu/RHEL7, you can create an Ubuntu/RHEL7 image, install your applications into that image, copy the image to another host, and run your application on that host in its native Ubuntu/RHEL7 environment.
+
+Singularity also allows you to leverage the resources of whatever host you are on. This includes HPC interconnects, resource managers, file systems, GPUs and/or accelerators, etc. Singularity does this by enabling several key facets:
+
+* Encapsulation of the environment
+* Containers are image based
+* No user contextual changes or root escalation allowed
+* No root owned daemon processes
+
+## Using Docker Images
+
+Singularity can import, bootstrap, and even run Docker images directly from [Docker Hub](https://hub.docker.com/). You can easily run RHEL7 like this:
+
+```console
+[hrb33@r33u01n865 ~]$ cat /etc/redhat-release 
+CentOS release 6.7 (Final)
+[hrb33@r33u01n865 ~]$ ml Singularity
+[hrb33@r33u01n865 ~]$ singularity shell docker://centos:latest
+library/centos:latest
+Downloading layer: sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4
+Downloading layer: sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4
+Downloading layer: sha256:45a2e645736c4c66ef34acce2407ded21f7a9b231199d3b92d6c9776df264729
+Downloading layer: sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4
+Singularity: Invoking an interactive shell within container...
+
+Singularity.centos:latest> cat /etc/redhat-release 
+CentOS Linux release 7.3.1611 (Core) 
+```
+
+## Creating Your Own Image from a Docker Image
+
+```console
+hrb33@hrb33-toshiba:/$ cd /tmp/
+hrb33@hrb33-toshiba:/tmp$ sudo singularity create /tmp/c7.img
+[sudo] password for hrb33: 
+Creating a new image with a maximum size of 768MiB...
+Executing image create helper
+Formatting image with ext3 file system
+Done.
+hrb33@hrb33-toshiba:/tmp$ sudo singularity import c7.img docker://centos:latest
+library/centos:latest
+Downloading layer: sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4
+Downloading layer: sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4
+Downloading layer: sha256:45a2e645736c4c66ef34acce2407ded21f7a9b231199d3b92d6c9776df264729
+Downloading layer: sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4
+Adding Docker CMD as Singularity runscript...
+Bootstrap initialization
+No bootstrap definition passed, updating container
+Executing Prebootstrap module
+Executing Postbootstrap module
+Done.
+hrb33@hrb33-toshiba:/tmp$ sudo singularity shell --writable c7.img 
+Singularity: Invoking an interactive shell within container...
+
+Singularity.c7.img> mkdir /apps /scratch
+Singularity.c7.img> exit
+hrb33@hrb33-toshiba:/tmp$ rsync -av c7.img  hrb33@login4.salomon:/home/hrb33/c7.img
+sending incremental file list
+c7.img
+
+sent 805,503,090 bytes  received 34 bytes  9,205,749.99 bytes/sec
+total size is 805,306,399  speedup is 1.00
+
+```
+
+## Accessing /HOME and /SCRATCH Within the Container
+
+```console
+hrb33@hrb33-toshiba:/tmp$ ssh hrb33@login4.salomon
+
+                    _____       _                             
+                   / ____|     | |                            
+                  | (___   __ _| | ___  _ __ ___   ___  _ __  
+                   \___ \ / _` | |/ _ \| '_ ` _ \ / _ \| '_ \ 
+                   ____) | (_| | | (_) | | | | | | (_) | | | |
+                  |_____/ \__,_|_|\___/|_| |_| |_|\___/|_| |_|
+
+                        http://www.it4i.cz/?lang=en
+
+
+Last login: Fri Feb 10 14:38:36 2017 from 10.0.131.12
+[hrb33@login4.salomon ~]$ ml Singularity
+[hrb33@login4.salomon ~]$ singularity shell --bind /scratch --bind /apps --writable c7.img 
+Singularity: Invoking an interactive shell within container...
+
+Singularity.c7.img> ls /apps/ -l
+total 68
+drwx------   4 root root   29 Sep 29 10:28 SCS
+drwxrwxr-x 301 2757 2796 8192 Feb 16 10:58 all
+drwxrwxr-x   3 2757 2796   19 Jul  9  2015 base
+drwxrwxr-x  16 2757 2796 4096 Nov 24 21:47 bio
+drwxrwxr-x  10 2757 2796  116 Apr  8  2016 cae
+drwxrwxr-x  18 2757 2796 4096 Jan 17 09:49 chem
+drwxrwxr-x  11 2757 2796  122 Dec  7 09:25 compiler
+drwxrwxr-x   7 2757 2796   73 Jun 29  2016 data
+drwxr-xr-x   7 2757 2796   88 Jan  8  2016 debugger
+drwxrwxr-x  38 2757 2796 4096 Feb 16 13:37 devel
+drwxrwxr-x   9 2757 2796  130 Jan  9 08:40 easybuild
+drwxr-xr-x  11 3900 4011 4096 Feb 15 09:50 gentoo
+drwxr-xr-x  10 3900 4011 4096 Feb 10 17:01 gentoo_uv
+drwxrwxr-x   5 2757 2796   39 Jan 18  2016 geo
+drwxr-xr-x  18 2757 2796 4096 Sep  6 16:03 intel2017
+drwxrwxr-x  20 2757 2796 4096 Nov 28 08:50 lang
+drwxrwxr-x  31 2757 2796 4096 Dec  7 07:48 lib
+drwxrwxr-x   4 2757 2796   32 Nov  9 09:19 licenses
+drwxrwxr-x  17 2757 2796 4096 Nov 15 09:24 math
+drwxr-xr-x  22 2757 2796 4096 Jan 19 13:15 modules
+drwxrwxr-x   8 2757 2796   82 Apr 18  2016 mpi
+drwxrwxr-x  13 2757 2796 4096 Oct 24 09:08 numlib
+drwxrwxr-x  10 2757 2796  108 Feb  3 11:01 perf
+drwxrwxr-x   5 2757 2796   41 Jan 17 09:49 phys
+drwxrwxr-x   2 2757 2796    6 Feb  3 11:01 prace
+drwxr-xr-x   4 root root   36 Jun 18  2015 sw
+drwxrwxr-x   5 2757 2796   49 Feb 15  2016 system
+drwxr-xr-x   3 root root   19 Dec  4  2015 test
+drwxrwxr-x  13 2757 2796  138 May 31  2016 toolchain
+drwxrwxr-x  39 2757 2796 4096 Feb  3 11:27 tools
+drwxr-xr-x   4 root root   31 Aug 11  2015 user
+drwxrwxr-x  21 2757 2796 4096 Jan  5 18:56 uv
+drwxrwxr-x  40 2757 2796 4096 Feb  3 11:01 vis
+Singularity.c7.img> ls /scratch/ -l
+total 32
+drwx------   3 root root  4096 Aug 15  2016 backup
+drwxr-x---   2 root root  4096 Dec  5 10:34 sys
+drwxrwxrwt 154 root root 20480 Feb 14 14:03 temp
+drwxr-xr-x   4 root root  4096 Jan 25 10:48 work
+Singularity.c7.img> 
+```
diff --git a/docs.it4i/src/css.css b/docs.it4i/src/css.css
index 2f9e047eda1cb0889c7fc52b6e0d2a51c71878ec..aa07e9978b8b5b682942a07c08ae66179a4ffb4d 100644
--- a/docs.it4i/src/css.css
+++ b/docs.it4i/src/css.css
@@ -2,7 +2,11 @@ a:not([href*="//"]) {
     /* CSS for internal links */
 }
 
-a[href*="//"]:not( [href*='gitlab.it4i.cz'] ):not( [href*='code.it4i.cz'] ) {
+a.md-footer-social__link.fa.fa-globe {
+    !background: none;
+}
+
+a[href*="//"]:not( [href*='gitlab.it4i.cz'] ):not( [href*='code.it4i.cz'] ):not( [href*='https://www.it4i.cz'] )  {
     /*CSS for external links */
     background: transparent url("/img/external.png") no-repeat right 0px top 1px;
     background-size: 12px;
diff --git a/material/assets/javascripts/application-16f434a21a.js b/material/assets/javascripts/application-16f434a21a.js
deleted file mode 100644
index 057cdeebe70cef404cb1d360d0d15feb913120d0..0000000000000000000000000000000000000000
--- a/material/assets/javascripts/application-16f434a21a.js
+++ /dev/null
@@ -1,58 +0,0 @@
-var Application=function(t){function e(r){if(n[r])return n[r].exports;var o=n[r]={exports:{},id:r,loaded:!1};return t[r].call(o.exports,o,o.exports,e),o.loaded=!0,o.exports}var n={};return e.m=t,e.c=n,e.p="",e(0)}([function(t,e,n){n(1),n(65),n(66),t.exports=n(67)},function(t,e,n){"use strict";n(2),n(22),n(48),n(52),t.exports=n(21).Promise},function(t,e,n){"use strict";var r=n(3),o={};o[n(5)("toStringTag")]="z",o+""!="[object z]"&&n(9)(Object.prototype,"toString",function(){return"[object "+r(this)+"]"},!0)},function(t,e,n){"use strict";var r=n(4),o=n(5)("toStringTag"),i="Arguments"==r(function(){return arguments}()),s=function(t,e){try{return t[e]}catch(t){}};t.exports=function(t){var e,n,a;return void 0===t?"Undefined":null===t?"Null":"string"==typeof(n=s(e=Object(t),o))?n:i?r(e):"Object"==(a=r(e))&&"function"==typeof e.callee?"Arguments":a}},function(t,e){"use strict";var n={}.toString;t.exports=function(t){return n.call(t).slice(8,-1)}},function(t,e,n){"use strict";var r=n(6)("wks"),o=n(8),i=n(7).Symbol,s="function"==typeof i,a=t.exports=function(t){return r[t]||(r[t]=s&&i[t]||(s?i:o)("Symbol."+t))};a.store=r},function(t,e,n){"use strict";var r=n(7),o="__core-js_shared__",i=r[o]||(r[o]={});t.exports=function(t){return i[t]||(i[t]={})}},function(t,e){"use strict";var n=t.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=n)},function(t,e){"use strict";var n=0,r=Math.random();t.exports=function(t){return"Symbol(".concat(void 0===t?"":t,")_",(++n+r).toString(36))}},function(t,e,n){"use strict";var r=n(7),o=n(10),i=n(20),s=n(8)("src"),a="toString",u=Function[a],c=(""+u).split(a);n(21).inspectSource=function(t){return u.call(t)},(t.exports=function(t,e,n,a){var u="function"==typeof n;u&&(i(n,"name")||o(n,"name",e)),t[e]!==n&&(u&&(i(n,s)||o(n,s,t[e]?""+t[e]:c.join(String(e)))),t===r?t[e]=n:a?t[e]?t[e]=n:o(t,e,n):(delete 
t[e],o(t,e,n)))})(Function.prototype,a,function(){return"function"==typeof this&&this[s]||u.call(this)})},function(t,e,n){"use strict";var r=n(11),o=n(19);t.exports=n(15)?function(t,e,n){return r.f(t,e,o(1,n))}:function(t,e,n){return t[e]=n,t}},function(t,e,n){"use strict";var r=n(12),o=n(14),i=n(18),s=Object.defineProperty;e.f=n(15)?Object.defineProperty:function(t,e,n){if(r(t),e=i(e,!0),r(n),o)try{return s(t,e,n)}catch(t){}if("get"in n||"set"in n)throw TypeError("Accessors not supported!");return"value"in n&&(t[e]=n.value),t}},function(t,e,n){"use strict";var r=n(13);t.exports=function(t){if(!r(t))throw TypeError(t+" is not an object!");return t}},function(t,e){"use strict";var n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t};t.exports=function(t){return"object"===("undefined"==typeof t?"undefined":n(t))?null!==t:"function"==typeof t}},function(t,e,n){"use strict";t.exports=!n(15)&&!n(16)(function(){return 7!=Object.defineProperty(n(17)("div"),"a",{get:function(){return 7}}).a})},function(t,e,n){"use strict";t.exports=!n(16)(function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a})},function(t,e){"use strict";t.exports=function(t){try{return!!t()}catch(t){return!0}}},function(t,e,n){"use strict";var r=n(13),o=n(7).document,i=r(o)&&r(o.createElement);t.exports=function(t){return i?o.createElement(t):{}}},function(t,e,n){"use strict";var r=n(13);t.exports=function(t,e){if(!r(t))return t;var n,o;if(e&&"function"==typeof(n=t.toString)&&!r(o=n.call(t)))return o;if("function"==typeof(n=t.valueOf)&&!r(o=n.call(t)))return o;if(!e&&"function"==typeof(n=t.toString)&&!r(o=n.call(t)))return o;throw TypeError("Can't convert object to primitive value")}},function(t,e){"use strict";t.exports=function(t,e){return{enumerable:!(1&t),configurable:!(2&t),writable:!(4&t),value:e}}},function(t,e){"use 
strict";var n={}.hasOwnProperty;t.exports=function(t,e){return n.call(t,e)}},function(t,e){"use strict";var n=t.exports={version:"2.4.0"};"number"==typeof __e&&(__e=n)},function(t,e,n){"use strict";var r=n(23)(!0);n(26)(String,"String",function(t){this._t=String(t),this._i=0},function(){var t,e=this._t,n=this._i;return n>=e.length?{value:void 0,done:!0}:(t=r(e,n),this._i+=t.length,{value:t,done:!1})})},function(t,e,n){"use strict";var r=n(24),o=n(25);t.exports=function(t){return function(e,n){var i,s,a=String(o(e)),u=r(n),c=a.length;return u<0||u>=c?t?"":void 0:(i=a.charCodeAt(u),i<55296||i>56319||u+1===c||(s=a.charCodeAt(u+1))<56320||s>57343?t?a.charAt(u):i:t?a.slice(u,u+2):(i-55296<<10)+(s-56320)+65536)}}},function(t,e){"use strict";var n=Math.ceil,r=Math.floor;t.exports=function(t){return isNaN(t=+t)?0:(t>0?r:n)(t)}},function(t,e){"use strict";t.exports=function(t){if(void 0==t)throw TypeError("Can't call method on  "+t);return t}},function(t,e,n){"use strict";var r=n(27),o=n(28),i=n(9),s=n(10),a=n(20),u=n(31),c=n(32),l=n(45),f=n(46),d=n(5)("iterator"),h=!([].keys&&"next"in[].keys()),p="@@iterator",v="keys",y="values",m=function(){return this};t.exports=function(t,e,n,g,b,w,_){c(n,e,g);var S,x,E,k=function(t){if(!h&&t in P)return P[t];switch(t){case v:return function(){return new n(this,t)};case y:return function(){return new n(this,t)}}return function(){return new n(this,t)}},T=e+" Iterator",O=b==y,C=!1,P=t.prototype,A=P[d]||P[p]||b&&P[b],M=A||k(b),j=b?O?k("entries"):M:void 0,L="Array"==e?P.entries||A:A;if(L&&(E=f(L.call(new t)),E!==Object.prototype&&(l(E,T,!0),r||a(E,d)||s(E,d,m))),O&&A&&A.name!==y&&(C=!0,M=function(){return A.call(this)}),r&&!_||!h&&!C&&P[d]||s(P,d,M),u[e]=M,u[T]=m,b)if(S={values:O?M:k(y),keys:w?M:k(v),entries:j},_)for(x in S)x in P||i(P,x,S[x]);else o(o.P+o.F*(h||C),e,S);return S}},function(t,e){"use strict";t.exports=!1},function(t,e,n){"use strict";var r=n(7),o=n(21),i=n(10),s=n(9),a=n(29),u="prototype",c=function t(e,n,c){var 
l,f,d,h,p=e&t.F,v=e&t.G,y=e&t.S,m=e&t.P,g=e&t.B,b=v?r:y?r[n]||(r[n]={}):(r[n]||{})[u],w=v?o:o[n]||(o[n]={}),_=w[u]||(w[u]={});v&&(c=n);for(l in c)f=!p&&b&&void 0!==b[l],d=(f?b:c)[l],h=g&&f?a(d,r):m&&"function"==typeof d?a(Function.call,d):d,b&&s(b,l,d,e&t.U),w[l]!=d&&i(w,l,h),m&&_[l]!=d&&(_[l]=d)};r.core=o,c.F=1,c.G=2,c.S=4,c.P=8,c.B=16,c.W=32,c.U=64,c.R=128,t.exports=c},function(t,e,n){"use strict";var r=n(30);t.exports=function(t,e,n){if(r(t),void 0===e)return t;switch(n){case 1:return function(n){return t.call(e,n)};case 2:return function(n,r){return t.call(e,n,r)};case 3:return function(n,r,o){return t.call(e,n,r,o)}}return function(){return t.apply(e,arguments)}}},function(t,e){"use strict";t.exports=function(t){if("function"!=typeof t)throw TypeError(t+" is not a function!");return t}},function(t,e){"use strict";t.exports={}},function(t,e,n){"use strict";var r=n(33),o=n(19),i=n(45),s={};n(10)(s,n(5)("iterator"),function(){return this}),t.exports=function(t,e,n){t.prototype=r(s,{next:o(1,n)}),i(t,e+" Iterator")}},function(t,e,n){"use strict";var r=n(12),o=n(34),i=n(43),s=n(42)("IE_PROTO"),a=function(){},u="prototype",c=function(){var t,e=n(17)("iframe"),r=i.length,o="<",s=">";for(e.style.display="none",n(44).appendChild(e),e.src="javascript:",t=e.contentWindow.document,t.open(),t.write(o+"script"+s+"document.F=Object"+o+"/script"+s),t.close(),c=t.F;r--;)delete c[u][i[r]];return c()};t.exports=Object.create||function(t,e){var n;return null!==t?(a[u]=r(t),n=new a,a[u]=null,n[s]=t):n=c(),void 0===e?n:o(n,e)}},function(t,e,n){"use strict";var r=n(11),o=n(12),i=n(35);t.exports=n(15)?Object.defineProperties:function(t,e){o(t);for(var n,s=i(e),a=s.length,u=0;a>u;)r.f(t,n=s[u++],e[n]);return t}},function(t,e,n){"use strict";var r=n(36),o=n(43);t.exports=Object.keys||function(t){return r(t,o)}},function(t,e,n){"use strict";var r=n(20),o=n(37),i=n(39)(!1),s=n(42)("IE_PROTO");t.exports=function(t,e){var n,a=o(t),u=0,c=[];for(n in 
a)n!=s&&r(a,n)&&c.push(n);for(;e.length>u;)r(a,n=e[u++])&&(~i(c,n)||c.push(n));return c}},function(t,e,n){"use strict";var r=n(38),o=n(25);t.exports=function(t){return r(o(t))}},function(t,e,n){"use strict";var r=n(4);t.exports=Object("z").propertyIsEnumerable(0)?Object:function(t){return"String"==r(t)?t.split(""):Object(t)}},function(t,e,n){"use strict";var r=n(37),o=n(40),i=n(41);t.exports=function(t){return function(e,n,s){var a,u=r(e),c=o(u.length),l=i(s,c);if(t&&n!=n){for(;c>l;)if(a=u[l++],a!=a)return!0}else for(;c>l;l++)if((t||l in u)&&u[l]===n)return t||l||0;return!t&&-1}}},function(t,e,n){"use strict";var r=n(24),o=Math.min;t.exports=function(t){return t>0?o(r(t),9007199254740991):0}},function(t,e,n){"use strict";var r=n(24),o=Math.max,i=Math.min;t.exports=function(t,e){return t=r(t),t<0?o(t+e,0):i(t,e)}},function(t,e,n){"use strict";var r=n(6)("keys"),o=n(8);t.exports=function(t){return r[t]||(r[t]=o(t))}},function(t,e){"use strict";t.exports="constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf".split(",")},function(t,e,n){"use strict";t.exports=n(7).document&&document.documentElement},function(t,e,n){"use strict";var r=n(11).f,o=n(20),i=n(5)("toStringTag");t.exports=function(t,e,n){t&&!o(t=n?t:t.prototype,i)&&r(t,i,{configurable:!0,value:e})}},function(t,e,n){"use strict";var r=n(20),o=n(47),i=n(42)("IE_PROTO"),s=Object.prototype;t.exports=Object.getPrototypeOf||function(t){return t=o(t),r(t,i)?t[i]:"function"==typeof t.constructor&&t instanceof t.constructor?t.constructor.prototype:t instanceof Object?s:null}},function(t,e,n){"use strict";var r=n(25);t.exports=function(t){return Object(r(t))}},function(t,e,n){"use strict";for(var r=n(49),o=n(9),i=n(7),s=n(10),a=n(31),u=n(5),c=u("iterator"),l=u("toStringTag"),f=a.Array,d=["NodeList","DOMTokenList","MediaList","StyleSheetList","CSSRuleList"],h=0;h<5;h++){var p,v=d[h],y=i[v],m=y&&y.prototype;if(m){m[c]||s(m,c,f),m[l]||s(m,l,v),a[v]=f;for(p in 
r)m[p]||o(m,p,r[p],!0)}}},function(t,e,n){"use strict";var r=n(50),o=n(51),i=n(31),s=n(37);t.exports=n(26)(Array,"Array",function(t,e){this._t=s(t),this._i=0,this._k=e},function(){var t=this._t,e=this._k,n=this._i++;return!t||n>=t.length?(this._t=void 0,o(1)):"keys"==e?o(0,n):"values"==e?o(0,t[n]):o(0,[n,t[n]])},"values"),i.Arguments=i.Array,r("keys"),r("values"),r("entries")},function(t,e,n){"use strict";var r=n(5)("unscopables"),o=Array.prototype;void 0==o[r]&&n(10)(o,r,{}),t.exports=function(t){o[r][t]=!0}},function(t,e){"use strict";t.exports=function(t,e){return{value:e,done:!!t}}},function(t,e,n){"use strict";var r,o,i,s=n(27),a=n(7),u=n(29),c=n(3),l=n(28),f=n(13),d=n(30),h=n(53),p=n(54),v=n(58),y=n(59).set,m=n(61)(),g="Promise",b=a.TypeError,w=a.process,_=a[g],w=a.process,S="process"==c(w),x=function(){},E=!!function(){try{var t=_.resolve(1),e=(t.constructor={})[n(5)("species")]=function(t){t(x,x)};return(S||"function"==typeof PromiseRejectionEvent)&&t.then(x)instanceof e}catch(t){}}(),k=function(t,e){return t===e||t===_&&e===i},T=function(t){var e;return!(!f(t)||"function"!=typeof(e=t.then))&&e},O=function(t){return k(_,t)?new C(t):new o(t)},C=o=function(t){var e,n;this.promise=new t(function(t,r){if(void 0!==e||void 0!==n)throw b("Bad Promise constructor");e=t,n=r}),this.resolve=d(e),this.reject=d(n)},P=function(t){try{t()}catch(t){return{error:t}}},A=function(t,e){if(!t._n){t._n=!0;var n=t._c;m(function(){for(var r=t._v,o=1==t._s,i=0,s=function(e){var n,i,s=o?e.ok:e.fail,a=e.resolve,u=e.reject,c=e.domain;try{s?(o||(2==t._h&&L(t),t._h=1),s===!0?n=r:(c&&c.enter(),n=s(r),c&&c.exit()),n===e.promise?u(b("Promise-chain cycle")):(i=T(n))?i.call(n,a,u):a(n)):u(r)}catch(t){u(t)}};n.length>i;)s(n[i++]);t._c=[],t._n=!1,e&&!t._h&&M(t)})}},M=function(t){y.call(a,function(){var e,n,r,o=t._v;if(j(t)&&(e=P(function(){S?w.emit("unhandledRejection",o,t):(n=a.onunhandledrejection)?n({promise:t,reason:o}):(r=a.console)&&r.error&&r.error("Unhandled promise 
rejection",o)}),t._h=S||j(t)?2:1),t._a=void 0,e)throw e.error})},j=function t(e){if(1==e._h)return!1;for(var n,r=e._a||e._c,o=0;r.length>o;)if(n=r[o++],n.fail||!t(n.promise))return!1;return!0},L=function(t){y.call(a,function(){var e;S?w.emit("rejectionHandled",t):(e=a.onrejectionhandled)&&e({promise:t,reason:t._v})})},F=function(t){var e=this;e._d||(e._d=!0,e=e._w||e,e._v=t,e._s=2,e._a||(e._a=e._c.slice()),A(e,!0))},N=function t(e){var n,r=this;if(!r._d){r._d=!0,r=r._w||r;try{if(r===e)throw b("Promise can't be resolved itself");(n=T(e))?m(function(){var o={_w:r,_d:!1};try{n.call(e,u(t,o,1),u(F,o,1))}catch(t){F.call(o,t)}}):(r._v=e,r._s=1,A(r,!1))}catch(t){F.call({_w:r,_d:!1},t)}}};E||(_=function(t){h(this,_,g,"_h"),d(t),r.call(this);try{t(u(N,this,1),u(F,this,1))}catch(t){F.call(this,t)}},r=function(t){this._c=[],this._a=void 0,this._s=0,this._d=!1,this._v=void 0,this._h=0,this._n=!1},r.prototype=n(62)(_.prototype,{then:function(t,e){var n=O(v(this,_));return n.ok="function"!=typeof t||t,n.fail="function"==typeof e&&e,n.domain=S?w.domain:void 0,this._c.push(n),this._a&&this._a.push(n),this._s&&A(this,!1),n.promise},catch:function(t){return this.then(void 0,t)}}),C=function(){var t=new r;this.promise=t,this.resolve=u(N,t,1),this.reject=u(F,t,1)}),l(l.G+l.W+l.F*!E,{Promise:_}),n(45)(_,g),n(63)(g),i=n(21)[g],l(l.S+l.F*!E,g,{reject:function(t){var e=O(this),n=e.reject;return n(t),e.promise}}),l(l.S+l.F*(s||!E),g,{resolve:function(t){if(t instanceof _&&k(t.constructor,this))return t;var e=O(this),n=e.resolve;return n(t),e.promise}}),l(l.S+l.F*!(E&&n(64)(function(t){_.all(t).catch(x)})),g,{all:function(t){var e=this,n=O(e),r=n.resolve,o=n.reject,i=P(function(){var n=[],i=0,s=1;p(t,!1,function(t){var a=i++,u=!1;n.push(void 0),s++,e.resolve(t).then(function(t){u||(u=!0,n[a]=t,--s||r(n))},o)}),--s||r(n)});return i&&o(i.error),n.promise},race:function(t){var e=this,n=O(e),r=n.reject,o=P(function(){p(t,!1,function(t){e.resolve(t).then(n.resolve,r)})});return 
o&&r(o.error),n.promise}})},function(t,e){"use strict";t.exports=function(t,e,n,r){if(!(t instanceof e)||void 0!==r&&r in t)throw TypeError(n+": incorrect invocation!");return t}},function(t,e,n){"use strict";var r=n(29),o=n(55),i=n(56),s=n(12),a=n(40),u=n(57),c={},l={},f=t.exports=function(t,e,n,f,d){var h,p,v,y,m=d?function(){return t}:u(t),g=r(n,f,e?2:1),b=0;if("function"!=typeof m)throw TypeError(t+" is not iterable!");if(i(m)){for(h=a(t.length);h>b;b++)if(y=e?g(s(p=t[b])[0],p[1]):g(t[b]),y===c||y===l)return y}else for(v=m.call(t);!(p=v.next()).done;)if(y=o(v,g,p.value,e),y===c||y===l)return y};f.BREAK=c,f.RETURN=l},function(t,e,n){"use strict";var r=n(12);t.exports=function(t,e,n,o){try{return o?e(r(n)[0],n[1]):e(n)}catch(e){var i=t.return;throw void 0!==i&&r(i.call(t)),e}}},function(t,e,n){"use strict";var r=n(31),o=n(5)("iterator"),i=Array.prototype;t.exports=function(t){return void 0!==t&&(r.Array===t||i[o]===t)}},function(t,e,n){"use strict";var r=n(3),o=n(5)("iterator"),i=n(31);t.exports=n(21).getIteratorMethod=function(t){if(void 0!=t)return t[o]||t["@@iterator"]||i[r(t)]}},function(t,e,n){"use strict";var r=n(12),o=n(30),i=n(5)("species");t.exports=function(t,e){var n,s=r(t).constructor;return void 0===s||void 0==(n=r(s)[i])?e:o(n)}},function(t,e,n){"use strict";var r,o,i,s=n(29),a=n(60),u=n(44),c=n(17),l=n(7),f=l.process,d=l.setImmediate,h=l.clearImmediate,p=l.MessageChannel,v=0,y={},m="onreadystatechange",g=function(){var t=+this;if(y.hasOwnProperty(t)){var e=y[t];delete y[t],e()}},b=function(t){g.call(t.data)};d&&h||(d=function(t){for(var e=[],n=1;arguments.length>n;)e.push(arguments[n++]);return y[++v]=function(){a("function"==typeof t?t:Function(t),e)},r(v),v},h=function(t){delete y[t]},"process"==n(4)(f)?r=function(t){f.nextTick(s(g,t,1))}:p?(o=new p,i=o.port2,o.port1.onmessage=b,r=s(i.postMessage,i,1)):l.addEventListener&&"function"==typeof 
postMessage&&!l.importScripts?(r=function(t){l.postMessage(t+"","*")},l.addEventListener("message",b,!1)):r=m in c("script")?function(t){u.appendChild(c("script"))[m]=function(){u.removeChild(this),g.call(t)}}:function(t){setTimeout(s(g,t,1),0)}),t.exports={set:d,clear:h}},function(t,e){"use strict";t.exports=function(t,e,n){var r=void 0===n;switch(e.length){case 0:return r?t():t.call(n);case 1:return r?t(e[0]):t.call(n,e[0]);case 2:return r?t(e[0],e[1]):t.call(n,e[0],e[1]);case 3:return r?t(e[0],e[1],e[2]):t.call(n,e[0],e[1],e[2]);case 4:return r?t(e[0],e[1],e[2],e[3]):t.call(n,e[0],e[1],e[2],e[3])}return t.apply(n,e)}},function(t,e,n){"use strict";var r=n(7),o=n(59).set,i=r.MutationObserver||r.WebKitMutationObserver,s=r.process,a=r.Promise,u="process"==n(4)(s);t.exports=function(){var t,e,n,c=function(){var r,o;for(u&&(r=s.domain)&&r.exit();t;){o=t.fn,t=t.next;try{o()}catch(r){throw t?n():e=void 0,r}}e=void 0,r&&r.enter()};if(u)n=function(){s.nextTick(c)};else if(i){var l=!0,f=document.createTextNode("");new i(c).observe(f,{characterData:!0}),n=function(){f.data=l=!l}}else if(a&&a.resolve){var d=a.resolve();n=function(){d.then(c)}}else n=function(){o.call(r,c)};return function(r){var o={fn:r,next:void 0};e&&(e.next=o),t||(t=o,n()),e=o}}},function(t,e,n){"use strict";var r=n(9);t.exports=function(t,e,n){for(var o in e)r(t,o,e[o],n);return t}},function(t,e,n){"use strict";var r=n(7),o=n(11),i=n(15),s=n(5)("species");t.exports=function(t){var e=r[t];i&&e&&!e[s]&&o.f(e,s,{configurable:!0,get:function(){return this}})}},function(t,e,n){"use strict";var r=n(5)("iterator"),o=!1;try{var i=[7][r]();i.return=function(){o=!0},Array.from(i,function(){throw 2})}catch(t){}t.exports=function(t,e){if(!e&&!o)return!1;var n=!1;try{var i=[7],s=i[r]();s.next=function(){return{done:n=!0}},i[r]=function(){return s},t(i)}catch(t){}return n}},function(t,e){"use strict";try{var n=new window.CustomEvent("test");if(n.preventDefault(),n.defaultPrevented!==!0)throw new Error("Could not 
prevent default")}catch(t){var r=function(t,e){var n,r;return e=e||{bubbles:!1,cancelable:!1,detail:void 0},n=document.createEvent("CustomEvent"),n.initCustomEvent(t,e.bubbles,e.cancelable,e.detail),r=n.preventDefault,n.preventDefault=function(){r.call(this);try{Object.defineProperty(this,"defaultPrevented",{get:function(){return!0}})}catch(t){this.defaultPrevented=!0}},n};r.prototype=window.Event.prototype,window.CustomEvent=r}},function(t,e){"use strict";!function(t){function e(t){if("string"!=typeof t&&(t=String(t)),/[^a-z0-9\-#$%&'*+.\^_`|~]/i.test(t))throw new TypeError("Invalid character in header field name");return t.toLowerCase()}function n(t){return"string"!=typeof t&&(t=String(t)),t}function r(t){var e={next:function(){var e=t.shift();return{done:void 0===e,value:e}}};return m.iterable&&(e[Symbol.iterator]=function(){return e}),e}function o(t){this.map={},t instanceof o?t.forEach(function(t,e){this.append(e,t)},this):t&&Object.getOwnPropertyNames(t).forEach(function(e){this.append(e,t[e])},this)}function i(t){return t.bodyUsed?Promise.reject(new TypeError("Already read")):void(t.bodyUsed=!0)}function s(t){return new Promise(function(e,n){t.onload=function(){e(t.result)},t.onerror=function(){n(t.error)}})}function a(t){var e=new FileReader,n=s(e);return e.readAsArrayBuffer(t),n}function u(t){var e=new FileReader,n=s(e);return e.readAsText(t),n}function c(t){for(var e=new Uint8Array(t),n=new Array(e.length),r=0;r<e.length;r++)n[r]=String.fromCharCode(e[r]);return n.join("")}function l(t){if(t.slice)return t.slice(0);var e=new Uint8Array(t.byteLength);return e.set(new Uint8Array(t)),e.buffer}function f(){return this.bodyUsed=!1,this._initBody=function(t){if(this._bodyInit=t,t)if("string"==typeof t)this._bodyText=t;else if(m.blob&&Blob.prototype.isPrototypeOf(t))this._bodyBlob=t;else if(m.formData&&FormData.prototype.isPrototypeOf(t))this._bodyFormData=t;else if(m.searchParams&&URLSearchParams.prototype.isPrototypeOf(t))this._bodyText=t.toString();else 
if(m.arrayBuffer&&m.blob&&b(t))this._bodyArrayBuffer=l(t.buffer),this._bodyInit=new Blob([this._bodyArrayBuffer]);else{if(!m.arrayBuffer||!ArrayBuffer.prototype.isPrototypeOf(t)&&!w(t))throw new Error("unsupported BodyInit type");this._bodyArrayBuffer=l(t)}else this._bodyText="";this.headers.get("content-type")||("string"==typeof t?this.headers.set("content-type","text/plain;charset=UTF-8"):this._bodyBlob&&this._bodyBlob.type?this.headers.set("content-type",this._bodyBlob.type):m.searchParams&&URLSearchParams.prototype.isPrototypeOf(t)&&this.headers.set("content-type","application/x-www-form-urlencoded;charset=UTF-8"))},m.blob&&(this.blob=function(){var t=i(this);if(t)return t;if(this._bodyBlob)return Promise.resolve(this._bodyBlob);if(this._bodyArrayBuffer)return Promise.resolve(new Blob([this._bodyArrayBuffer]));if(this._bodyFormData)throw new Error("could not read FormData body as blob");return Promise.resolve(new Blob([this._bodyText]))},this.arrayBuffer=function(){return this._bodyArrayBuffer?i(this)||Promise.resolve(this._bodyArrayBuffer):this.blob().then(a)}),this.text=function(){var t=i(this);if(t)return t;if(this._bodyBlob)return u(this._bodyBlob);if(this._bodyArrayBuffer)return Promise.resolve(c(this._bodyArrayBuffer));if(this._bodyFormData)throw new Error("could not read FormData body as text");return Promise.resolve(this._bodyText)},m.formData&&(this.formData=function(){return this.text().then(p)}),this.json=function(){return this.text().then(JSON.parse)},this}function d(t){var e=t.toUpperCase();return _.indexOf(e)>-1?e:t}function h(t,e){e=e||{};var n=e.body;if("string"==typeof t)this.url=t;else{if(t.bodyUsed)throw new TypeError("Already read");this.url=t.url,this.credentials=t.credentials,e.headers||(this.headers=new o(t.headers)),this.method=t.method,this.mode=t.mode,n||null==t._bodyInit||(n=t._bodyInit,t.bodyUsed=!0)}if(this.credentials=e.credentials||this.credentials||"omit",!e.headers&&this.headers||(this.headers=new 
o(e.headers)),this.method=d(e.method||this.method||"GET"),this.mode=e.mode||this.mode||null,this.referrer=null,("GET"===this.method||"HEAD"===this.method)&&n)throw new TypeError("Body not allowed for GET or HEAD requests");this._initBody(n)}function p(t){var e=new FormData;return t.trim().split("&").forEach(function(t){if(t){var n=t.split("="),r=n.shift().replace(/\+/g," "),o=n.join("=").replace(/\+/g," ");e.append(decodeURIComponent(r),decodeURIComponent(o))}}),e}function v(t){var e=new o;return t.split("\r\n").forEach(function(t){var n=t.split(":"),r=n.shift().trim();if(r){var o=n.join(":").trim();e.append(r,o)}}),e}function y(t,e){e||(e={}),this.type="default",this.status="status"in e?e.status:200,this.ok=this.status>=200&&this.status<300,this.statusText="statusText"in e?e.statusText:"OK",this.headers=new o(e.headers),this.url=e.url||"",this._initBody(t)}if(!t.fetch){var m={searchParams:"URLSearchParams"in t,iterable:"Symbol"in t&&"iterator"in Symbol,blob:"FileReader"in t&&"Blob"in t&&function(){try{return new Blob,!0}catch(t){return!1}}(),formData:"FormData"in t,arrayBuffer:"ArrayBuffer"in t};if(m.arrayBuffer)var g=["[object Int8Array]","[object Uint8Array]","[object Uint8ClampedArray]","[object Int16Array]","[object Uint16Array]","[object Int32Array]","[object Uint32Array]","[object Float32Array]","[object Float64Array]"],b=function(t){return t&&DataView.prototype.isPrototypeOf(t)},w=ArrayBuffer.isView||function(t){return t&&g.indexOf(Object.prototype.toString.call(t))>-1};o.prototype.append=function(t,r){t=e(t),r=n(r);var o=this.map[t];this.map[t]=o?o+","+r:r},o.prototype.delete=function(t){delete this.map[e(t)]},o.prototype.get=function(t){return t=e(t),this.has(t)?this.map[t]:null},o.prototype.has=function(t){return this.map.hasOwnProperty(e(t))},o.prototype.set=function(t,r){this.map[e(t)]=n(r)},o.prototype.forEach=function(t,e){for(var n in this.map)this.map.hasOwnProperty(n)&&t.call(e,this.map[n],n,this)},o.prototype.keys=function(){var t=[];return 
this.forEach(function(e,n){t.push(n)}),r(t)},o.prototype.values=function(){var t=[];return this.forEach(function(e){t.push(e)}),r(t)},o.prototype.entries=function(){var t=[];return this.forEach(function(e,n){t.push([n,e])}),r(t)},m.iterable&&(o.prototype[Symbol.iterator]=o.prototype.entries);var _=["DELETE","GET","HEAD","OPTIONS","POST","PUT"];h.prototype.clone=function(){return new h(this,{body:this._bodyInit})},f.call(h.prototype),f.call(y.prototype),y.prototype.clone=function(){return new y(this._bodyInit,{status:this.status,statusText:this.statusText,headers:new o(this.headers),url:this.url})},y.error=function(){var t=new y(null,{status:0,statusText:""});return t.type="error",t};var S=[301,302,303,307,308];y.redirect=function(t,e){if(S.indexOf(e)===-1)throw new RangeError("Invalid status code");return new y(null,{status:e,headers:{location:t}})},t.Headers=o,t.Request=h,t.Response=y,t.fetch=function(t,e){return new Promise(function(n,r){var o=new h(t,e),i=new XMLHttpRequest;i.onload=function(){var t={status:i.status,statusText:i.statusText,headers:v(i.getAllResponseHeaders()||"")};t.url="responseURL"in i?i.responseURL:t.headers.get("X-Request-URL");var e="response"in i?i.response:i.responseText;n(new y(e,t))},i.onerror=function(){r(new TypeError("Network request failed"))},i.ontimeout=function(){r(new TypeError("Network request failed"))},i.open(o.method,o.url,!0),"include"===o.credentials&&(i.withCredentials=!0),"responseType"in i&&m.blob&&(i.responseType="blob"),o.headers.forEach(function(t,e){i.setRequestHeader(e,t)}),i.send("undefined"==typeof o._bodyInit?null:o._bodyInit)})},t.fetch.polyfill=!0}}("undefined"!=typeof self?self:void 0)},function(t,e,n){"use strict";function r(t){return t&&t.__esModule?t:{default:t}}function o(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}Object.defineProperty(e,"__esModule",{value:!0});var i=function(){function t(t,e){for(var n=0;n<e.length;n++){var 
r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),s=n(68),a=r(s),u=n(70),c=r(u),l=function(){function t(e){o(this,t),this.config_=e}return i(t,[{key:"initialize",value:function(){var t=this;new c.default.Event.Listener(document,"DOMContentLoaded",function(){Modernizr.addTest("ios",function(){return!!navigator.userAgent.match(/(iPad|iPhone|iPod)/g)}),Modernizr.addTest("standalone",function(){return!!navigator.standalone}),a.default.attach(document.body);var t=document.querySelectorAll("table:not([class])");if(Array.prototype.forEach.call(t,function(t){var e=document.createElement("div");e.classList.add("md-typeset__table"),t.nextSibling?t.parentNode.insertBefore(e,t.nextSibling):t.parentNode.appendChild(e),e.appendChild(t)}),Modernizr.ios){var e=document.querySelectorAll("[data-md-scrollfix]");Array.prototype.forEach.call(e,function(t){t.addEventListener("touchstart",function(){var e=t.scrollTop;0===e?t.scrollTop=1:e+t.offsetHeight===t.scrollHeight&&(t.scrollTop=e-1)})})}}).listen(),Modernizr.csscalc||new c.default.Event.MatchMedia("(min-width: 960px)",new c.default.Event.Listener(window,["resize","orientationchange"],new c.default.Sidebar.Container("[data-md-component=container]"))),new c.default.Event.MatchMedia("(min-width: 1220px)",new c.default.Event.Listener(window,["scroll","resize","orientationchange"],new c.default.Sidebar.Position("[data-md-component=navigation]"))),new c.default.Event.MatchMedia("(min-width: 960px)",new c.default.Event.Listener(window,["scroll","resize","orientationchange"],new c.default.Sidebar.Position("[data-md-component=toc]"))),new c.default.Event.MatchMedia("(min-width: 960px)",new c.default.Event.Listener(window,"scroll",new c.default.Nav.Blur("[data-md-component=toc] .md-nav__link")));var e=document.querySelectorAll("[data-md-component=collapsible]");Array.prototype.forEach.call(e,function(t){new 
c.default.Event.MatchMedia("(min-width: 1220px)",new c.default.Event.Listener(t.previousElementSibling,"click",new c.default.Nav.Collapse(t)))}),new c.default.Event.MatchMedia("(max-width: 1219px)",new c.default.Event.Listener("[data-md-component=navigation] [data-md-toggle]","change",new c.default.Nav.Scrolling("[data-md-component=navigation] nav"))),new c.default.Event.MatchMedia("(max-width: 959px)",new c.default.Event.Listener("[data-md-toggle=search]","change",new c.default.Search.Lock("[data-md-toggle=search]"))),new c.default.Event.Listener(document.forms.search.query,["focus","keyup"],new c.default.Search.Result("[data-md-component=result]",function(){return fetch(t.config_.url.base+"/mkdocs/search_index.json",{credentials:"same-origin"}).then(function(t){return t.json()}).then(function(e){return e.docs.map(function(e){return e.location=t.config_.url.base+e.location,e})})})).listen(),new c.default.Event.MatchMedia("(max-width: 1219px)",new c.default.Event.Listener("[data-md-component=overlay]","touchstart",function(t){return t.preventDefault()})),new c.default.Event.MatchMedia("(max-width: 959px)",new c.default.Event.Listener("[data-md-component=navigation] [href^='#']","click",function(){var t=document.querySelector("[data-md-toggle=drawer]");t.checked&&(t.checked=!1,t.dispatchEvent(new CustomEvent("change")))})),new c.default.Event.Listener("[data-md-toggle=search]","change",function(t){setTimeout(function(t){var e=document.forms.search.query;t.checked&&e.focus()},400,t.target)}).listen(),new c.default.Event.MatchMedia("(min-width: 960px)",new c.default.Event.Listener(document.forms.search.query,"focus",function(){var t=document.querySelector("[data-md-toggle=search]");t.checked||(t.checked=!0,t.dispatchEvent(new CustomEvent("change")))})),new c.default.Event.MatchMedia("(min-width: 960px)",new c.default.Event.Listener(document.body,"click",function(){var t=document.querySelector("[data-md-toggle=search]");t.checked&&(t.checked=!1,t.dispatchEvent(new 
CustomEvent("change")))})),new c.default.Event.Listener(window,"keyup",function(t){var e=t.keyCode||t.which;if(27===e){var n=document.querySelector("[data-md-toggle=search]");n.checked&&(n.checked=!1,n.dispatchEvent(new CustomEvent("change")),document.forms.search.query.blur())}}).listen(),new c.default.Event.MatchMedia("(min-width: 960px)",new c.default.Event.Listener("[data-md-toggle=search]","click",function(t){return t.stopPropagation()})),new c.default.Event.MatchMedia("(min-width: 960px)",new c.default.Event.Listener("[data-md-component=search]","click",function(t){return t.stopPropagation()})),function(){var t=document.querySelector("[data-md-source]");if(!t)return Promise.resolve([]);switch(t.dataset.mdSource){case"github":return new c.default.Source.Adapter.GitHub(t).fetch();default:return Promise.resolve([])}}().then(function(t){var e=document.querySelectorAll("[data-md-source]");Array.prototype.forEach.call(e,function(e){new c.default.Source.Repository(e).initialize(t)})})}}]),t}();e.default=l,t.exports=e.default},function(t,e,n){var r,o="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t};!function(){"use strict";/**
-	  * @preserve FastClick: polyfill to remove click delays on browsers with touch UIs.
-	  *
-	  * @codingstandard ftlabs-jsv2
-	  * @copyright The Financial Times Limited [All Rights Reserved]
-	  * @license MIT License (see LICENSE.txt)
-	  */
-function i(t,e){function n(t,e){return function(){return t.apply(e,arguments)}}var r;if(e=e||{},this.trackingClick=!1,this.trackingClickStart=0,this.targetElement=null,this.touchStartX=0,this.touchStartY=0,this.lastTouchIdentifier=0,this.touchBoundary=e.touchBoundary||10,this.layer=t,this.tapDelay=e.tapDelay||200,this.tapTimeout=e.tapTimeout||700,!i.notNeeded(t)){for(var o=["onMouse","onClick","onTouchStart","onTouchMove","onTouchEnd","onTouchCancel"],s=this,u=0,c=o.length;u<c;u++)s[o[u]]=n(s[o[u]],s);a&&(t.addEventListener("mouseover",this.onMouse,!0),t.addEventListener("mousedown",this.onMouse,!0),t.addEventListener("mouseup",this.onMouse,!0)),t.addEventListener("click",this.onClick,!0),t.addEventListener("touchstart",this.onTouchStart,!1),t.addEventListener("touchmove",this.onTouchMove,!1),t.addEventListener("touchend",this.onTouchEnd,!1),t.addEventListener("touchcancel",this.onTouchCancel,!1),Event.prototype.stopImmediatePropagation||(t.removeEventListener=function(e,n,r){var o=Node.prototype.removeEventListener;"click"===e?o.call(t,e,n.hijacked||n,r):o.call(t,e,n,r)},t.addEventListener=function(e,n,r){var o=Node.prototype.addEventListener;"click"===e?o.call(t,e,n.hijacked||(n.hijacked=function(t){t.propagationStopped||n(t)}),r):o.call(t,e,n,r)}),"function"==typeof t.onclick&&(r=t.onclick,t.addEventListener("click",function(t){r(t)},!1),t.onclick=null)}}var s=navigator.userAgent.indexOf("Windows Phone")>=0,a=navigator.userAgent.indexOf("Android")>0&&!s,u=/iP(ad|hone|od)/.test(navigator.userAgent)&&!s,c=u&&/OS 4_\d(_\d)?/.test(navigator.userAgent),l=u&&/OS 
[6-7]_\d/.test(navigator.userAgent),f=navigator.userAgent.indexOf("BB10")>0;i.prototype.needsClick=function(t){switch(t.nodeName.toLowerCase()){case"button":case"select":case"textarea":if(t.disabled)return!0;break;case"input":if(u&&"file"===t.type||t.disabled)return!0;break;case"label":case"iframe":case"video":return!0}return/\bneedsclick\b/.test(t.className)},i.prototype.needsFocus=function(t){switch(t.nodeName.toLowerCase()){case"textarea":return!0;case"select":return!a;case"input":switch(t.type){case"button":case"checkbox":case"file":case"image":case"radio":case"submit":return!1}return!t.disabled&&!t.readOnly;default:return/\bneedsfocus\b/.test(t.className)}},i.prototype.sendClick=function(t,e){var n,r;document.activeElement&&document.activeElement!==t&&document.activeElement.blur(),r=e.changedTouches[0],n=document.createEvent("MouseEvents"),n.initMouseEvent(this.determineEventType(t),!0,!0,window,1,r.screenX,r.screenY,r.clientX,r.clientY,!1,!1,!1,!1,0,null),n.forwardedTouchEvent=!0,t.dispatchEvent(n)},i.prototype.determineEventType=function(t){return a&&"select"===t.tagName.toLowerCase()?"mousedown":"click"},i.prototype.focus=function(t){var e;u&&t.setSelectionRange&&0!==t.type.indexOf("date")&&"time"!==t.type&&"month"!==t.type?(e=t.value.length,t.setSelectionRange(e,e)):t.focus()},i.prototype.updateScrollParent=function(t){var e,n;if(e=t.fastClickScrollParent,!e||!e.contains(t)){n=t;do{if(n.scrollHeight>n.offsetHeight){e=n,t.fastClickScrollParent=n;break}n=n.parentElement}while(n)}e&&(e.fastClickLastScrollTop=e.scrollTop)},i.prototype.getTargetElementFromEventTarget=function(t){return t.nodeType===Node.TEXT_NODE?t.parentNode:t},i.prototype.onTouchStart=function(t){var e,n,r;if(t.targetTouches.length>1)return!0;if(e=this.getTargetElementFromEventTarget(t.target),n=t.targetTouches[0],u){if(r=window.getSelection(),r.rangeCount&&!r.isCollapsed)return!0;if(!c){if(n.identifier&&n.identifier===this.lastTouchIdentifier)return 
t.preventDefault(),!1;this.lastTouchIdentifier=n.identifier,this.updateScrollParent(e)}}return this.trackingClick=!0,this.trackingClickStart=t.timeStamp,this.targetElement=e,this.touchStartX=n.pageX,this.touchStartY=n.pageY,t.timeStamp-this.lastClickTime<this.tapDelay&&t.preventDefault(),!0},i.prototype.touchHasMoved=function(t){var e=t.changedTouches[0],n=this.touchBoundary;return Math.abs(e.pageX-this.touchStartX)>n||Math.abs(e.pageY-this.touchStartY)>n},i.prototype.onTouchMove=function(t){return!this.trackingClick||((this.targetElement!==this.getTargetElementFromEventTarget(t.target)||this.touchHasMoved(t))&&(this.trackingClick=!1,this.targetElement=null),!0)},i.prototype.findControl=function(t){return void 0!==t.control?t.control:t.htmlFor?document.getElementById(t.htmlFor):t.querySelector("button, input:not([type=hidden]), keygen, meter, output, progress, select, textarea")},i.prototype.onTouchEnd=function(t){var e,n,r,o,i,s=this.targetElement;if(!this.trackingClick)return!0;if(t.timeStamp-this.lastClickTime<this.tapDelay)return this.cancelNextClick=!0,!0;if(t.timeStamp-this.trackingClickStart>this.tapTimeout)return!0;if(this.cancelNextClick=!1,this.lastClickTime=t.timeStamp,n=this.trackingClickStart,this.trackingClick=!1,this.trackingClickStart=0,l&&(i=t.changedTouches[0],s=document.elementFromPoint(i.pageX-window.pageXOffset,i.pageY-window.pageYOffset)||s,s.fastClickScrollParent=this.targetElement.fastClickScrollParent),r=s.tagName.toLowerCase(),"label"===r){if(e=this.findControl(s)){if(this.focus(s),a)return!1;s=e}}else if(this.needsFocus(s))return 
t.timeStamp-n>100||u&&window.top!==window&&"input"===r?(this.targetElement=null,!1):(this.focus(s),this.sendClick(s,t),u&&"select"===r||(this.targetElement=null,t.preventDefault()),!1);return!(!u||c||(o=s.fastClickScrollParent,!o||o.fastClickLastScrollTop===o.scrollTop))||(this.needsClick(s)||(t.preventDefault(),this.sendClick(s,t)),!1)},i.prototype.onTouchCancel=function(){this.trackingClick=!1,this.targetElement=null},i.prototype.onMouse=function(t){return!this.targetElement||(!!t.forwardedTouchEvent||(!t.cancelable||(!(!this.needsClick(this.targetElement)||this.cancelNextClick)||(t.stopImmediatePropagation?t.stopImmediatePropagation():t.propagationStopped=!0,t.stopPropagation(),t.preventDefault(),!1))))},i.prototype.onClick=function(t){var e;return this.trackingClick?(this.targetElement=null,this.trackingClick=!1,!0):"submit"===t.target.type&&0===t.detail||(e=this.onMouse(t),e||(this.targetElement=null),e)},i.prototype.destroy=function(){var t=this.layer;a&&(t.removeEventListener("mouseover",this.onMouse,!0),t.removeEventListener("mousedown",this.onMouse,!0),t.removeEventListener("mouseup",this.onMouse,!0)),t.removeEventListener("click",this.onClick,!0),t.removeEventListener("touchstart",this.onTouchStart,!1),t.removeEventListener("touchmove",this.onTouchMove,!1),t.removeEventListener("touchend",this.onTouchEnd,!1),t.removeEventListener("touchcancel",this.onTouchCancel,!1)},i.notNeeded=function(t){var e,n,r,o;if("undefined"==typeof 
window.ontouchstart)return!0;if(n=+(/Chrome\/([0-9]+)/.exec(navigator.userAgent)||[,0])[1]){if(!a)return!0;if(e=document.querySelector("meta[name=viewport]")){if(e.content.indexOf("user-scalable=no")!==-1)return!0;if(n>31&&document.documentElement.scrollWidth<=window.outerWidth)return!0}}if(f&&(r=navigator.userAgent.match(/Version\/([0-9]*)\.([0-9]*)/),r[1]>=10&&r[2]>=3&&(e=document.querySelector("meta[name=viewport]")))){if(e.content.indexOf("user-scalable=no")!==-1)return!0;if(document.documentElement.scrollWidth<=window.outerWidth)return!0}return"none"===t.style.msTouchAction||"manipulation"===t.style.touchAction||(o=+(/Firefox\/([0-9]+)/.exec(navigator.userAgent)||[,0])[1],!!(o>=27&&(e=document.querySelector("meta[name=viewport]"),e&&(e.content.indexOf("user-scalable=no")!==-1||document.documentElement.scrollWidth<=window.outerWidth)))||("none"===t.style.touchAction||"manipulation"===t.style.touchAction))},i.attach=function(t,e){return new i(t,e)},"object"===o(n(69))&&n(69)?(r=function(){return i}.call(e,n,e,t),!(void 0!==r&&(t.exports=r))):"undefined"!=typeof t&&t.exports?(t.exports=i.attach,t.exports.FastClick=i):window.FastClick=i}()},function(t,e){(function(e){t.exports=e}).call(e,{})},function(t,e,n){"use strict";function r(t){return t&&t.__esModule?t:{default:t}}Object.defineProperty(e,"__esModule",{value:!0});var o=n(71),i=r(o),s=n(74),a=r(s),u=n(78),c=r(u),l=n(83),f=r(l),d=n(86),h=r(d);e.default={Event:i.default,Nav:a.default,Search:c.default,Sidebar:f.default,Source:h.default},t.exports=e.default},function(t,e,n){"use strict";function r(t){return t&&t.__esModule?t:{default:t}}Object.defineProperty(e,"__esModule",{value:!0});var o=n(72),i=r(o),s=n(73),a=r(s);e.default={Listener:i.default,MatchMedia:a.default},t.exports=e.default},function(t,e){"use strict";function n(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}Object.defineProperty(e,"__esModule",{value:!0});var r=function(){function t(t,e){for(var 
n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),o=function(){function t(e,r,o){var i=this;n(this,t),this.els_="string"==typeof e?document.querySelectorAll(e):[].concat(e),this.handler_="function"==typeof o?{update:o}:o,this.events_=[].concat(r),this.update_=function(t){return i.handler_.update(t)}}return r(t,[{key:"listen",value:function(){var t=this;Array.prototype.forEach.call(this.els_,function(e){t.events_.forEach(function(n){e.addEventListener(n,t.update_,!1)})}),"function"==typeof this.handler_.setup&&this.handler_.setup()}},{key:"unlisten",value:function(){var t=this;Array.prototype.forEach.call(this.els_,function(e){t.events_.forEach(function(n){e.removeEventListener(n,t.update_)})}),"function"==typeof this.handler_.reset&&this.handler_.reset()}}]),t}();e.default=o,t.exports=e.default},function(t,e){"use strict";function n(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}Object.defineProperty(e,"__esModule",{value:!0});var r=function t(e,r){n(this,t),this.handler_=function(t){t.matches?r.listen():r.unlisten()};var o=window.matchMedia(e);o.addListener(this.handler_),this.handler_(o)};e.default=r,t.exports=e.default},function(t,e,n){"use strict";function r(t){return t&&t.__esModule?t:{default:t}}Object.defineProperty(e,"__esModule",{value:!0});var o=n(75),i=r(o),s=n(76),a=r(s),u=n(77),c=r(u);e.default={Blur:i.default,Collapse:a.default,Scrolling:c.default},t.exports=e.default},function(t,e){"use strict";function n(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}Object.defineProperty(e,"__esModule",{value:!0});var r=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return 
n&&t(e.prototype,n),r&&t(e,r),e}}(),o=function(){function t(e){n(this,t),this.els_="string"==typeof e?document.querySelectorAll(e):e,this.index_=0,this.offset_=window.pageYOffset,this.anchors_=[].map.call(this.els_,function(t){return document.getElementById(t.hash.substring(1))})}return r(t,[{key:"setup",value:function(){this.update()}},{key:"update",value:function(){var t=window.pageYOffset;if(0!==this.anchors_.length){if(this.offset_<=t)for(var e=this.index_+1;e<this.els_.length&&this.anchors_[e].offsetTop-80<=t;e++)e>0&&(this.els_[e-1].dataset.mdState="blur"),this.index_=e;else for(var n=this.index_;n>=0;n--){if(!(this.anchors_[n].offsetTop-80>t)){this.index_=n;break}n>0&&(this.els_[n-1].dataset.mdState="")}this.offset_=t}}},{key:"reset",value:function(){Array.prototype.forEach.call(this.els_,function(t){t.dataset.mdState=""}),this.index_=0,this.offset_=window.pageYOffset}}]),t}();e.default=o,t.exports=e.default},function(t,e){"use strict";function n(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}Object.defineProperty(e,"__esModule",{value:!0});var r=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),o=function(){function t(e){n(this,t),this.el_="string"==typeof e?document.querySelector(e):e}return r(t,[{key:"update",value:function(){var t=this,e=this.el_.getBoundingClientRect().height;e?(this.el_.style.maxHeight=e+"px",requestAnimationFrame(function(){t.el_.setAttribute("data-md-state","animate"),t.el_.style.maxHeight="0px"})):!function(){t.el_.setAttribute("data-md-state","expand"),t.el_.style.maxHeight="";var e=t.el_.getBoundingClientRect().height;t.el_.removeAttribute("data-md-state"),t.el_.style.maxHeight="0px",requestAnimationFrame(function(){t.el_.setAttribute("data-md-state","animate"),t.el_.style.maxHeight=e+"px"})}();var 
n=function t(e){e.target.removeAttribute("data-md-state"),e.target.style.maxHeight="",e.target.removeEventListener("transitionend",t)};this.el_.addEventListener("transitionend",n,!1)}},{key:"reset",value:function(){this.el_.dataset.mdState="",this.el_.style.maxHeight=""}}]),t}();e.default=o,t.exports=e.default},function(t,e){"use strict";function n(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}Object.defineProperty(e,"__esModule",{value:!0});var r=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),o=function(){function t(e){n(this,t),this.el_="string"==typeof e?document.querySelector(e):e}return r(t,[{key:"setup",value:function(){this.el_.children[1].style.webkitOverflowScrolling="touch";var t=this.el_.querySelectorAll("[data-md-toggle]");Array.prototype.forEach.call(t,function(t){if(t.checked){for(var e=t.nextElementSibling;"NAV"!==e.tagName;)e=e.nextElementSibling;var n=t.parentNode.parentNode,r=e.children[e.children.length-1];n.style.webkitOverflowScrolling="",r.style.webkitOverflowScrolling="touch"}})}},{key:"update",value:function(t){for(var e=t.target.nextElementSibling;"NAV"!==e.tagName;)e=e.nextElementSibling;var n=t.target.parentNode.parentNode,r=e.children[e.children.length-1];n.style.webkitOverflowScrolling="",r.style.webkitOverflowScrolling="",t.target.checked||!function(){var t=function t(){n.style.webkitOverflowScrolling="touch",e.removeEventListener("transitionend",t)};e.addEventListener("transitionend",t,!1)}(),t.target.checked&&!function(){var t=function t(){r.style.webkitOverflowScrolling="touch",e.removeEventListener("transitionend",t,!1)};e.addEventListener("transitionend",t,!1)}()}},{key:"reset",value:function(){this.el_.children[1].style.webkitOverflowScrolling="";var 
t=this.el_.querySelectorAll("[data-md-toggle]");Array.prototype.forEach.call(t,function(t){if(t.checked){for(var e=t.nextElementSibling;"NAV"!==e.tagName;)e=e.nextElementSibling;var n=t.parentNode.parentNode,r=e.children[e.children.length-1];n.style.webkitOverflowScrolling="",r.style.webkitOverflowScrolling=""}})}}]),t}();e.default=o,t.exports=e.default},function(t,e,n){"use strict";function r(t){return t&&t.__esModule?t:{default:t}}Object.defineProperty(e,"__esModule",{value:!0});var o=n(79),i=r(o),s=n(80),a=r(s);e.default={Lock:i.default,Result:a.default},t.exports=e.default},function(t,e){"use strict";function n(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}Object.defineProperty(e,"__esModule",{value:!0});var r=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),o=function(){function t(e){n(this,t),this.el_="string"==typeof e?document.querySelector(e):e}return r(t,[{key:"setup",value:function(){this.update()}},{key:"update",value:function(){var t=this;this.el_.checked?(this.offset_=window.pageYOffset,setTimeout(function(){window.scrollTo(0,0),t.el_.checked&&(document.body.dataset.mdState="lock")},400)):(document.body.dataset.mdState="",setTimeout(function(){"undefined"!=typeof t.offset_&&window.scrollTo(0,t.offset_)},100))}},{key:"reset",value:function(){"lock"===document.body.dataset.mdState&&window.scrollTo(0,this.offset_),document.body.dataset.mdState=""}}]),t}();e.default=o,t.exports=e.default},function(t,e,n){(function(r){"use strict";function o(t){return t&&t.__esModule?t:{default:t}}function i(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}Object.defineProperty(e,"__esModule",{value:!0});var s=function(){function t(t,e){var n=[],r=!0,o=!1,i=void 0;try{for(var 
s,a=t[Symbol.iterator]();!(r=(s=a.next()).done)&&(n.push(s.value),!e||n.length!==e);r=!0);}catch(t){o=!0,i=t}finally{try{!r&&a.return&&a.return()}finally{if(o)throw i}}return n}return function(e,n){if(Array.isArray(e))return e;if(Symbol.iterator in Object(e))return t(e,n);throw new TypeError("Invalid attempt to destructure non-iterable instance")}}(),a=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),u=n(82),c=o(u),l=function(){function t(e,n){i(this,t),this.el_="string"==typeof e?document.querySelector(e):e,this.data_=n,this.meta_=r.createElement("div",{class:"md-search-result__meta"},"Type to start searching"),this.list_=r.createElement("ol",{class:"md-search-result__list"}),this.el_.appendChild(this.meta_),this.el_.appendChild(this.list_),this.truncate_=function(t,e){var n=e;if(t.length>n){for(;" "!==t[n]&&--n>0;);return t.substring(0,n)+"..."}return t}}return a(t,[{key:"update",value:function(t){var e=this;if("focus"!==t.type||this.index_){if("keyup"===t.type){for(;this.list_.firstChild;)this.list_.removeChild(this.list_.firstChild);var n=this.index_.search(t.target.value);n.forEach(function(t){var n=e.data_[t.ref],o=n.location.split("#"),i=s(o,1),a=i[0];a=a.replace(/^(\/?\.{2})+/g,""),e.list_.appendChild(r.createElement("li",{class:"md-search-result__item"},r.createElement("a",{href:n.location,title:n.title,class:"md-search-result__link","data-md-rel":a===document.location.pathname?"anchor":""},r.createElement("article",{class:"md-search-result__article"},r.createElement("h1",{class:"md-search-result__title"},n.title),r.createElement("p",{class:"md-search-result__teaser"},e.truncate_(n.text,140))))))});var o=this.list_.querySelectorAll("[data-md-rel=anchor]");Array.prototype.forEach.call(o,function(t){t.addEventListener("click",function(e){var 
n=document.querySelector("[data-md-toggle=search]");n.checked&&(n.checked=!1,n.dispatchEvent(new CustomEvent("change"))),e.preventDefault(),setTimeout(function(){document.location.href=t.href},100)})}),this.meta_.textContent=n.length+" search result"+(1!==n.length?"s":"")}}else!function(){var t=function(t){e.index_=(0,c.default)(function(){this.field("title",{boost:10}),this.field("text"),this.ref("location")}),e.data_=t.reduce(function(t,n){return e.index_.add(n),t[n.location]=n,t},{})};setTimeout(function(){return"function"==typeof e.data_?e.data_().then(t):t(e.data_)},250)}()}}]),t}();e.default=l,t.exports=e.default}).call(e,n(81))},function(t,e){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.default={createElement:function(t,e){var n=document.createElement(t);e&&Array.prototype.forEach.call(Object.keys(e),function(t){n.setAttribute(t,e[t])});for(var r=function t(e){Array.prototype.forEach.call(e,function(e){"string"==typeof e||"number"==typeof e?n.textContent+=e:Array.isArray(e)?t(e):n.appendChild(e)})},o=arguments.length,i=Array(o>2?o-2:0),s=2;s<o;s++)i[s-2]=arguments[s];return r(i),n}},t.exports=e.default},function(t,e,n){var r,o;"function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t};!function(){var i=function t(e){var n=new t.Index;return n.pipeline.add(t.trimmer,t.stopWordFilter,t.stemmer),e&&e.call(n,n),n};i.version="0.7.2",/*!
-	   * lunr.utils
-	   * Copyright (C) 2016 Oliver Nightingale
-	   */
-i.utils={},i.utils.warn=function(t){return function(e){t.console&&console.warn&&console.warn(e)}}(this),i.utils.asString=function(t){return void 0===t||null===t?"":t.toString()},/*!
-	   * lunr.EventEmitter
-	   * Copyright (C) 2016 Oliver Nightingale
-	   */
-i.EventEmitter=function(){this.events={}},i.EventEmitter.prototype.addListener=function(){var t=Array.prototype.slice.call(arguments),e=t.pop(),n=t;if("function"!=typeof e)throw new TypeError("last argument must be a function");n.forEach(function(t){this.hasHandler(t)||(this.events[t]=[]),this.events[t].push(e)},this)},i.EventEmitter.prototype.removeListener=function(t,e){if(this.hasHandler(t)){var n=this.events[t].indexOf(e);this.events[t].splice(n,1),this.events[t].length||delete this.events[t]}},i.EventEmitter.prototype.emit=function(t){if(this.hasHandler(t)){var e=Array.prototype.slice.call(arguments,1);this.events[t].forEach(function(t){t.apply(void 0,e)})}},i.EventEmitter.prototype.hasHandler=function(t){return t in this.events},/*!
-	   * lunr.tokenizer
-	   * Copyright (C) 2016 Oliver Nightingale
-	   */
-i.tokenizer=function(t){if(!arguments.length||null==t||void 0==t)return[];if(Array.isArray(t))return t.map(function(t){return i.utils.asString(t).toLowerCase()});var e=i.tokenizer.seperator||i.tokenizer.separator;return t.toString().trim().toLowerCase().split(e)},i.tokenizer.seperator=!1,i.tokenizer.separator=/[\s\-]+/,i.tokenizer.load=function(t){var e=this.registeredFunctions[t];if(!e)throw new Error("Cannot load un-registered function: "+t);return e},i.tokenizer.label="default",i.tokenizer.registeredFunctions={default:i.tokenizer},i.tokenizer.registerFunction=function(t,e){e in this.registeredFunctions&&i.utils.warn("Overwriting existing tokenizer: "+e),t.label=e,this.registeredFunctions[e]=t},/*!
-	   * lunr.Pipeline
-	   * Copyright (C) 2016 Oliver Nightingale
-	   */
-i.Pipeline=function(){this._stack=[]},i.Pipeline.registeredFunctions={},i.Pipeline.registerFunction=function(t,e){e in this.registeredFunctions&&i.utils.warn("Overwriting existing registered function: "+e),t.label=e,i.Pipeline.registeredFunctions[t.label]=t},i.Pipeline.warnIfFunctionNotRegistered=function(t){var e=t.label&&t.label in this.registeredFunctions;e||i.utils.warn("Function is not registered with pipeline. This may cause problems when serialising the index.\n",t)},i.Pipeline.load=function(t){var e=new i.Pipeline;return t.forEach(function(t){var n=i.Pipeline.registeredFunctions[t];if(!n)throw new Error("Cannot load un-registered function: "+t);e.add(n)}),e},i.Pipeline.prototype.add=function(){var t=Array.prototype.slice.call(arguments);t.forEach(function(t){i.Pipeline.warnIfFunctionNotRegistered(t),this._stack.push(t)},this)},i.Pipeline.prototype.after=function(t,e){i.Pipeline.warnIfFunctionNotRegistered(e);var n=this._stack.indexOf(t);if(n==-1)throw new Error("Cannot find existingFn");n+=1,this._stack.splice(n,0,e)},i.Pipeline.prototype.before=function(t,e){i.Pipeline.warnIfFunctionNotRegistered(e);var n=this._stack.indexOf(t);if(n==-1)throw new Error("Cannot find existingFn");this._stack.splice(n,0,e)},i.Pipeline.prototype.remove=function(t){var e=this._stack.indexOf(t);e!=-1&&this._stack.splice(e,1)},i.Pipeline.prototype.run=function(t){for(var e=[],n=t.length,r=this._stack.length,o=0;o<n;o++){for(var i=t[o],s=0;s<r&&(i=this._stack[s](i,o,t),void 0!==i&&""!==i);s++);void 0!==i&&""!==i&&e.push(i)}return e},i.Pipeline.prototype.reset=function(){this._stack=[]},i.Pipeline.prototype.toJSON=function(){return this._stack.map(function(t){return i.Pipeline.warnIfFunctionNotRegistered(t),t.label})},/*!
-	   * lunr.Vector
-	   * Copyright (C) 2016 Oliver Nightingale
-	   */
-i.Vector=function(){this._magnitude=null,this.list=void 0,this.length=0},i.Vector.Node=function(t,e,n){this.idx=t,this.val=e,this.next=n},i.Vector.prototype.insert=function(t,e){this._magnitude=void 0;var n=this.list;if(!n)return this.list=new i.Vector.Node(t,e,n),this.length++;if(t<n.idx)return this.list=new i.Vector.Node(t,e,n),this.length++;for(var r=n,o=n.next;void 0!=o;){if(t<o.idx)return r.next=new i.Vector.Node(t,e,o),this.length++;r=o,o=o.next}return r.next=new i.Vector.Node(t,e,o),this.length++},i.Vector.prototype.magnitude=function(){if(this._magnitude)return this._magnitude;for(var t,e=this.list,n=0;e;)t=e.val,n+=t*t,e=e.next;return this._magnitude=Math.sqrt(n)},i.Vector.prototype.dot=function(t){for(var e=this.list,n=t.list,r=0;e&&n;)e.idx<n.idx?e=e.next:e.idx>n.idx?n=n.next:(r+=e.val*n.val,e=e.next,n=n.next);return r},i.Vector.prototype.similarity=function(t){return this.dot(t)/(this.magnitude()*t.magnitude())},/*!
-	   * lunr.SortedSet
-	   * Copyright (C) 2016 Oliver Nightingale
-	   */
-i.SortedSet=function(){this.length=0,this.elements=[]},i.SortedSet.load=function(t){var e=new this;return e.elements=t,e.length=t.length,e},i.SortedSet.prototype.add=function(){var t,e;for(t=0;t<arguments.length;t++)e=arguments[t],~this.indexOf(e)||this.elements.splice(this.locationFor(e),0,e);this.length=this.elements.length},i.SortedSet.prototype.toArray=function(){return this.elements.slice()},i.SortedSet.prototype.map=function(t,e){return this.elements.map(t,e)},i.SortedSet.prototype.forEach=function(t,e){return this.elements.forEach(t,e)},i.SortedSet.prototype.indexOf=function(t){for(var e=0,n=this.elements.length,r=n-e,o=e+Math.floor(r/2),i=this.elements[o];r>1;){if(i===t)return o;i<t&&(e=o),i>t&&(n=o),r=n-e,o=e+Math.floor(r/2),i=this.elements[o]}return i===t?o:-1},i.SortedSet.prototype.locationFor=function(t){for(var e=0,n=this.elements.length,r=n-e,o=e+Math.floor(r/2),i=this.elements[o];r>1;)i<t&&(e=o),i>t&&(n=o),r=n-e,o=e+Math.floor(r/2),i=this.elements[o];return i>t?o:i<t?o+1:void 0},i.SortedSet.prototype.intersect=function(t){for(var e=new i.SortedSet,n=0,r=0,o=this.length,s=t.length,a=this.elements,u=t.elements;;){if(n>o-1||r>s-1)break;a[n]!==u[r]?a[n]<u[r]?n++:a[n]>u[r]&&r++:(e.add(a[n]),n++,r++)}return e},i.SortedSet.prototype.clone=function(){var t=new i.SortedSet;return t.elements=this.toArray(),t.length=t.elements.length,t},i.SortedSet.prototype.union=function(t){var e,n,r;this.length>=t.length?(e=this,n=t):(e=t,n=this),r=e.clone();for(var o=0,i=n.toArray();o<i.length;o++)r.add(i[o]);return r},i.SortedSet.prototype.toJSON=function(){return this.toArray()},/*!
-	   * lunr.Index
-	   * Copyright (C) 2016 Oliver Nightingale
-	   */
-i.Index=function(){this._fields=[],this._ref="id",this.pipeline=new i.Pipeline,this.documentStore=new i.Store,this.tokenStore=new i.TokenStore,this.corpusTokens=new i.SortedSet,this.eventEmitter=new i.EventEmitter,this.tokenizerFn=i.tokenizer,this._idfCache={},this.on("add","remove","update",function(){this._idfCache={}}.bind(this))},i.Index.prototype.on=function(){var t=Array.prototype.slice.call(arguments);return this.eventEmitter.addListener.apply(this.eventEmitter,t)},i.Index.prototype.off=function(t,e){return this.eventEmitter.removeListener(t,e)},i.Index.load=function(t){t.version!==i.version&&i.utils.warn("version mismatch: current "+i.version+" importing "+t.version);var e=new this;return e._fields=t.fields,e._ref=t.ref,e.tokenizer(i.tokenizer.load(t.tokenizer)),e.documentStore=i.Store.load(t.documentStore),e.tokenStore=i.TokenStore.load(t.tokenStore),e.corpusTokens=i.SortedSet.load(t.corpusTokens),e.pipeline=i.Pipeline.load(t.pipeline),e},i.Index.prototype.field=function(t,e){var e=e||{},n={name:t,boost:e.boost||1};return this._fields.push(n),this},i.Index.prototype.ref=function(t){return this._ref=t,this},i.Index.prototype.tokenizer=function(t){var e=t.label&&t.label in i.tokenizer.registeredFunctions;return e||i.utils.warn("Function is not a registered tokenizer. 
This may cause problems when serialising the index"),this.tokenizerFn=t,this},i.Index.prototype.add=function(t,e){var n={},r=new i.SortedSet,o=t[this._ref],e=void 0===e||e;this._fields.forEach(function(e){var o=this.pipeline.run(this.tokenizerFn(t[e.name]));n[e.name]=o;for(var i=0;i<o.length;i++){var s=o[i];r.add(s),this.corpusTokens.add(s)}},this),this.documentStore.set(o,r);for(var s=0;s<r.length;s++){for(var a=r.elements[s],u=0,c=0;c<this._fields.length;c++){var l=this._fields[c],f=n[l.name],d=f.length;if(d){for(var h=0,p=0;p<d;p++)f[p]===a&&h++;u+=h/d*l.boost}}this.tokenStore.add(a,{ref:o,tf:u})}e&&this.eventEmitter.emit("add",t,this)},i.Index.prototype.remove=function(t,e){var n=t[this._ref],e=void 0===e||e;if(this.documentStore.has(n)){var r=this.documentStore.get(n);this.documentStore.remove(n),r.forEach(function(t){this.tokenStore.remove(t,n)},this),e&&this.eventEmitter.emit("remove",t,this)}},i.Index.prototype.update=function(t,e){var e=void 0===e||e;this.remove(t,!1),this.add(t,!1),e&&this.eventEmitter.emit("update",t,this)},i.Index.prototype.idf=function(t){var e="@"+t;if(Object.prototype.hasOwnProperty.call(this._idfCache,e))return this._idfCache[e];var n=this.tokenStore.count(t),r=1;return n>0&&(r=1+Math.log(this.documentStore.length/n)),this._idfCache[e]=r},i.Index.prototype.search=function(t){var e=this.pipeline.run(this.tokenizerFn(t)),n=new i.Vector,r=[],o=this._fields.reduce(function(t,e){return t+e.boost},0),s=e.some(function(t){return this.tokenStore.has(t)},this);if(!s)return[];e.forEach(function(t,e,s){var a=1/s.length*this._fields.length*o,u=this,c=this.tokenStore.expand(t).reduce(function(e,r){var o=u.corpusTokens.indexOf(r),s=u.idf(r),c=1,l=new i.SortedSet;if(r!==t){var f=Math.max(3,r.length-t.length);c=1/Math.log(f)}o>-1&&n.insert(o,a*s*c);for(var d=u.tokenStore.get(r),h=Object.keys(d),p=h.length,v=0;v<p;v++)l.add(d[h[v]].ref);return e.union(l)},new i.SortedSet);r.push(c)},this);var a=r.reduce(function(t,e){return t.intersect(e)});return 
a.map(function(t){return{ref:t,score:n.similarity(this.documentVector(t))}},this).sort(function(t,e){return e.score-t.score})},i.Index.prototype.documentVector=function(t){for(var e=this.documentStore.get(t),n=e.length,r=new i.Vector,o=0;o<n;o++){var s=e.elements[o],a=this.tokenStore.get(s)[t].tf,u=this.idf(s);r.insert(this.corpusTokens.indexOf(s),a*u)}return r},i.Index.prototype.toJSON=function(){return{version:i.version,fields:this._fields,ref:this._ref,tokenizer:this.tokenizerFn.label,documentStore:this.documentStore.toJSON(),tokenStore:this.tokenStore.toJSON(),corpusTokens:this.corpusTokens.toJSON(),pipeline:this.pipeline.toJSON()}},i.Index.prototype.use=function(t){var e=Array.prototype.slice.call(arguments,1);e.unshift(this),t.apply(this,e)},/*!
-	   * lunr.Store
-	   * Copyright (C) 2016 Oliver Nightingale
-	   */
-i.Store=function(){this.store={},this.length=0},i.Store.load=function(t){var e=new this;return e.length=t.length,e.store=Object.keys(t.store).reduce(function(e,n){return e[n]=i.SortedSet.load(t.store[n]),e},{}),e},i.Store.prototype.set=function(t,e){this.has(t)||this.length++,this.store[t]=e},i.Store.prototype.get=function(t){return this.store[t]},i.Store.prototype.has=function(t){return t in this.store},i.Store.prototype.remove=function(t){this.has(t)&&(delete this.store[t],this.length--)},i.Store.prototype.toJSON=function(){return{store:this.store,length:this.length}},/*!
-	   * lunr.stemmer
-	   * Copyright (C) 2016 Oliver Nightingale
-	   * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt
-	   */
-i.stemmer=function(){var t={ational:"ate",tional:"tion",enci:"ence",anci:"ance",izer:"ize",bli:"ble",alli:"al",entli:"ent",eli:"e",ousli:"ous",ization:"ize",ation:"ate",ator:"ate",alism:"al",iveness:"ive",fulness:"ful",ousness:"ous",aliti:"al",iviti:"ive",biliti:"ble",logi:"log"},e={icate:"ic",ative:"",alize:"al",iciti:"ic",ical:"ic",ful:"",ness:""},n="[^aeiou]",r="[aeiouy]",o=n+"[^aeiouy]*",i=r+"[aeiou]*",s="^("+o+")?"+i+o,a="^("+o+")?"+i+o+"("+i+")?$",u="^("+o+")?"+i+o+i+o,c="^("+o+")?"+r,l=new RegExp(s),f=new RegExp(u),d=new RegExp(a),h=new RegExp(c),p=/^(.+?)(ss|i)es$/,v=/^(.+?)([^s])s$/,y=/^(.+?)eed$/,m=/^(.+?)(ed|ing)$/,g=/.$/,b=/(at|bl|iz)$/,w=new RegExp("([^aeiouylsz])\\1$"),_=new RegExp("^"+o+r+"[^aeiouwxy]$"),S=/^(.+?[^aeiou])y$/,x=/^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/,E=/^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/,k=/^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/,T=/^(.+?)(s|t)(ion)$/,O=/^(.+?)e$/,C=/ll$/,P=new RegExp("^"+o+r+"[^aeiouwxy]$"),A=function(n){var r,o,i,s,a,u,c;if(n.length<3)return n;if(i=n.substr(0,1),"y"==i&&(n=i.toUpperCase()+n.substr(1)),s=p,a=v,s.test(n)?n=n.replace(s,"$1$2"):a.test(n)&&(n=n.replace(a,"$1$2")),s=y,a=m,s.test(n)){var A=s.exec(n);s=l,s.test(A[1])&&(s=g,n=n.replace(s,""))}else if(a.test(n)){var A=a.exec(n);r=A[1],a=h,a.test(r)&&(n=r,a=b,u=w,c=_,a.test(n)?n+="e":u.test(n)?(s=g,n=n.replace(s,"")):c.test(n)&&(n+="e"))}if(s=S,s.test(n)){var A=s.exec(n);r=A[1],n=r+"i"}if(s=x,s.test(n)){var A=s.exec(n);r=A[1],o=A[2],s=l,s.test(r)&&(n=r+t[o])}if(s=E,s.test(n)){var A=s.exec(n);r=A[1],o=A[2],s=l,s.test(r)&&(n=r+e[o])}if(s=k,a=T,s.test(n)){var A=s.exec(n);r=A[1],s=f,s.test(r)&&(n=r)}else if(a.test(n)){var A=a.exec(n);r=A[1]+A[2],a=f,a.test(r)&&(n=r)}if(s=O,s.test(n)){var A=s.exec(n);r=A[1],s=f,a=d,u=P,(s.test(r)||a.test(r)&&!u.test(r))&&(n=r)}return 
s=C,a=f,s.test(n)&&a.test(n)&&(s=g,n=n.replace(s,"")),"y"==i&&(n=i.toLowerCase()+n.substr(1)),n};return A}(),i.Pipeline.registerFunction(i.stemmer,"stemmer"),/*!
-	   * lunr.stopWordFilter
-	   * Copyright (C) 2016 Oliver Nightingale
-	   */
-i.generateStopWordFilter=function(t){var e=t.reduce(function(t,e){return t[e]=e,t},{});return function(t){if(t&&e[t]!==t)return t}},i.stopWordFilter=i.generateStopWordFilter(["a","able","about","across","after","all","almost","also","am","among","an","and","any","are","as","at","be","because","been","but","by","can","cannot","could","dear","did","do","does","either","else","ever","every","for","from","get","got","had","has","have","he","her","hers","him","his","how","however","i","if","in","into","is","it","its","just","least","let","like","likely","may","me","might","most","must","my","neither","no","nor","not","of","off","often","on","only","or","other","our","own","rather","said","say","says","she","should","since","so","some","than","that","the","their","them","then","there","these","they","this","tis","to","too","twas","us","wants","was","we","were","what","when","where","which","while","who","whom","why","will","with","would","yet","you","your"]),i.Pipeline.registerFunction(i.stopWordFilter,"stopWordFilter"),/*!
-	   * lunr.trimmer
-	   * Copyright (C) 2016 Oliver Nightingale
-	   */
-i.trimmer=function(t){return t.replace(/^\W+/,"").replace(/\W+$/,"")},i.Pipeline.registerFunction(i.trimmer,"trimmer"),/*!
-	   * lunr.stemmer
-	   * Copyright (C) 2016 Oliver Nightingale
-	   * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt
-	   */
-i.TokenStore=function(){this.root={docs:{}},this.length=0},i.TokenStore.load=function(t){var e=new this;return e.root=t.root,e.length=t.length,e},i.TokenStore.prototype.add=function(t,e,n){var n=n||this.root,r=t.charAt(0),o=t.slice(1);return r in n||(n[r]={docs:{}}),0===o.length?(n[r].docs[e.ref]=e,void(this.length+=1)):this.add(o,e,n[r])},i.TokenStore.prototype.has=function(t){if(!t)return!1;for(var e=this.root,n=0;n<t.length;n++){if(!e[t.charAt(n)])return!1;e=e[t.charAt(n)]}return!0},i.TokenStore.prototype.getNode=function(t){if(!t)return{};for(var e=this.root,n=0;n<t.length;n++){if(!e[t.charAt(n)])return{};e=e[t.charAt(n)]}return e},i.TokenStore.prototype.get=function(t,e){return this.getNode(t,e).docs||{}},i.TokenStore.prototype.count=function(t,e){return Object.keys(this.get(t,e)).length},i.TokenStore.prototype.remove=function(t,e){if(t){for(var n=this.root,r=0;r<t.length;r++){if(!(t.charAt(r)in n))return;n=n[t.charAt(r)]}delete n.docs[e]}},i.TokenStore.prototype.expand=function(t,e){var n=this.getNode(t),r=n.docs||{},e=e||[];return Object.keys(r).length&&e.push(t),Object.keys(n).forEach(function(n){"docs"!==n&&e.concat(this.expand(t+n,e))},this),e},i.TokenStore.prototype.toJSON=function(){return{root:this.root,length:this.length}},function(i,s){r=s,o="function"==typeof r?r.call(e,n,e,t):r,!(void 0!==o&&(t.exports=o))}(this,function(){return i})}()},function(t,e,n){"use strict";function r(t){return t&&t.__esModule?t:{default:t}}Object.defineProperty(e,"__esModule",{value:!0});var o=n(84),i=r(o),s=n(85),a=r(s);e.default={Container:i.default,Position:a.default},t.exports=e.default},function(t,e){"use strict";function n(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}Object.defineProperty(e,"__esModule",{value:!0});var r=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return 
n&&t(e.prototype,n),r&&t(e,r),e}}(),o=function(){function t(e){n(this,t),this.el_="string"==typeof e?document.querySelector(e):e,this.parent_=this.el_.parentNode}return r(t,[{key:"setup",value:function(){this.update()}},{key:"update",value:function(){var t=this.parent_.offsetHeight-this.el_.offsetTop;this.el_.style.minHeight=t+"px"}},{key:"reset",value:function(){this.el_.style.minHeight=""}}]),t}();e.default=o,t.exports=e.default},function(t,e){"use strict";function n(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}Object.defineProperty(e,"__esModule",{value:!0});var r=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),o=function(){function t(e){n(this,t),this.el_="string"==typeof e?document.querySelector(e):e,this.parent_=this.el_.parentNode,this.height_=0}return r(t,[{key:"setup",value:function(){this.offset_=this.el_.offsetTop-this.parent_.offsetTop,this.update()}},{key:"update",value:function(){var t=window.pageYOffset,e=window.innerHeight;this.bounds_={top:this.parent_.offsetTop,bottom:this.parent_.offsetTop+this.parent_.offsetHeight};var n=e-this.bounds_.top-Math.max(0,this.offset_-t)-Math.max(0,t+e-this.bounds_.bottom);n!==this.height_&&(this.el_.style.height=(this.height_=n)+"px"),t>=this.offset_?"lock"!==this.el_.dataset.mdState&&(this.el_.dataset.mdState="lock"):"lock"===this.el_.dataset.mdState&&(this.el_.dataset.mdState="")}},{key:"reset",value:function(){this.el_.dataset.mdState="",this.el_.style.height="",this.height_=0}}]),t}();e.default=o,t.exports=e.default},function(t,e,n){"use strict";function r(t){return t&&t.__esModule?t:{default:t}}Object.defineProperty(e,"__esModule",{value:!0});var o=n(87),i=r(o),s=n(91),a=r(s);e.default={Adapter:i.default,Repository:a.default},t.exports=e.default},function(t,e,n){"use 
strict";function r(t){return t&&t.__esModule?t:{default:t}}Object.defineProperty(e,"__esModule",{value:!0});var o=n(88),i=r(o);e.default={GitHub:i.default},t.exports=e.default},function(t,e,n){"use strict";function r(t){return t&&t.__esModule?t:{default:t}}function o(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}function i(t,e){if(!t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!e||"object"!=typeof e&&"function"!=typeof e?t:e}function s(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function, not "+typeof e);t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,enumerable:!1,writable:!0,configurable:!0}}),e&&(Object.setPrototypeOf?Object.setPrototypeOf(t,e):t.__proto__=e)}Object.defineProperty(e,"__esModule",{value:!0});var a=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),u=n(89),c=r(u),l=function(t){function e(t){o(this,e);var n=i(this,(e.__proto__||Object.getPrototypeOf(e)).call(this,t));return n.base_=n.base_.replace("github.com/","api.github.com/repos/"),n}return s(e,t),a(e,[{key:"fetch_",value:function(){var t=this;return fetch(this.base_).then(function(t){return t.json()}).then(function(e){return[t.format_(e.stargazers_count)+" Stars",t.format_(e.forks_count)+" Forks"]})}}]),e}(c.default);e.default=l,t.exports=e.default},function(t,e,n){"use strict";function r(t){return t&&t.__esModule?t:{default:t}}function o(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}Object.defineProperty(e,"__esModule",{value:!0});var i=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in 
r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),s=n(90),a=r(s),u=function(){function t(e){o(this,t),this.el_="string"==typeof e?document.querySelector(e):e,this.base_=this.el_.href,this.salt_=this.hash_(this.base_)}return i(t,[{key:"fetch",value:function(){var t=this;return new Promise(function(e){var n=a.default.getJSON(t.salt_+".cache-source");"undefined"!=typeof n?e(n):t.fetch_().then(function(n){a.default.set(t.salt_+".cache-source",n,{expires:1/96}),e(n)})})}},{key:"fetch_",value:function(){throw new Error("fetch_(): Not implemented")}},{key:"format_",value:function(t){return t>1e4?(t/1e3).toFixed(0)+"k":t>1e3?(t/1e3).toFixed(1)+"k":t}},{key:"hash_",value:function(t){var e=0;if(0===t.length)return e;for(var n=0,r=t.length;n<r;n++)e=(e<<5)-e+t.charCodeAt(n),e|=0;return e}}]),t}();e.default=u,t.exports=e.default},function(t,e,n){var r,o,i="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t};!function(s){var a=!1;if(r=s,o="function"==typeof r?r.call(e,n,e,t):r,!(void 0!==o&&(t.exports=o)),a=!0,"object"===i(e)&&(t.exports=s(),a=!0),!a){var u=window.Cookies,c=window.Cookies=s();c.noConflict=function(){return window.Cookies=u,c}}}(function(){function t(){for(var t=0,e={};t<arguments.length;t++){var n=arguments[t];for(var r in n)e[r]=n[r]}return e}function e(n){function r(e,o,i){var s;if("undefined"!=typeof document){if(arguments.length>1){if(i=t({path:"/"},r.defaults,i),"number"==typeof i.expires){var a=new Date;a.setMilliseconds(a.getMilliseconds()+864e5*i.expires),i.expires=a}try{s=JSON.stringify(o),/^[\{\[]/.test(s)&&(o=s)}catch(t){}return 
o=n.write?n.write(o,e):encodeURIComponent(String(o)).replace(/%(23|24|26|2B|3A|3C|3E|3D|2F|3F|40|5B|5D|5E|60|7B|7D|7C)/g,decodeURIComponent),e=encodeURIComponent(String(e)),e=e.replace(/%(23|24|26|2B|5E|60|7C)/g,decodeURIComponent),e=e.replace(/[\(\)]/g,escape),document.cookie=[e,"=",o,i.expires?"; expires="+i.expires.toUTCString():"",i.path?"; path="+i.path:"",i.domain?"; domain="+i.domain:"",i.secure?"; secure":""].join("")}e||(s={});for(var u=document.cookie?document.cookie.split("; "):[],c=/(%[0-9A-Z]{2})+/g,l=0;l<u.length;l++){var f=u[l].split("="),d=f.slice(1).join("=");'"'===d.charAt(0)&&(d=d.slice(1,-1));try{var h=f[0].replace(c,decodeURIComponent);if(d=n.read?n.read(d,h):n(d,h)||d.replace(c,decodeURIComponent),this.json)try{d=JSON.parse(d)}catch(t){}if(e===h){s=d;break}e||(s[h]=d)}catch(t){}}return s}}return r.set=r,r.get=function(t){return r.call(r,t)},r.getJSON=function(){return r.apply({json:!0},[].slice.call(arguments))},r.defaults={},r.remove=function(e,n){r(e,"",t(n,{expires:-1}))},r.withConverter=e,r}return e(function(){})})},function(t,e,n){(function(n){"use strict";function r(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}Object.defineProperty(e,"__esModule",{value:!0});var o=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),i=function(){function t(e){r(this,t),this.el_="string"==typeof e?document.querySelector(e):e}return o(t,[{key:"initialize",value:function(t){t.length&&this.el_.children[this.el_.children.length-1].appendChild(n.createElement("ul",{class:"md-source__facts"},t.map(function(t){return n.createElement("li",{class:"md-source__fact"},t)}))),this.el_.dataset.mdState="done"}}]),t}();e.default=i,t.exports=e.default}).call(e,n(81))}]);
\ No newline at end of file
diff --git a/material/assets/javascripts/application-f7ac33b6fb.js b/material/assets/javascripts/application-f7ac33b6fb.js
new file mode 100644
index 0000000000000000000000000000000000000000..413748d2ed7d42c93f3123821c955a4ff6102779
--- /dev/null
+++ b/material/assets/javascripts/application-f7ac33b6fb.js
@@ -0,0 +1,3 @@
+window.app=function(t){function e(r){if(n[r])return n[r].exports;var o=n[r]={i:r,l:!1,exports:{}};return t[r].call(o.exports,o,o.exports,e),o.l=!0,o.exports}var n={};return e.m=t,e.c=n,e.i=function(t){return t},e.d=function(t,n,r){e.o(t,n)||Object.defineProperty(t,n,{configurable:!1,enumerable:!0,get:r})},e.n=function(t){var n=t&&t.__esModule?function(){return t.default}:function(){return t};return e.d(n,"a",n),n},e.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},e.p="",e(e.s=90)}([function(t,e,n){"use strict";var r=n(30)("wks"),o=n(21),i=n(1).Symbol,s="function"==typeof i,a=t.exports=function(t){return r[t]||(r[t]=s&&i[t]||(s?i:o)("Symbol."+t))};a.store=r},function(t,e,n){"use strict";var r=t.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=r)},function(t,e,n){"use strict";var r=n(11);t.exports=function(t){if(!r(t))throw TypeError(t+" is not an object!");return t}},function(t,e,n){"use strict";var r=n(12),o=n(29);t.exports=n(5)?function(t,e,n){return r.f(t,e,o(1,n))}:function(t,e,n){return t[e]=n,t}},function(t,e,n){"use strict";var r=t.exports={version:"2.4.0"};"number"==typeof __e&&(__e=r)},function(t,e,n){"use strict";t.exports=!n(25)(function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a})},function(t,e,n){"use strict";var r={}.hasOwnProperty;t.exports=function(t,e){return r.call(t,e)}},function(t,e,n){"use strict";t.exports={}},function(t,e,n){"use strict";var r=n(1),o=n(3),i=n(6),s=n(21)("src"),a="toString",c=Function[a],u=(""+c).split(a);n(4).inspectSource=function(t){return c.call(t)},(t.exports=function(t,e,n,a){var c="function"==typeof n;c&&(i(n,"name")||o(n,"name",e)),t[e]!==n&&(c&&(i(n,s)||o(n,s,t[e]?""+t[e]:u.join(String(e)))),t===r?t[e]=n:a?t[e]?t[e]=n:o(t,e,n):(delete t[e],o(t,e,n)))})(Function.prototype,a,function(){return"function"==typeof this&&this[s]||c.call(this)})},function(t,e,n){"use 
strict";var r={}.toString;t.exports=function(t){return r.call(t).slice(8,-1)}},function(t,e,n){"use strict";var r=n(13);t.exports=function(t,e,n){if(r(t),void 0===e)return t;switch(n){case 1:return function(n){return t.call(e,n)};case 2:return function(n,r){return t.call(e,n,r)};case 3:return function(n,r,o){return t.call(e,n,r,o)}}return function(){return t.apply(e,arguments)}}},function(t,e,n){"use strict";var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t};t.exports=function(t){return"object"===("undefined"==typeof t?"undefined":r(t))?null!==t:"function"==typeof t}},function(t,e,n){"use strict";var r=n(2),o=n(42),i=n(62),s=Object.defineProperty;e.f=n(5)?Object.defineProperty:function(t,e,n){if(r(t),e=i(e,!0),r(n),o)try{return s(t,e,n)}catch(t){}if("get"in n||"set"in n)throw TypeError("Accessors not supported!");return"value"in n&&(t[e]=n.value),t}},function(t,e,n){"use strict";t.exports=function(t){if("function"!=typeof t)throw TypeError(t+" is not a function!");return t}},function(t,e,n){"use strict";var r=n(9),o=n(0)("toStringTag"),i="Arguments"==r(function(){return arguments}()),s=function(t,e){try{return t[e]}catch(t){}};t.exports=function(t){var e,n,a;return void 0===t?"Undefined":null===t?"Null":"string"==typeof(n=s(e=Object(t),o))?n:i?r(e):"Object"==(a=r(e))&&"function"==typeof e.callee?"Arguments":a}},function(t,e,n){"use strict";t.exports=function(t){if(void 0==t)throw TypeError("Can't call method on  "+t);return t}},function(t,e,n){"use strict";var r=n(11),o=n(1).document,i=r(o)&&r(o.createElement);t.exports=function(t){return i?o.createElement(t):{}}},function(t,e,n){"use strict";var r=n(12).f,o=n(6),i=n(0)("toStringTag");t.exports=function(t,e,n){t&&!o(t=n?t:t.prototype,i)&&r(t,i,{configurable:!0,value:e})}},function(t,e,n){"use strict";var r=n(30)("keys"),o=n(21);t.exports=function(t){return 
r[t]||(r[t]=o(t))}},function(t,e,n){"use strict";var r=Math.ceil,o=Math.floor;t.exports=function(t){return isNaN(t=+t)?0:(t>0?o:r)(t)}},function(t,e,n){"use strict";var r=n(44),o=n(15);t.exports=function(t){return r(o(t))}},function(t,e,n){"use strict";var r=0,o=Math.random();t.exports=function(t){return"Symbol(".concat(void 0===t?"":t,")_",(++r+o).toString(36))}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.default={createElement:function(t,e){var n=document.createElement(t);e&&Array.prototype.forEach.call(Object.keys(e),function(t){n.setAttribute(t,e[t])});for(var r=function t(e){Array.prototype.forEach.call(e,function(e){"string"==typeof e||"number"==typeof e?n.textContent+=e:Array.isArray(e)?t(e):n.appendChild(e)})},o=arguments.length,i=Array(o>2?o-2:0),s=2;s<o;s++)i[s-2]=arguments[s];return r(i),n}},t.exports=e.default},function(t,e,n){"use strict";t.exports="constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf".split(",")},function(t,e,n){"use strict";var r=n(1),o=n(4),i=n(3),s=n(8),a=n(10),c="prototype",u=function t(e,n,u){var l,f,h,d,p=e&t.F,v=e&t.G,y=e&t.S,m=e&t.P,g=e&t.B,w=v?r:y?r[n]||(r[n]={}):(r[n]||{})[c],b=v?o:o[n]||(o[n]={}),_=b[c]||(b[c]={});v&&(u=n);for(l in u)f=!p&&w&&void 0!==w[l],h=(f?w:u)[l],d=g&&f?a(h,r):m&&"function"==typeof h?a(Function.call,h):h,w&&s(w,l,h,e&t.U),b[l]!=h&&i(b,l,d),m&&_[l]!=h&&(_[l]=h)};r.core=o,u.F=1,u.G=2,u.S=4,u.P=8,u.B=16,u.W=32,u.U=64,u.R=128,t.exports=u},function(t,e,n){"use strict";t.exports=function(t){try{return!!t()}catch(t){return!0}}},function(t,e,n){"use strict";t.exports=n(1).document&&document.documentElement},function(t,e,n){"use strict";var r=n(28),o=n(24),i=n(8),s=n(3),a=n(6),c=n(7),u=n(47),l=n(17),f=n(53),h=n(0)("iterator"),d=!([].keys&&"next"in[].keys()),p="@@iterator",v="keys",y="values",m=function(){return this};t.exports=function(t,e,n,g,w,b,_){u(n,e,g);var S,x,E,k=function(t){if(!d&&t in A)return A[t];switch(t){case v:return 
function(){return new n(this,t)};case y:return function(){return new n(this,t)}}return function(){return new n(this,t)}},T=e+" Iterator",O=w==y,C=!1,A=t.prototype,P=A[h]||A[p]||w&&A[w],j=P||k(w),M=w?O?k("entries"):j:void 0,F="Array"==e?A.entries||P:P;if(F&&(E=f(F.call(new t)),E!==Object.prototype&&(l(E,T,!0),r||a(E,h)||s(E,h,m))),O&&P&&P.name!==y&&(C=!0,j=function(){return P.call(this)}),r&&!_||!d&&!C&&A[h]||s(A,h,j),c[e]=j,c[T]=m,w)if(S={values:O?j:k(y),keys:b?j:k(v),entries:M},_)for(x in S)x in A||i(A,x,S[x]);else o(o.P+o.F*(d||C),e,S);return S}},function(t,e,n){"use strict";t.exports=!1},function(t,e,n){"use strict";t.exports=function(t,e){return{enumerable:!(1&t),configurable:!(2&t),writable:!(4&t),value:e}}},function(t,e,n){"use strict";var r=n(1),o="__core-js_shared__",i=r[o]||(r[o]={});t.exports=function(t){return i[t]||(i[t]={})}},function(t,e,n){"use strict";var r,o,i,s=n(10),a=n(43),c=n(26),u=n(16),l=n(1),f=l.process,h=l.setImmediate,d=l.clearImmediate,p=l.MessageChannel,v=0,y={},m="onreadystatechange",g=function(){var t=+this;if(y.hasOwnProperty(t)){var e=y[t];delete y[t],e()}},w=function(t){g.call(t.data)};h&&d||(h=function(t){for(var e=[],n=1;arguments.length>n;)e.push(arguments[n++]);return y[++v]=function(){a("function"==typeof t?t:Function(t),e)},r(v),v},d=function(t){delete y[t]},"process"==n(9)(f)?r=function(t){f.nextTick(s(g,t,1))}:p?(o=new p,i=o.port2,o.port1.onmessage=w,r=s(i.postMessage,i,1)):l.addEventListener&&"function"==typeof postMessage&&!l.importScripts?(r=function(t){l.postMessage(t+"","*")},l.addEventListener("message",w,!1)):r=m in u("script")?function(t){c.appendChild(u("script"))[m]=function(){c.removeChild(this),g.call(t)}}:function(t){setTimeout(s(g,t,1),0)}),t.exports={set:h,clear:d}},function(t,e,n){"use strict";var r=n(19),o=Math.min;t.exports=function(t){return t>0?o(r(t),9007199254740991):0}},function(t,e){(function(e){t.exports=e}).call(e,{})},function(t,e,n){"use 
strict";n(65),n(67),n(68),n(66),t.exports=n(4).Promise},function(t,e,n){"use strict";try{var r=new window.CustomEvent("test");if(r.preventDefault(),r.defaultPrevented!==!0)throw new Error("Could not prevent default")}catch(t){var o=function(t,e){var n,r;return e=e||{bubbles:!1,cancelable:!1,detail:void 0},n=document.createEvent("CustomEvent"),n.initCustomEvent(t,e.bubbles,e.cancelable,e.detail),r=n.preventDefault,n.preventDefault=function(){r.call(this);try{Object.defineProperty(this,"defaultPrevented",{get:function(){return!0}})}catch(t){this.defaultPrevented=!0}},n};o.prototype=window.Event.prototype,window.CustomEvent=o}},function(t,e,n){"use strict";!function(t){function e(t){if("string"!=typeof t&&(t=String(t)),/[^a-z0-9\-#$%&'*+.\^_`|~]/i.test(t))throw new TypeError("Invalid character in header field name");return t.toLowerCase()}function n(t){return"string"!=typeof t&&(t=String(t)),t}function r(t){var e={next:function(){var e=t.shift();return{done:void 0===e,value:e}}};return m.iterable&&(e[Symbol.iterator]=function(){return e}),e}function o(t){this.map={},t instanceof o?t.forEach(function(t,e){this.append(e,t)},this):t&&Object.getOwnPropertyNames(t).forEach(function(e){this.append(e,t[e])},this)}function i(t){return t.bodyUsed?Promise.reject(new TypeError("Already read")):void(t.bodyUsed=!0)}function s(t){return new Promise(function(e,n){t.onload=function(){e(t.result)},t.onerror=function(){n(t.error)}})}function a(t){var e=new FileReader,n=s(e);return e.readAsArrayBuffer(t),n}function c(t){var e=new FileReader,n=s(e);return e.readAsText(t),n}function u(t){for(var e=new Uint8Array(t),n=new Array(e.length),r=0;r<e.length;r++)n[r]=String.fromCharCode(e[r]);return n.join("")}function l(t){if(t.slice)return t.slice(0);var e=new Uint8Array(t.byteLength);return e.set(new Uint8Array(t)),e.buffer}function f(){return this.bodyUsed=!1,this._initBody=function(t){if(this._bodyInit=t,t)if("string"==typeof t)this._bodyText=t;else 
if(m.blob&&Blob.prototype.isPrototypeOf(t))this._bodyBlob=t;else if(m.formData&&FormData.prototype.isPrototypeOf(t))this._bodyFormData=t;else if(m.searchParams&&URLSearchParams.prototype.isPrototypeOf(t))this._bodyText=t.toString();else if(m.arrayBuffer&&m.blob&&w(t))this._bodyArrayBuffer=l(t.buffer),this._bodyInit=new Blob([this._bodyArrayBuffer]);else{if(!m.arrayBuffer||!ArrayBuffer.prototype.isPrototypeOf(t)&&!b(t))throw new Error("unsupported BodyInit type");this._bodyArrayBuffer=l(t)}else this._bodyText="";this.headers.get("content-type")||("string"==typeof t?this.headers.set("content-type","text/plain;charset=UTF-8"):this._bodyBlob&&this._bodyBlob.type?this.headers.set("content-type",this._bodyBlob.type):m.searchParams&&URLSearchParams.prototype.isPrototypeOf(t)&&this.headers.set("content-type","application/x-www-form-urlencoded;charset=UTF-8"))},m.blob&&(this.blob=function(){var t=i(this);if(t)return t;if(this._bodyBlob)return Promise.resolve(this._bodyBlob);if(this._bodyArrayBuffer)return Promise.resolve(new Blob([this._bodyArrayBuffer]));if(this._bodyFormData)throw new Error("could not read FormData body as blob");return Promise.resolve(new Blob([this._bodyText]))},this.arrayBuffer=function(){return this._bodyArrayBuffer?i(this)||Promise.resolve(this._bodyArrayBuffer):this.blob().then(a)}),this.text=function(){var t=i(this);if(t)return t;if(this._bodyBlob)return c(this._bodyBlob);if(this._bodyArrayBuffer)return Promise.resolve(u(this._bodyArrayBuffer));if(this._bodyFormData)throw new Error("could not read FormData body as text");return Promise.resolve(this._bodyText)},m.formData&&(this.formData=function(){return this.text().then(p)}),this.json=function(){return this.text().then(JSON.parse)},this}function h(t){var e=t.toUpperCase();return _.indexOf(e)>-1?e:t}function d(t,e){e=e||{};var n=e.body;if(t instanceof d){if(t.bodyUsed)throw new TypeError("Already read");this.url=t.url,this.credentials=t.credentials,e.headers||(this.headers=new 
o(t.headers)),this.method=t.method,this.mode=t.mode,n||null==t._bodyInit||(n=t._bodyInit,t.bodyUsed=!0)}else this.url=String(t);if(this.credentials=e.credentials||this.credentials||"omit",!e.headers&&this.headers||(this.headers=new o(e.headers)),this.method=h(e.method||this.method||"GET"),this.mode=e.mode||this.mode||null,this.referrer=null,("GET"===this.method||"HEAD"===this.method)&&n)throw new TypeError("Body not allowed for GET or HEAD requests");this._initBody(n)}function p(t){var e=new FormData;return t.trim().split("&").forEach(function(t){if(t){var n=t.split("="),r=n.shift().replace(/\+/g," "),o=n.join("=").replace(/\+/g," ");e.append(decodeURIComponent(r),decodeURIComponent(o))}}),e}function v(t){var e=new o;return t.split(/\r?\n/).forEach(function(t){var n=t.split(":"),r=n.shift().trim();if(r){var o=n.join(":").trim();e.append(r,o)}}),e}function y(t,e){e||(e={}),this.type="default",this.status="status"in e?e.status:200,this.ok=this.status>=200&&this.status<300,this.statusText="statusText"in e?e.statusText:"OK",this.headers=new o(e.headers),this.url=e.url||"",this._initBody(t)}if(!t.fetch){var m={searchParams:"URLSearchParams"in t,iterable:"Symbol"in t&&"iterator"in Symbol,blob:"FileReader"in t&&"Blob"in t&&function(){try{return new Blob,!0}catch(t){return!1}}(),formData:"FormData"in t,arrayBuffer:"ArrayBuffer"in t};if(m.arrayBuffer)var g=["[object Int8Array]","[object Uint8Array]","[object Uint8ClampedArray]","[object Int16Array]","[object Uint16Array]","[object Int32Array]","[object Uint32Array]","[object Float32Array]","[object Float64Array]"],w=function(t){return t&&DataView.prototype.isPrototypeOf(t)},b=ArrayBuffer.isView||function(t){return t&&g.indexOf(Object.prototype.toString.call(t))>-1};o.prototype.append=function(t,r){t=e(t),r=n(r);var o=this.map[t];this.map[t]=o?o+","+r:r},o.prototype.delete=function(t){delete this.map[e(t)]},o.prototype.get=function(t){return t=e(t),this.has(t)?this.map[t]:null},o.prototype.has=function(t){return 
this.map.hasOwnProperty(e(t))},o.prototype.set=function(t,r){this.map[e(t)]=n(r)},o.prototype.forEach=function(t,e){for(var n in this.map)this.map.hasOwnProperty(n)&&t.call(e,this.map[n],n,this)},o.prototype.keys=function(){var t=[];return this.forEach(function(e,n){t.push(n)}),r(t)},o.prototype.values=function(){var t=[];return this.forEach(function(e){t.push(e)}),r(t)},o.prototype.entries=function(){var t=[];return this.forEach(function(e,n){t.push([n,e])}),r(t)},m.iterable&&(o.prototype[Symbol.iterator]=o.prototype.entries);var _=["DELETE","GET","HEAD","OPTIONS","POST","PUT"];d.prototype.clone=function(){return new d(this,{body:this._bodyInit})},f.call(d.prototype),f.call(y.prototype),y.prototype.clone=function(){return new y(this._bodyInit,{status:this.status,statusText:this.statusText,headers:new o(this.headers),url:this.url})},y.error=function(){var t=new y(null,{status:0,statusText:""});return t.type="error",t};var S=[301,302,303,307,308];y.redirect=function(t,e){if(S.indexOf(e)===-1)throw new RangeError("Invalid status code");return new y(null,{status:e,headers:{location:t}})},t.Headers=o,t.Request=d,t.Response=y,t.fetch=function(t,e){return new Promise(function(n,r){var o=new d(t,e),i=new XMLHttpRequest;i.onload=function(){var t={status:i.status,statusText:i.statusText,headers:v(i.getAllResponseHeaders()||"")};t.url="responseURL"in i?i.responseURL:t.headers.get("X-Request-URL");var e="response"in i?i.response:i.responseText;n(new y(e,t))},i.onerror=function(){r(new TypeError("Network request failed"))},i.ontimeout=function(){r(new TypeError("Network request failed"))},i.open(o.method,o.url,!0),"include"===o.credentials&&(i.withCredentials=!0),"responseType"in i&&m.blob&&(i.responseType="blob"),o.headers.forEach(function(t,e){i.setRequestHeader(e,t)}),i.send("undefined"==typeof o._bodyInit?null:o._bodyInit)})},t.fetch.polyfill=!0}}("undefined"!=typeof self?self:void 0)},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var 
r=n(69),o=n.n(r),i=n(72);n.d(e,"initialize",function(){return s});var s=function(t){new i.a.Event.Listener(document,"DOMContentLoaded",function(){Modernizr.addTest("ios",function(){return!!navigator.userAgent.match(/(iPad|iPhone|iPod)/g)}),Modernizr.addTest("standalone",function(){return!!navigator.standalone}),o.a.attach(document.body);var t=document.querySelectorAll("table:not([class])");if(Array.prototype.forEach.call(t,function(t){var e=document.createElement("div");e.classList.add("md-typeset__table"),t.nextSibling?t.parentNode.insertBefore(e,t.nextSibling):t.parentNode.appendChild(e),e.appendChild(t)}),Modernizr.ios){var e=document.querySelectorAll("[data-md-scrollfix]");Array.prototype.forEach.call(e,function(t){t.addEventListener("touchstart",function(){var e=t.scrollTop;0===e?t.scrollTop=1:e+t.offsetHeight===t.scrollHeight&&(t.scrollTop=e-1)})})}}).listen(),new i.a.Event.MatchMedia("(min-width: 1220px)",new i.a.Event.Listener(window,["scroll","resize","orientationchange"],new i.a.Sidebar.Position("[data-md-component=navigation]"))),new i.a.Event.MatchMedia("(min-width: 960px)",new i.a.Event.Listener(window,["scroll","resize","orientationchange"],new i.a.Sidebar.Position("[data-md-component=toc]"))),new i.a.Event.MatchMedia("(min-width: 960px)",new i.a.Event.Listener(window,"scroll",new i.a.Nav.Blur("[data-md-component=toc] .md-nav__link")));var e=document.querySelectorAll("[data-md-component=collapsible]");Array.prototype.forEach.call(e,function(t){new i.a.Event.MatchMedia("(min-width: 1220px)",new i.a.Event.Listener(t.previousElementSibling,"click",new i.a.Nav.Collapse(t)))}),new i.a.Event.MatchMedia("(max-width: 1219px)",new i.a.Event.Listener("[data-md-component=navigation] [data-md-toggle]","change",new i.a.Nav.Scrolling("[data-md-component=navigation] nav"))),new i.a.Event.MatchMedia("(max-width: 959px)",new i.a.Event.Listener("[data-md-toggle=search]","change",new i.a.Search.Lock("[data-md-toggle=search]"))),new 
i.a.Event.Listener(document.forms.search.query,["focus","keyup"],new i.a.Search.Result("[data-md-component=result]",function(){return fetch(t.url.base+"/mkdocs/search_index.json",{credentials:"same-origin"}).then(function(t){return t.json()}).then(function(e){return e.docs.map(function(e){return e.location=t.url.base+e.location,e})})})).listen(),new i.a.Event.MatchMedia("(max-width: 1219px)",new i.a.Event.Listener("[data-md-component=overlay]","touchstart",function(t){return t.preventDefault()})),new i.a.Event.MatchMedia("(max-width: 959px)",new i.a.Event.Listener("[data-md-component=navigation] [href^='#']","click",function(){var t=document.querySelector("[data-md-toggle=drawer]");t.checked&&(t.checked=!1,t.dispatchEvent(new CustomEvent("change")))})),new i.a.Event.Listener("[data-md-toggle=search]","change",function(t){setTimeout(function(t){var e=document.forms.search.query;t.checked&&e.focus()},400,t.target)}).listen(),new i.a.Event.MatchMedia("(min-width: 960px)",new i.a.Event.Listener(document.forms.search.query,"focus",function(){var t=document.querySelector("[data-md-toggle=search]");t.checked||(t.checked=!0,t.dispatchEvent(new CustomEvent("change")))})),new i.a.Event.MatchMedia("(min-width: 960px)",new i.a.Event.Listener(document.body,"click",function(){var t=document.querySelector("[data-md-toggle=search]");t.checked&&(t.checked=!1,t.dispatchEvent(new CustomEvent("change")))})),new i.a.Event.Listener(window,"keyup",function(t){var e=t.keyCode||t.which;if(27===e){var n=document.querySelector("[data-md-toggle=search]");n.checked&&(n.checked=!1,n.dispatchEvent(new CustomEvent("change")),document.forms.search.query.blur())}}).listen(),new i.a.Event.MatchMedia("(min-width: 960px)",new i.a.Event.Listener("[data-md-toggle=search]","click",function(t){return t.stopPropagation()})),new i.a.Event.MatchMedia("(min-width: 960px)",new i.a.Event.Listener("[data-md-component=search]","click",function(t){return t.stopPropagation()})),function(){var 
t=document.querySelector("[data-md-source]");if(!t)return Promise.resolve([]);switch(t.dataset.mdSource){case"github":return new i.a.Source.Adapter.GitHub(t).fetch();default:return Promise.resolve([])}}().then(function(t){var e=document.querySelectorAll("[data-md-source]");Array.prototype.forEach.call(e,function(e){new i.a.Source.Repository(e).initialize(t)})})}},function(t,e,n){"use strict";var r=n(0)("unscopables"),o=Array.prototype;void 0==o[r]&&n(3)(o,r,{}),t.exports=function(t){o[r][t]=!0}},function(t,e,n){"use strict";t.exports=function(t,e,n,r){if(!(t instanceof e)||void 0!==r&&r in t)throw TypeError(n+": incorrect invocation!");return t}},function(t,e,n){"use strict";var r=n(20),o=n(32),i=n(60);t.exports=function(t){return function(e,n,s){var a,c=r(e),u=o(c.length),l=i(s,u);if(t&&n!=n){for(;u>l;)if(a=c[l++],a!=a)return!0}else for(;u>l;l++)if((t||l in c)&&c[l]===n)return t||l||0;return!t&&-1}}},function(t,e,n){"use strict";var r=n(10),o=n(46),i=n(45),s=n(2),a=n(32),c=n(63),u={},l={},f=t.exports=function(t,e,n,f,h){var d,p,v,y,m=h?function(){return t}:c(t),g=r(n,f,e?2:1),w=0;if("function"!=typeof m)throw TypeError(t+" is not iterable!");if(i(m)){for(d=a(t.length);d>w;w++)if(y=e?g(s(p=t[w])[0],p[1]):g(t[w]),y===u||y===l)return y}else for(v=m.call(t);!(p=v.next()).done;)if(y=o(v,g,p.value,e),y===u||y===l)return y};f.BREAK=u,f.RETURN=l},function(t,e,n){"use strict";t.exports=!n(5)&&!n(25)(function(){return 7!=Object.defineProperty(n(16)("div"),"a",{get:function(){return 7}}).a})},function(t,e,n){"use strict";t.exports=function(t,e,n){var r=void 0===n;switch(e.length){case 0:return r?t():t.call(n);case 1:return r?t(e[0]):t.call(n,e[0]);case 2:return r?t(e[0],e[1]):t.call(n,e[0],e[1]);case 3:return r?t(e[0],e[1],e[2]):t.call(n,e[0],e[1],e[2]);case 4:return r?t(e[0],e[1],e[2],e[3]):t.call(n,e[0],e[1],e[2],e[3])}return t.apply(n,e)}},function(t,e,n){"use strict";var 
r=n(9);t.exports=Object("z").propertyIsEnumerable(0)?Object:function(t){return"String"==r(t)?t.split(""):Object(t)}},function(t,e,n){"use strict";var r=n(7),o=n(0)("iterator"),i=Array.prototype;t.exports=function(t){return void 0!==t&&(r.Array===t||i[o]===t)}},function(t,e,n){"use strict";var r=n(2);t.exports=function(t,e,n,o){try{return o?e(r(n)[0],n[1]):e(n)}catch(e){var i=t.return;throw void 0!==i&&r(i.call(t)),e}}},function(t,e,n){"use strict";var r=n(51),o=n(29),i=n(17),s={};n(3)(s,n(0)("iterator"),function(){return this}),t.exports=function(t,e,n){t.prototype=r(s,{next:o(1,n)}),i(t,e+" Iterator")}},function(t,e,n){"use strict";var r=n(0)("iterator"),o=!1;try{var i=[7][r]();i.return=function(){o=!0},Array.from(i,function(){throw 2})}catch(t){}t.exports=function(t,e){if(!e&&!o)return!1;var n=!1;try{var i=[7],s=i[r]();s.next=function(){return{done:n=!0}},i[r]=function(){return s},t(i)}catch(t){}return n}},function(t,e,n){"use strict";t.exports=function(t,e){return{value:e,done:!!t}}},function(t,e,n){"use strict";var r=n(1),o=n(31).set,i=r.MutationObserver||r.WebKitMutationObserver,s=r.process,a=r.Promise,c="process"==n(9)(s);t.exports=function(){var t,e,n,u=function(){var r,o;for(c&&(r=s.domain)&&r.exit();t;){o=t.fn,t=t.next;try{o()}catch(r){throw t?n():e=void 0,r}}e=void 0,r&&r.enter()};if(c)n=function(){s.nextTick(u)};else if(i){var l=!0,f=document.createTextNode("");new i(u).observe(f,{characterData:!0}),n=function(){f.data=l=!l}}else if(a&&a.resolve){var h=a.resolve();n=function(){h.then(u)}}else n=function(){o.call(r,u)};return function(r){var o={fn:r,next:void 0};e&&(e.next=o),t||(t=o,n()),e=o}}},function(t,e,n){"use strict";var r=n(2),o=n(52),i=n(23),s=n(18)("IE_PROTO"),a=function(){},c="prototype",u=function(){var t,e=n(16)("iframe"),r=i.length,o="<",s=">";for(e.style.display="none",n(26).appendChild(e),e.src="javascript:",t=e.contentWindow.document,t.open(),t.write(o+"script"+s+"document.F=Object"+o+"/script"+s),t.close(),u=t.F;r--;)delete 
u[c][i[r]];return u()};t.exports=Object.create||function(t,e){var n;return null!==t?(a[c]=r(t),n=new a,a[c]=null,n[s]=t):n=u(),void 0===e?n:o(n,e)}},function(t,e,n){"use strict";var r=n(12),o=n(2),i=n(55);t.exports=n(5)?Object.defineProperties:function(t,e){o(t);for(var n,s=i(e),a=s.length,c=0;a>c;)r.f(t,n=s[c++],e[n]);return t}},function(t,e,n){"use strict";var r=n(6),o=n(61),i=n(18)("IE_PROTO"),s=Object.prototype;t.exports=Object.getPrototypeOf||function(t){return t=o(t),r(t,i)?t[i]:"function"==typeof t.constructor&&t instanceof t.constructor?t.constructor.prototype:t instanceof Object?s:null}},function(t,e,n){"use strict";var r=n(6),o=n(20),i=n(40)(!1),s=n(18)("IE_PROTO");t.exports=function(t,e){var n,a=o(t),c=0,u=[];for(n in a)n!=s&&r(a,n)&&u.push(n);for(;e.length>c;)r(a,n=e[c++])&&(~i(u,n)||u.push(n));return u}},function(t,e,n){"use strict";var r=n(54),o=n(23);t.exports=Object.keys||function(t){return r(t,o)}},function(t,e,n){"use strict";var r=n(8);t.exports=function(t,e,n){for(var o in e)r(t,o,e[o],n);return t}},function(t,e,n){"use strict";var r=n(1),o=n(12),i=n(5),s=n(0)("species");t.exports=function(t){var e=r[t];i&&e&&!e[s]&&o.f(e,s,{configurable:!0,get:function(){return this}})}},function(t,e,n){"use strict";var r=n(2),o=n(13),i=n(0)("species");t.exports=function(t,e){var n,s=r(t).constructor;return void 0===s||void 0==(n=r(s)[i])?e:o(n)}},function(t,e,n){"use strict";var r=n(19),o=n(15);t.exports=function(t){return function(e,n){var i,s,a=String(o(e)),c=r(n),u=a.length;return c<0||c>=u?t?"":void 0:(i=a.charCodeAt(c),i<55296||i>56319||c+1===u||(s=a.charCodeAt(c+1))<56320||s>57343?t?a.charAt(c):i:t?a.slice(c,c+2):(i-55296<<10)+(s-56320)+65536)}}},function(t,e,n){"use strict";var r=n(19),o=Math.max,i=Math.min;t.exports=function(t,e){return t=r(t),t<0?o(t+e,0):i(t,e)}},function(t,e,n){"use strict";var r=n(15);t.exports=function(t){return Object(r(t))}},function(t,e,n){"use strict";var r=n(11);t.exports=function(t,e){if(!r(t))return t;var 
n,o;if(e&&"function"==typeof(n=t.toString)&&!r(o=n.call(t)))return o;if("function"==typeof(n=t.valueOf)&&!r(o=n.call(t)))return o;if(!e&&"function"==typeof(n=t.toString)&&!r(o=n.call(t)))return o;throw TypeError("Can't convert object to primitive value")}},function(t,e,n){"use strict";var r=n(14),o=n(0)("iterator"),i=n(7);t.exports=n(4).getIteratorMethod=function(t){if(void 0!=t)return t[o]||t["@@iterator"]||i[r(t)]}},function(t,e,n){"use strict";var r=n(38),o=n(49),i=n(7),s=n(20);t.exports=n(27)(Array,"Array",function(t,e){this._t=s(t),this._i=0,this._k=e},function(){var t=this._t,e=this._k,n=this._i++;return!t||n>=t.length?(this._t=void 0,o(1)):"keys"==e?o(0,n):"values"==e?o(0,t[n]):o(0,[n,t[n]])},"values"),i.Arguments=i.Array,r("keys"),r("values"),r("entries")},function(t,e,n){"use strict";var r=n(14),o={};o[n(0)("toStringTag")]="z",o+""!="[object z]"&&n(8)(Object.prototype,"toString",function(){return"[object "+r(this)+"]"},!0)},function(t,e,n){"use strict";var r,o,i,s=n(28),a=n(1),c=n(10),u=n(14),l=n(24),f=n(11),h=n(13),d=n(39),p=n(41),v=n(58),y=n(31).set,m=n(50)(),g="Promise",w=a.TypeError,b=a.process,_=a[g],b=a.process,S="process"==u(b),x=function(){},E=!!function(){try{var t=_.resolve(1),e=(t.constructor={})[n(0)("species")]=function(t){t(x,x)};return(S||"function"==typeof PromiseRejectionEvent)&&t.then(x)instanceof e}catch(t){}}(),k=function(t,e){return t===e||t===_&&e===i},T=function(t){var e;return!(!f(t)||"function"!=typeof(e=t.then))&&e},O=function(t){return k(_,t)?new C(t):new o(t)},C=o=function(t){var e,n;this.promise=new t(function(t,r){if(void 0!==e||void 0!==n)throw w("Bad Promise constructor");e=t,n=r}),this.resolve=h(e),this.reject=h(n)},A=function(t){try{t()}catch(t){return{error:t}}},P=function(t,e){if(!t._n){t._n=!0;var n=t._c;m(function(){for(var r=t._v,o=1==t._s,i=0,s=function(e){var 
n,i,s=o?e.ok:e.fail,a=e.resolve,c=e.reject,u=e.domain;try{s?(o||(2==t._h&&F(t),t._h=1),s===!0?n=r:(u&&u.enter(),n=s(r),u&&u.exit()),n===e.promise?c(w("Promise-chain cycle")):(i=T(n))?i.call(n,a,c):a(n)):c(r)}catch(t){c(t)}};n.length>i;)s(n[i++]);t._c=[],t._n=!1,e&&!t._h&&j(t)})}},j=function(t){y.call(a,function(){var e,n,r,o=t._v;if(M(t)&&(e=A(function(){S?b.emit("unhandledRejection",o,t):(n=a.onunhandledrejection)?n({promise:t,reason:o}):(r=a.console)&&r.error&&r.error("Unhandled promise rejection",o)}),t._h=S||M(t)?2:1),t._a=void 0,e)throw e.error})},M=function t(e){if(1==e._h)return!1;for(var n,r=e._a||e._c,o=0;r.length>o;)if(n=r[o++],n.fail||!t(n.promise))return!1;return!0},F=function(t){y.call(a,function(){var e;S?b.emit("rejectionHandled",t):(e=a.onrejectionhandled)&&e({promise:t,reason:t._v})})},L=function(t){var e=this;e._d||(e._d=!0,e=e._w||e,e._v=t,e._s=2,e._a||(e._a=e._c.slice()),P(e,!0))},N=function t(e){var n,r=this;if(!r._d){r._d=!0,r=r._w||r;try{if(r===e)throw w("Promise can't be resolved itself");(n=T(e))?m(function(){var o={_w:r,_d:!1};try{n.call(e,c(t,o,1),c(L,o,1))}catch(t){L.call(o,t)}}):(r._v=e,r._s=1,P(r,!1))}catch(t){L.call({_w:r,_d:!1},t)}}};E||(_=function(t){d(this,_,g,"_h"),h(t),r.call(this);try{t(c(N,this,1),c(L,this,1))}catch(t){L.call(this,t)}},r=function(t){this._c=[],this._a=void 0,this._s=0,this._d=!1,this._v=void 0,this._h=0,this._n=!1},r.prototype=n(56)(_.prototype,{then:function(t,e){var n=O(v(this,_));return n.ok="function"!=typeof t||t,n.fail="function"==typeof e&&e,n.domain=S?b.domain:void 0,this._c.push(n),this._a&&this._a.push(n),this._s&&P(this,!1),n.promise},catch:function(t){return this.then(void 0,t)}}),C=function(){var t=new r;this.promise=t,this.resolve=c(N,t,1),this.reject=c(L,t,1)}),l(l.G+l.W+l.F*!E,{Promise:_}),n(17)(_,g),n(57)(g),i=n(4)[g],l(l.S+l.F*!E,g,{reject:function(t){var e=O(this),n=e.reject;return n(t),e.promise}}),l(l.S+l.F*(s||!E),g,{resolve:function(t){if(t instanceof _&&k(t.constructor,this))return t;var 
e=O(this),n=e.resolve;return n(t),e.promise}}),l(l.S+l.F*!(E&&n(48)(function(t){_.all(t).catch(x)})),g,{all:function(t){var e=this,n=O(e),r=n.resolve,o=n.reject,i=A(function(){var n=[],i=0,s=1;p(t,!1,function(t){var a=i++,c=!1;n.push(void 0),s++,e.resolve(t).then(function(t){c||(c=!0,n[a]=t,--s||r(n))},o)}),--s||r(n)});return i&&o(i.error),n.promise},race:function(t){var e=this,n=O(e),r=n.reject,o=A(function(){p(t,!1,function(t){e.resolve(t).then(n.resolve,r)})});return o&&r(o.error),n.promise}})},function(t,e,n){"use strict";var r=n(59)(!0);n(27)(String,"String",function(t){this._t=String(t),this._i=0},function(){var t,e=this._t,n=this._i;return n>=e.length?{value:void 0,done:!0}:(t=r(e,n),this._i+=t.length,{value:t,done:!1})})},function(t,e,n){"use strict";for(var r=n(64),o=n(8),i=n(1),s=n(3),a=n(7),c=n(0),u=c("iterator"),l=c("toStringTag"),f=a.Array,h=["NodeList","DOMTokenList","MediaList","StyleSheetList","CSSRuleList"],d=0;d<5;d++){var p,v=h[d],y=i[v],m=y&&y.prototype;if(m){m[u]||s(m,u,f),m[l]||s(m,l,v),a[v]=f;for(p in r)m[p]||o(m,p,r[p],!0)}}},function(t,e,n){"use strict";var r,o="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t};!function(){function i(t,e){function n(t,e){return function(){return t.apply(e,arguments)}}var r;if(e=e||{},this.trackingClick=!1,this.trackingClickStart=0,this.targetElement=null,this.touchStartX=0,this.touchStartY=0,this.lastTouchIdentifier=0,this.touchBoundary=e.touchBoundary||10,this.layer=t,this.tapDelay=e.tapDelay||200,this.tapTimeout=e.tapTimeout||700,!i.notNeeded(t)){for(var 
o=["onMouse","onClick","onTouchStart","onTouchMove","onTouchEnd","onTouchCancel"],s=this,c=0,u=o.length;c<u;c++)s[o[c]]=n(s[o[c]],s);a&&(t.addEventListener("mouseover",this.onMouse,!0),t.addEventListener("mousedown",this.onMouse,!0),t.addEventListener("mouseup",this.onMouse,!0)),t.addEventListener("click",this.onClick,!0),t.addEventListener("touchstart",this.onTouchStart,!1),t.addEventListener("touchmove",this.onTouchMove,!1),t.addEventListener("touchend",this.onTouchEnd,!1),t.addEventListener("touchcancel",this.onTouchCancel,!1),Event.prototype.stopImmediatePropagation||(t.removeEventListener=function(e,n,r){var o=Node.prototype.removeEventListener;"click"===e?o.call(t,e,n.hijacked||n,r):o.call(t,e,n,r)},t.addEventListener=function(e,n,r){var o=Node.prototype.addEventListener;"click"===e?o.call(t,e,n.hijacked||(n.hijacked=function(t){
+t.propagationStopped||n(t)}),r):o.call(t,e,n,r)}),"function"==typeof t.onclick&&(r=t.onclick,t.addEventListener("click",function(t){r(t)},!1),t.onclick=null)}}var s=navigator.userAgent.indexOf("Windows Phone")>=0,a=navigator.userAgent.indexOf("Android")>0&&!s,c=/iP(ad|hone|od)/.test(navigator.userAgent)&&!s,u=c&&/OS 4_\d(_\d)?/.test(navigator.userAgent),l=c&&/OS [6-7]_\d/.test(navigator.userAgent),f=navigator.userAgent.indexOf("BB10")>0;i.prototype.needsClick=function(t){switch(t.nodeName.toLowerCase()){case"button":case"select":case"textarea":if(t.disabled)return!0;break;case"input":if(c&&"file"===t.type||t.disabled)return!0;break;case"label":case"iframe":case"video":return!0}return/\bneedsclick\b/.test(t.className)},i.prototype.needsFocus=function(t){switch(t.nodeName.toLowerCase()){case"textarea":return!0;case"select":return!a;case"input":switch(t.type){case"button":case"checkbox":case"file":case"image":case"radio":case"submit":return!1}return!t.disabled&&!t.readOnly;default:return/\bneedsfocus\b/.test(t.className)}},i.prototype.sendClick=function(t,e){var n,r;document.activeElement&&document.activeElement!==t&&document.activeElement.blur(),r=e.changedTouches[0],n=document.createEvent("MouseEvents"),n.initMouseEvent(this.determineEventType(t),!0,!0,window,1,r.screenX,r.screenY,r.clientX,r.clientY,!1,!1,!1,!1,0,null),n.forwardedTouchEvent=!0,t.dispatchEvent(n)},i.prototype.determineEventType=function(t){return a&&"select"===t.tagName.toLowerCase()?"mousedown":"click"},i.prototype.focus=function(t){var e;c&&t.setSelectionRange&&0!==t.type.indexOf("date")&&"time"!==t.type&&"month"!==t.type?(e=t.value.length,t.setSelectionRange(e,e)):t.focus()},i.prototype.updateScrollParent=function(t){var e,n;if(e=t.fastClickScrollParent,!e||!e.contains(t)){n=t;do{if(n.scrollHeight>n.offsetHeight){e=n,t.fastClickScrollParent=n;break}n=n.parentElement}while(n)}e&&(e.fastClickLastScrollTop=e.scrollTop)},i.prototype.getTargetElementFromEventTarget=function(t){return 
t.nodeType===Node.TEXT_NODE?t.parentNode:t},i.prototype.onTouchStart=function(t){var e,n,r;if(t.targetTouches.length>1)return!0;if(e=this.getTargetElementFromEventTarget(t.target),n=t.targetTouches[0],c){if(r=window.getSelection(),r.rangeCount&&!r.isCollapsed)return!0;if(!u){if(n.identifier&&n.identifier===this.lastTouchIdentifier)return t.preventDefault(),!1;this.lastTouchIdentifier=n.identifier,this.updateScrollParent(e)}}return this.trackingClick=!0,this.trackingClickStart=t.timeStamp,this.targetElement=e,this.touchStartX=n.pageX,this.touchStartY=n.pageY,t.timeStamp-this.lastClickTime<this.tapDelay&&t.preventDefault(),!0},i.prototype.touchHasMoved=function(t){var e=t.changedTouches[0],n=this.touchBoundary;return Math.abs(e.pageX-this.touchStartX)>n||Math.abs(e.pageY-this.touchStartY)>n},i.prototype.onTouchMove=function(t){return!this.trackingClick||((this.targetElement!==this.getTargetElementFromEventTarget(t.target)||this.touchHasMoved(t))&&(this.trackingClick=!1,this.targetElement=null),!0)},i.prototype.findControl=function(t){return void 0!==t.control?t.control:t.htmlFor?document.getElementById(t.htmlFor):t.querySelector("button, input:not([type=hidden]), keygen, meter, output, progress, select, textarea")},i.prototype.onTouchEnd=function(t){var e,n,r,o,i,s=this.targetElement;if(!this.trackingClick)return!0;if(t.timeStamp-this.lastClickTime<this.tapDelay)return this.cancelNextClick=!0,!0;if(t.timeStamp-this.trackingClickStart>this.tapTimeout)return!0;if(this.cancelNextClick=!1,this.lastClickTime=t.timeStamp,n=this.trackingClickStart,this.trackingClick=!1,this.trackingClickStart=0,l&&(i=t.changedTouches[0],s=document.elementFromPoint(i.pageX-window.pageXOffset,i.pageY-window.pageYOffset)||s,s.fastClickScrollParent=this.targetElement.fastClickScrollParent),r=s.tagName.toLowerCase(),"label"===r){if(e=this.findControl(s)){if(this.focus(s),a)return!1;s=e}}else if(this.needsFocus(s))return 
t.timeStamp-n>100||c&&window.top!==window&&"input"===r?(this.targetElement=null,!1):(this.focus(s),this.sendClick(s,t),c&&"select"===r||(this.targetElement=null,t.preventDefault()),!1);return!(!c||u||(o=s.fastClickScrollParent,!o||o.fastClickLastScrollTop===o.scrollTop))||(this.needsClick(s)||(t.preventDefault(),this.sendClick(s,t)),!1)},i.prototype.onTouchCancel=function(){this.trackingClick=!1,this.targetElement=null},i.prototype.onMouse=function(t){return!this.targetElement||(!!t.forwardedTouchEvent||(!t.cancelable||(!(!this.needsClick(this.targetElement)||this.cancelNextClick)||(t.stopImmediatePropagation?t.stopImmediatePropagation():t.propagationStopped=!0,t.stopPropagation(),t.preventDefault(),!1))))},i.prototype.onClick=function(t){var e;return this.trackingClick?(this.targetElement=null,this.trackingClick=!1,!0):"submit"===t.target.type&&0===t.detail||(e=this.onMouse(t),e||(this.targetElement=null),e)},i.prototype.destroy=function(){var t=this.layer;a&&(t.removeEventListener("mouseover",this.onMouse,!0),t.removeEventListener("mousedown",this.onMouse,!0),t.removeEventListener("mouseup",this.onMouse,!0)),t.removeEventListener("click",this.onClick,!0),t.removeEventListener("touchstart",this.onTouchStart,!1),t.removeEventListener("touchmove",this.onTouchMove,!1),t.removeEventListener("touchend",this.onTouchEnd,!1),t.removeEventListener("touchcancel",this.onTouchCancel,!1)},i.notNeeded=function(t){var e,n,r,o;if("undefined"==typeof 
window.ontouchstart)return!0;if(n=+(/Chrome\/([0-9]+)/.exec(navigator.userAgent)||[,0])[1]){if(!a)return!0;if(e=document.querySelector("meta[name=viewport]")){if(e.content.indexOf("user-scalable=no")!==-1)return!0;if(n>31&&document.documentElement.scrollWidth<=window.outerWidth)return!0}}if(f&&(r=navigator.userAgent.match(/Version\/([0-9]*)\.([0-9]*)/),r[1]>=10&&r[2]>=3&&(e=document.querySelector("meta[name=viewport]")))){if(e.content.indexOf("user-scalable=no")!==-1)return!0;if(document.documentElement.scrollWidth<=window.outerWidth)return!0}return"none"===t.style.msTouchAction||"manipulation"===t.style.touchAction||(o=+(/Firefox\/([0-9]+)/.exec(navigator.userAgent)||[,0])[1],!!(o>=27&&(e=document.querySelector("meta[name=viewport]"),e&&(e.content.indexOf("user-scalable=no")!==-1||document.documentElement.scrollWidth<=window.outerWidth)))||("none"===t.style.touchAction||"manipulation"===t.style.touchAction))},i.attach=function(t,e){return new i(t,e)},"object"===o(n(33))&&n(33)?(r=function(){return i}.call(e,n,e,t),!(void 0!==r&&(t.exports=r))):"undefined"!=typeof t&&t.exports?(t.exports=i.attach,t.exports.FastClick=i):window.FastClick=i}()},function(t,e,n){"use strict";var r,o,i="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t};!function(s){var a=!1;if(r=s,o="function"==typeof r?r.call(e,n,e,t):r,!(void 0!==o&&(t.exports=o)),a=!0,"object"===i(e)&&(t.exports=s(),a=!0),!a){var c=window.Cookies,u=window.Cookies=s();u.noConflict=function(){return window.Cookies=c,u}}}(function(){function t(){for(var t=0,e={};t<arguments.length;t++){var n=arguments[t];for(var r in n)e[r]=n[r]}return e}function e(n){function r(e,o,i){var s;if("undefined"!=typeof document){if(arguments.length>1){if(i=t({path:"/"},r.defaults,i),"number"==typeof i.expires){var a=new 
Date;a.setMilliseconds(a.getMilliseconds()+864e5*i.expires),i.expires=a}try{s=JSON.stringify(o),/^[\{\[]/.test(s)&&(o=s)}catch(t){}return o=n.write?n.write(o,e):encodeURIComponent(String(o)).replace(/%(23|24|26|2B|3A|3C|3E|3D|2F|3F|40|5B|5D|5E|60|7B|7D|7C)/g,decodeURIComponent),e=encodeURIComponent(String(e)),e=e.replace(/%(23|24|26|2B|5E|60|7C)/g,decodeURIComponent),e=e.replace(/[\(\)]/g,escape),document.cookie=[e,"=",o,i.expires?"; expires="+i.expires.toUTCString():"",i.path?"; path="+i.path:"",i.domain?"; domain="+i.domain:"",i.secure?"; secure":""].join("")}e||(s={});for(var c=document.cookie?document.cookie.split("; "):[],u=/(%[0-9A-Z]{2})+/g,l=0;l<c.length;l++){var f=c[l].split("="),h=f.slice(1).join("=");'"'===h.charAt(0)&&(h=h.slice(1,-1));try{var d=f[0].replace(u,decodeURIComponent);if(h=n.read?n.read(h,d):n(h,d)||h.replace(u,decodeURIComponent),this.json)try{h=JSON.parse(h)}catch(t){}if(e===d){s=h;break}e||(s[d]=h)}catch(t){}}return s}}return r.set=r,r.get=function(t){return r.call(r,t)},r.getJSON=function(){return r.apply({json:!0},[].slice.call(arguments))},r.defaults={},r.remove=function(e,n){r(e,"",t(n,{expires:-1}))},r.withConverter=e,r}return e(function(){})})},function(t,e,n){"use strict";var r,o;"function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t};!function(){var i=function t(e){var n=new t.Index;return n.pipeline.add(t.trimmer,t.stopWordFilter,t.stemmer),e&&e.call(n,n),n};i.version="0.7.2",i.utils={},i.utils.warn=function(t){return function(e){t.console&&console.warn&&console.warn(e)}}(this),i.utils.asString=function(t){return void 0===t||null===t?"":t.toString()},i.EventEmitter=function(){this.events={}},i.EventEmitter.prototype.addListener=function(){var t=Array.prototype.slice.call(arguments),e=t.pop(),n=t;if("function"!=typeof e)throw new TypeError("last argument must be a 
function");n.forEach(function(t){this.hasHandler(t)||(this.events[t]=[]),this.events[t].push(e)},this)},i.EventEmitter.prototype.removeListener=function(t,e){if(this.hasHandler(t)){var n=this.events[t].indexOf(e);this.events[t].splice(n,1),this.events[t].length||delete this.events[t]}},i.EventEmitter.prototype.emit=function(t){if(this.hasHandler(t)){var e=Array.prototype.slice.call(arguments,1);this.events[t].forEach(function(t){t.apply(void 0,e)})}},i.EventEmitter.prototype.hasHandler=function(t){return t in this.events},i.tokenizer=function(t){if(!arguments.length||null==t||void 0==t)return[];if(Array.isArray(t))return t.map(function(t){return i.utils.asString(t).toLowerCase()});var e=i.tokenizer.seperator||i.tokenizer.separator;return t.toString().trim().toLowerCase().split(e)},i.tokenizer.seperator=!1,i.tokenizer.separator=/[\s\-]+/,i.tokenizer.load=function(t){var e=this.registeredFunctions[t];if(!e)throw new Error("Cannot load un-registered function: "+t);return e},i.tokenizer.label="default",i.tokenizer.registeredFunctions={default:i.tokenizer},i.tokenizer.registerFunction=function(t,e){e in this.registeredFunctions&&i.utils.warn("Overwriting existing tokenizer: "+e),t.label=e,this.registeredFunctions[e]=t},i.Pipeline=function(){this._stack=[]},i.Pipeline.registeredFunctions={},i.Pipeline.registerFunction=function(t,e){e in this.registeredFunctions&&i.utils.warn("Overwriting existing registered function: "+e),t.label=e,i.Pipeline.registeredFunctions[t.label]=t},i.Pipeline.warnIfFunctionNotRegistered=function(t){var e=t.label&&t.label in this.registeredFunctions;e||i.utils.warn("Function is not registered with pipeline. 
This may cause problems when serialising the index.\n",t)},i.Pipeline.load=function(t){var e=new i.Pipeline;return t.forEach(function(t){var n=i.Pipeline.registeredFunctions[t];if(!n)throw new Error("Cannot load un-registered function: "+t);e.add(n)}),e},i.Pipeline.prototype.add=function(){var t=Array.prototype.slice.call(arguments);t.forEach(function(t){i.Pipeline.warnIfFunctionNotRegistered(t),this._stack.push(t)},this)},i.Pipeline.prototype.after=function(t,e){i.Pipeline.warnIfFunctionNotRegistered(e);var n=this._stack.indexOf(t);if(n==-1)throw new Error("Cannot find existingFn");n+=1,this._stack.splice(n,0,e)},i.Pipeline.prototype.before=function(t,e){i.Pipeline.warnIfFunctionNotRegistered(e);var n=this._stack.indexOf(t);if(n==-1)throw new Error("Cannot find existingFn");this._stack.splice(n,0,e)},i.Pipeline.prototype.remove=function(t){var e=this._stack.indexOf(t);e!=-1&&this._stack.splice(e,1)},i.Pipeline.prototype.run=function(t){for(var e=[],n=t.length,r=this._stack.length,o=0;o<n;o++){for(var i=t[o],s=0;s<r&&(i=this._stack[s](i,o,t),void 0!==i&&""!==i);s++);void 0!==i&&""!==i&&e.push(i)}return e},i.Pipeline.prototype.reset=function(){this._stack=[]},i.Pipeline.prototype.toJSON=function(){return this._stack.map(function(t){return i.Pipeline.warnIfFunctionNotRegistered(t),t.label})},i.Vector=function(){this._magnitude=null,this.list=void 0,this.length=0},i.Vector.Node=function(t,e,n){this.idx=t,this.val=e,this.next=n},i.Vector.prototype.insert=function(t,e){this._magnitude=void 0;var n=this.list;if(!n)return this.list=new i.Vector.Node(t,e,n),this.length++;if(t<n.idx)return this.list=new i.Vector.Node(t,e,n),this.length++;for(var r=n,o=n.next;void 0!=o;){if(t<o.idx)return r.next=new i.Vector.Node(t,e,o),this.length++;r=o,o=o.next}return r.next=new i.Vector.Node(t,e,o),this.length++},i.Vector.prototype.magnitude=function(){if(this._magnitude)return this._magnitude;for(var t,e=this.list,n=0;e;)t=e.val,n+=t*t,e=e.next;return 
this._magnitude=Math.sqrt(n)},i.Vector.prototype.dot=function(t){for(var e=this.list,n=t.list,r=0;e&&n;)e.idx<n.idx?e=e.next:e.idx>n.idx?n=n.next:(r+=e.val*n.val,e=e.next,n=n.next);return r},i.Vector.prototype.similarity=function(t){return this.dot(t)/(this.magnitude()*t.magnitude())},i.SortedSet=function(){this.length=0,this.elements=[]},i.SortedSet.load=function(t){var e=new this;return e.elements=t,e.length=t.length,e},i.SortedSet.prototype.add=function(){var t,e;for(t=0;t<arguments.length;t++)e=arguments[t],~this.indexOf(e)||this.elements.splice(this.locationFor(e),0,e);this.length=this.elements.length},i.SortedSet.prototype.toArray=function(){return this.elements.slice()},i.SortedSet.prototype.map=function(t,e){return this.elements.map(t,e)},i.SortedSet.prototype.forEach=function(t,e){return this.elements.forEach(t,e)},i.SortedSet.prototype.indexOf=function(t){for(var e=0,n=this.elements.length,r=n-e,o=e+Math.floor(r/2),i=this.elements[o];r>1;){if(i===t)return o;i<t&&(e=o),i>t&&(n=o),r=n-e,o=e+Math.floor(r/2),i=this.elements[o]}return i===t?o:-1},i.SortedSet.prototype.locationFor=function(t){for(var e=0,n=this.elements.length,r=n-e,o=e+Math.floor(r/2),i=this.elements[o];r>1;)i<t&&(e=o),i>t&&(n=o),r=n-e,o=e+Math.floor(r/2),i=this.elements[o];return i>t?o:i<t?o+1:void 0},i.SortedSet.prototype.intersect=function(t){for(var e=new i.SortedSet,n=0,r=0,o=this.length,s=t.length,a=this.elements,c=t.elements;;){if(n>o-1||r>s-1)break;a[n]!==c[r]?a[n]<c[r]?n++:a[n]>c[r]&&r++:(e.add(a[n]),n++,r++)}return e},i.SortedSet.prototype.clone=function(){var t=new i.SortedSet;return t.elements=this.toArray(),t.length=t.elements.length,t},i.SortedSet.prototype.union=function(t){var e,n,r;this.length>=t.length?(e=this,n=t):(e=t,n=this),r=e.clone();for(var o=0,i=n.toArray();o<i.length;o++)r.add(i[o]);return r},i.SortedSet.prototype.toJSON=function(){return this.toArray()},i.Index=function(){this._fields=[],this._ref="id",this.pipeline=new i.Pipeline,this.documentStore=new 
i.Store,this.tokenStore=new i.TokenStore,this.corpusTokens=new i.SortedSet,this.eventEmitter=new i.EventEmitter,this.tokenizerFn=i.tokenizer,this._idfCache={},this.on("add","remove","update",function(){this._idfCache={}}.bind(this))},i.Index.prototype.on=function(){var t=Array.prototype.slice.call(arguments);return this.eventEmitter.addListener.apply(this.eventEmitter,t)},i.Index.prototype.off=function(t,e){return this.eventEmitter.removeListener(t,e)},i.Index.load=function(t){t.version!==i.version&&i.utils.warn("version mismatch: current "+i.version+" importing "+t.version);var e=new this;return e._fields=t.fields,e._ref=t.ref,e.tokenizer(i.tokenizer.load(t.tokenizer)),e.documentStore=i.Store.load(t.documentStore),e.tokenStore=i.TokenStore.load(t.tokenStore),e.corpusTokens=i.SortedSet.load(t.corpusTokens),e.pipeline=i.Pipeline.load(t.pipeline),e},i.Index.prototype.field=function(t,e){var e=e||{},n={name:t,boost:e.boost||1};return this._fields.push(n),this},i.Index.prototype.ref=function(t){return this._ref=t,this},i.Index.prototype.tokenizer=function(t){var e=t.label&&t.label in i.tokenizer.registeredFunctions;return e||i.utils.warn("Function is not a registered tokenizer. 
This may cause problems when serialising the index"),this.tokenizerFn=t,this},i.Index.prototype.add=function(t,e){var n={},r=new i.SortedSet,o=t[this._ref],e=void 0===e||e;this._fields.forEach(function(e){var o=this.pipeline.run(this.tokenizerFn(t[e.name]));n[e.name]=o;for(var i=0;i<o.length;i++){var s=o[i];r.add(s),this.corpusTokens.add(s)}},this),this.documentStore.set(o,r);for(var s=0;s<r.length;s++){for(var a=r.elements[s],c=0,u=0;u<this._fields.length;u++){var l=this._fields[u],f=n[l.name],h=f.length;if(h){for(var d=0,p=0;p<h;p++)f[p]===a&&d++;c+=d/h*l.boost}}this.tokenStore.add(a,{ref:o,tf:c})}e&&this.eventEmitter.emit("add",t,this)},i.Index.prototype.remove=function(t,e){var n=t[this._ref],e=void 0===e||e;if(this.documentStore.has(n)){var r=this.documentStore.get(n);this.documentStore.remove(n),r.forEach(function(t){this.tokenStore.remove(t,n)},this),e&&this.eventEmitter.emit("remove",t,this)}},i.Index.prototype.update=function(t,e){var e=void 0===e||e;this.remove(t,!1),this.add(t,!1),e&&this.eventEmitter.emit("update",t,this)},i.Index.prototype.idf=function(t){var e="@"+t;if(Object.prototype.hasOwnProperty.call(this._idfCache,e))return this._idfCache[e];var n=this.tokenStore.count(t),r=1;return n>0&&(r=1+Math.log(this.documentStore.length/n)),this._idfCache[e]=r},i.Index.prototype.search=function(t){var e=this.pipeline.run(this.tokenizerFn(t)),n=new i.Vector,r=[],o=this._fields.reduce(function(t,e){return t+e.boost},0),s=e.some(function(t){return this.tokenStore.has(t)},this);if(!s)return[];e.forEach(function(t,e,s){var a=1/s.length*this._fields.length*o,c=this,u=this.tokenStore.expand(t).reduce(function(e,r){var o=c.corpusTokens.indexOf(r),s=c.idf(r),u=1,l=new i.SortedSet;if(r!==t){var f=Math.max(3,r.length-t.length);u=1/Math.log(f)}o>-1&&n.insert(o,a*s*u);for(var h=c.tokenStore.get(r),d=Object.keys(h),p=d.length,v=0;v<p;v++)l.add(h[d[v]].ref);return e.union(l)},new i.SortedSet);r.push(u)},this);var a=r.reduce(function(t,e){return t.intersect(e)});return 
a.map(function(t){return{ref:t,score:n.similarity(this.documentVector(t))}},this).sort(function(t,e){return e.score-t.score})},i.Index.prototype.documentVector=function(t){for(var e=this.documentStore.get(t),n=e.length,r=new i.Vector,o=0;o<n;o++){var s=e.elements[o],a=this.tokenStore.get(s)[t].tf,c=this.idf(s);r.insert(this.corpusTokens.indexOf(s),a*c)}return r},i.Index.prototype.toJSON=function(){return{version:i.version,fields:this._fields,ref:this._ref,tokenizer:this.tokenizerFn.label,documentStore:this.documentStore.toJSON(),tokenStore:this.tokenStore.toJSON(),corpusTokens:this.corpusTokens.toJSON(),pipeline:this.pipeline.toJSON()}},i.Index.prototype.use=function(t){var e=Array.prototype.slice.call(arguments,1);e.unshift(this),t.apply(this,e)},i.Store=function(){this.store={},this.length=0},i.Store.load=function(t){var e=new this;return e.length=t.length,e.store=Object.keys(t.store).reduce(function(e,n){return e[n]=i.SortedSet.load(t.store[n]),e},{}),e},i.Store.prototype.set=function(t,e){this.has(t)||this.length++,this.store[t]=e},i.Store.prototype.get=function(t){return this.store[t]},i.Store.prototype.has=function(t){return t in this.store},i.Store.prototype.remove=function(t){this.has(t)&&(delete this.store[t],this.length--)},i.Store.prototype.toJSON=function(){return{store:this.store,length:this.length}},i.stemmer=function(){var t={ational:"ate",tional:"tion",enci:"ence",anci:"ance",izer:"ize",bli:"ble",alli:"al",entli:"ent",eli:"e",ousli:"ous",ization:"ize",ation:"ate",ator:"ate",alism:"al",iveness:"ive",fulness:"ful",ousness:"ous",aliti:"al",iviti:"ive",biliti:"ble",logi:"log"},e={icate:"ic",ative:"",alize:"al",iciti:"ic",ical:"ic",ful:"",ness:""},n="[^aeiou]",r="[aeiouy]",o=n+"[^aeiouy]*",i=r+"[aeiou]*",s="^("+o+")?"+i+o,a="^("+o+")?"+i+o+"("+i+")?$",c="^("+o+")?"+i+o+i+o,u="^("+o+")?"+r,l=new RegExp(s),f=new RegExp(c),h=new RegExp(a),d=new RegExp(u),p=/^(.+?)(ss|i)es$/,v=/^(.+?)([^s])s$/,y=/^(.+?)eed$/,m=/^(.+?)(ed|ing)$/,g=/.$/,w=/(at|bl|iz)$/,b=new 
RegExp("([^aeiouylsz])\\1$"),_=new RegExp("^"+o+r+"[^aeiouwxy]$"),S=/^(.+?[^aeiou])y$/,x=/^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/,E=/^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/,k=/^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/,T=/^(.+?)(s|t)(ion)$/,O=/^(.+?)e$/,C=/ll$/,A=new RegExp("^"+o+r+"[^aeiouwxy]$"),P=function(n){var r,o,i,s,a,c,u;if(n.length<3)return n;if(i=n.substr(0,1),"y"==i&&(n=i.toUpperCase()+n.substr(1)),s=p,a=v,s.test(n)?n=n.replace(s,"$1$2"):a.test(n)&&(n=n.replace(a,"$1$2")),s=y,a=m,s.test(n)){var P=s.exec(n);s=l,s.test(P[1])&&(s=g,n=n.replace(s,""))}else if(a.test(n)){var P=a.exec(n);r=P[1],a=d,a.test(r)&&(n=r,a=w,c=b,u=_,a.test(n)?n+="e":c.test(n)?(s=g,n=n.replace(s,"")):u.test(n)&&(n+="e"))}if(s=S,s.test(n)){var P=s.exec(n);r=P[1],n=r+"i"}if(s=x,s.test(n)){var P=s.exec(n);r=P[1],o=P[2],s=l,s.test(r)&&(n=r+t[o])}if(s=E,s.test(n)){var P=s.exec(n);r=P[1],o=P[2],s=l,s.test(r)&&(n=r+e[o])}if(s=k,a=T,s.test(n)){var P=s.exec(n);r=P[1],s=f,s.test(r)&&(n=r)}else if(a.test(n)){var P=a.exec(n);r=P[1]+P[2],a=f,a.test(r)&&(n=r)}if(s=O,s.test(n)){var P=s.exec(n);r=P[1],s=f,a=h,c=A,(s.test(r)||a.test(r)&&!c.test(r))&&(n=r)}return s=C,a=f,s.test(n)&&a.test(n)&&(s=g,n=n.replace(s,"")),"y"==i&&(n=i.toLowerCase()+n.substr(1)),n};return P}(),i.Pipeline.registerFunction(i.stemmer,"stemmer"),i.generateStopWordFilter=function(t){var e=t.reduce(function(t,e){return t[e]=e,t},{});return function(t){if(t&&e[t]!==t)return 
t}},i.stopWordFilter=i.generateStopWordFilter(["a","able","about","across","after","all","almost","also","am","among","an","and","any","are","as","at","be","because","been","but","by","can","cannot","could","dear","did","do","does","either","else","ever","every","for","from","get","got","had","has","have","he","her","hers","him","his","how","however","i","if","in","into","is","it","its","just","least","let","like","likely","may","me","might","most","must","my","neither","no","nor","not","of","off","often","on","only","or","other","our","own","rather","said","say","says","she","should","since","so","some","than","that","the","their","them","then","there","these","they","this","tis","to","too","twas","us","wants","was","we","were","what","when","where","which","while","who","whom","why","will","with","would","yet","you","your"]),i.Pipeline.registerFunction(i.stopWordFilter,"stopWordFilter"),i.trimmer=function(t){return t.replace(/^\W+/,"").replace(/\W+$/,"")},i.Pipeline.registerFunction(i.trimmer,"trimmer"),i.TokenStore=function(){this.root={docs:{}},this.length=0},i.TokenStore.load=function(t){var e=new this;return e.root=t.root,e.length=t.length,e},i.TokenStore.prototype.add=function(t,e,n){var n=n||this.root,r=t.charAt(0),o=t.slice(1);return r in n||(n[r]={docs:{}}),0===o.length?(n[r].docs[e.ref]=e,void(this.length+=1)):this.add(o,e,n[r])},i.TokenStore.prototype.has=function(t){if(!t)return!1;for(var e=this.root,n=0;n<t.length;n++){if(!e[t.charAt(n)])return!1;e=e[t.charAt(n)]}return!0},i.TokenStore.prototype.getNode=function(t){if(!t)return{};for(var e=this.root,n=0;n<t.length;n++){if(!e[t.charAt(n)])return{};e=e[t.charAt(n)]}return e},i.TokenStore.prototype.get=function(t,e){return this.getNode(t,e).docs||{}},i.TokenStore.prototype.count=function(t,e){return Object.keys(this.get(t,e)).length},i.TokenStore.prototype.remove=function(t,e){if(t){for(var n=this.root,r=0;r<t.length;r++){if(!(t.charAt(r)in n))return;n=n[t.charAt(r)]}delete 
n.docs[e]}},i.TokenStore.prototype.expand=function(t,e){var n=this.getNode(t),r=n.docs||{},e=e||[];return Object.keys(r).length&&e.push(t),Object.keys(n).forEach(function(n){"docs"!==n&&e.concat(this.expand(t+n,e))},this),e},i.TokenStore.prototype.toJSON=function(){return{root:this.root,length:this.length}},function(i,s){r=s,o="function"==typeof r?r.call(e,n,e,t):r,!(void 0!==o&&(t.exports=o))}(this,function(){return i})}()},function(t,e,n){"use strict";var r=n(73),o=n(76),i=n(80),s=n(83),a=n(85);e.a={Event:r.a,Nav:o.a,Search:i.a,Sidebar:s.a,Source:a.a}},function(t,e,n){"use strict";var r=n(74),o=n(75);e.a={Listener:r.a,MatchMedia:o.a}},function(t,e,n){"use strict";function r(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}var o=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),i=function(){function t(e,n,o){var i=this;r(this,t),this.els_="string"==typeof e?document.querySelectorAll(e):[].concat(e),this.handler_="function"==typeof o?{update:o}:o,this.events_=[].concat(n),this.update_=function(t){return i.handler_.update(t)}}return o(t,[{key:"listen",value:function(){var t=this;Array.prototype.forEach.call(this.els_,function(e){t.events_.forEach(function(n){e.addEventListener(n,t.update_,!1)})}),"function"==typeof this.handler_.setup&&this.handler_.setup()}},{key:"unlisten",value:function(){var t=this;Array.prototype.forEach.call(this.els_,function(e){t.events_.forEach(function(n){e.removeEventListener(n,t.update_)})}),"function"==typeof this.handler_.reset&&this.handler_.reset()}}]),t}();e.a=i},function(t,e,n){"use strict";function r(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}var o=function t(e,n){r(this,t),this.handler_=function(t){t.matches?n.listen():n.unlisten()};var 
o=window.matchMedia(e);o.addListener(this.handler_),this.handler_(o)};e.a=o},function(t,e,n){"use strict";var r=n(77),o=n(78),i=n(79);e.a={Blur:r.a,Collapse:o.a,Scrolling:i.a}},function(t,e,n){"use strict";function r(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}var o=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),i=function(){function t(e){r(this,t),this.els_="string"==typeof e?document.querySelectorAll(e):e,this.index_=0,this.offset_=window.pageYOffset,this.dir_=!1,this.anchors_=[].map.call(this.els_,function(t){return document.getElementById(t.hash.substring(1))})}return o(t,[{key:"setup",value:function(){this.update()}},{key:"update",value:function(){var t=window.pageYOffset,e=this.offset_-t<0;if(this.dir_!==e&&(this.index_=e?this.index_=0:this.index_=this.els_.length-1),0!==this.anchors_.length){if(this.offset_<=t)for(var n=this.index_+1;n<this.els_.length&&this.anchors_[n].offsetTop-80<=t;n++)n>0&&(this.els_[n-1].dataset.mdState="blur"),this.index_=n;else for(var r=this.index_;r>=0;r--){if(!(this.anchors_[r].offsetTop-80>t)){this.index_=r;break}r>0&&(this.els_[r-1].dataset.mdState="")}this.offset_=t,this.dir_=e}}},{key:"reset",value:function(){Array.prototype.forEach.call(this.els_,function(t){t.dataset.mdState=""}),this.index_=0,this.offset_=window.pageYOffset}}]),t}();e.a=i},function(t,e,n){"use strict";function r(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}var o=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),i=function(){function t(e){r(this,t),this.el_="string"==typeof 
e?document.querySelector(e):e}return o(t,[{key:"update",value:function(){var t=this,e=this.el_.getBoundingClientRect().height;e?(this.el_.style.maxHeight=e+"px",requestAnimationFrame(function(){t.el_.setAttribute("data-md-state","animate"),t.el_.style.maxHeight="0px"})):!function(){t.el_.setAttribute("data-md-state","expand"),t.el_.style.maxHeight="";var e=t.el_.getBoundingClientRect().height;t.el_.removeAttribute("data-md-state"),t.el_.style.maxHeight="0px",requestAnimationFrame(function(){t.el_.setAttribute("data-md-state","animate"),t.el_.style.maxHeight=e+"px"})}();var n=function t(e){e.target.removeAttribute("data-md-state"),e.target.style.maxHeight="",e.target.removeEventListener("transitionend",t)};this.el_.addEventListener("transitionend",n,!1)}},{key:"reset",value:function(){this.el_.dataset.mdState="",this.el_.style.maxHeight=""}}]),t}();e.a=i},function(t,e,n){"use strict";function r(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}var o=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),i=function(){function t(e){r(this,t),this.el_="string"==typeof e?document.querySelector(e):e}return o(t,[{key:"setup",value:function(){this.el_.children[1].style.webkitOverflowScrolling="touch";var t=this.el_.querySelectorAll("[data-md-toggle]");Array.prototype.forEach.call(t,function(t){if(t.checked){for(var e=t.nextElementSibling;"NAV"!==e.tagName;)e=e.nextElementSibling;var n=t.parentNode.parentNode,r=e.children[e.children.length-1];n.style.webkitOverflowScrolling="",r.style.webkitOverflowScrolling="touch"}})}},{key:"update",value:function(t){for(var e=t.target.nextElementSibling;"NAV"!==e.tagName;)e=e.nextElementSibling;var 
n=t.target.parentNode.parentNode,r=e.children[e.children.length-1];n.style.webkitOverflowScrolling="",r.style.webkitOverflowScrolling="",t.target.checked||!function(){var t=function t(){n.style.webkitOverflowScrolling="touch",e.removeEventListener("transitionend",t)};e.addEventListener("transitionend",t,!1)}(),t.target.checked&&!function(){var t=function t(){r.style.webkitOverflowScrolling="touch",e.removeEventListener("transitionend",t,!1)};e.addEventListener("transitionend",t,!1)}()}},{key:"reset",value:function(){this.el_.children[1].style.webkitOverflowScrolling="";var t=this.el_.querySelectorAll("[data-md-toggle]");Array.prototype.forEach.call(t,function(t){if(t.checked){for(var e=t.nextElementSibling;"NAV"!==e.tagName;)e=e.nextElementSibling;var n=t.parentNode.parentNode,r=e.children[e.children.length-1];n.style.webkitOverflowScrolling="",r.style.webkitOverflowScrolling=""}})}}]),t}();e.a=i},function(t,e,n){"use strict";var r=n(81),o=n(82);e.a={Lock:r.a,Result:o.a}},function(t,e,n){"use strict";function r(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}var o=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),i=function(){function t(e){r(this,t),this.el_="string"==typeof e?document.querySelector(e):e}return o(t,[{key:"setup",value:function(){this.update()}},{key:"update",value:function(){var t=this;this.el_.checked?(this.offset_=window.pageYOffset,setTimeout(function(){window.scrollTo(0,0),t.el_.checked&&(document.body.dataset.mdState="lock")},400)):(document.body.dataset.mdState="",setTimeout(function(){"undefined"!=typeof 
t.offset_&&window.scrollTo(0,t.offset_)},100))}},{key:"reset",value:function(){"lock"===document.body.dataset.mdState&&window.scrollTo(0,this.offset_),document.body.dataset.mdState=""}}]),t}();e.a=i},function(t,e,n){"use strict";(function(t){function r(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}var o=n(71),i=n.n(o),s=function(){function t(t,e){var n=[],r=!0,o=!1,i=void 0;try{for(var s,a=t[Symbol.iterator]();!(r=(s=a.next()).done)&&(n.push(s.value),!e||n.length!==e);r=!0);}catch(t){o=!0,i=t}finally{try{!r&&a.return&&a.return()}finally{if(o)throw i}}return n}return function(e,n){if(Array.isArray(e))return e;if(Symbol.iterator in Object(e))return t(e,n);throw new TypeError("Invalid attempt to destructure non-iterable instance")}}(),a=function(){function t(t,e){
+for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),c=function(){function e(n,o){r(this,e),this.el_="string"==typeof n?document.querySelector(n):n,this.data_=o,this.meta_=t.createElement("div",{class:"md-search-result__meta"},"Type to start searching"),this.list_=t.createElement("ol",{class:"md-search-result__list"}),this.el_.appendChild(this.meta_),this.el_.appendChild(this.list_),this.truncate_=function(t,e){var n=e;if(t.length>n){for(;" "!==t[n]&&--n>0;);return t.substring(0,n)+"..."}return t}}return a(e,[{key:"update",value:function(e){var n=this;if("focus"!==e.type||this.index_){if("keyup"===e.type){for(;this.list_.firstChild;)this.list_.removeChild(this.list_.firstChild);var r=this.index_.search(e.target.value);r.forEach(function(e){var r=n.data_[e.ref],o=r.location.split("#"),i=s(o,1),a=i[0];a=a.replace(/^(\/?\.{2})+/g,""),n.list_.appendChild(t.createElement("li",{class:"md-search-result__item"},t.createElement("a",{href:r.location,title:r.title,class:"md-search-result__link","data-md-rel":a===document.location.pathname?"anchor":""},t.createElement("article",{class:"md-search-result__article"},t.createElement("h1",{class:"md-search-result__title"},r.title),t.createElement("p",{class:"md-search-result__teaser"},n.truncate_(r.text,140))))))});var o=this.list_.querySelectorAll("[data-md-rel=anchor]");Array.prototype.forEach.call(o,function(t){t.addEventListener("click",function(e){var n=document.querySelector("[data-md-toggle=search]");n.checked&&(n.checked=!1,n.dispatchEvent(new CustomEvent("change"))),e.preventDefault(),setTimeout(function(){document.location.href=t.href},100)})}),this.meta_.textContent=r.length+" search result"+(1!==r.length?"s":"")}}else!function(){var 
t=function(t){n.index_=i()(function(){this.field("title",{boost:10}),this.field("text"),this.ref("location")}),n.data_=t.reduce(function(t,e){return n.index_.add(e),t[e.location]=e,t},{})};setTimeout(function(){return"function"==typeof n.data_?n.data_().then(t):t(n.data_)},250)}()}}]),e}();e.a=c}).call(e,n(22))},function(t,e,n){"use strict";var r=n(84);e.a={Position:r.a}},function(t,e,n){"use strict";function r(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}var o=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),i=function(){function t(e){r(this,t),this.el_="string"==typeof e?document.querySelector(e):e,this.parent_=this.el_.parentNode,this.height_=0}return o(t,[{key:"setup",value:function(){this.offset_=this.el_.offsetTop-this.parent_.offsetTop,this.update()}},{key:"update",value:function(){var t=window.pageYOffset,e=window.innerHeight;this.bounds_={top:this.parent_.offsetTop,bottom:this.parent_.offsetTop+this.parent_.offsetHeight};var n=e-this.bounds_.top-Math.max(0,this.offset_-t)-Math.max(0,t+e-this.bounds_.bottom);n!==this.height_&&(this.el_.style.height=(this.height_=n)+"px"),t>=this.offset_?"lock"!==this.el_.dataset.mdState&&(this.el_.dataset.mdState="lock"):"lock"===this.el_.dataset.mdState&&(this.el_.dataset.mdState="")}},{key:"reset",value:function(){this.el_.dataset.mdState="",this.el_.style.height="",this.height_=0}}]),t}();e.a=i},function(t,e,n){"use strict";var r=n(86),o=n(89);e.a={Adapter:r.a,Repository:o.a}},function(t,e,n){"use strict";var r=n(88);e.a={GitHub:r.a}},function(t,e,n){"use strict";function r(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}var o=n(70),i=n.n(o),s=function(){function t(t,e){for(var n=0;n<e.length;n++){var 
r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),a=function(){function t(e){r(this,t),this.el_="string"==typeof e?document.querySelector(e):e,this.base_=this.el_.href,this.salt_=this.hash_(this.base_)}return s(t,[{key:"fetch",value:function(){var t=this;return new Promise(function(e){var n=i.a.getJSON(t.salt_+".cache-source");"undefined"!=typeof n?e(n):t.fetch_().then(function(n){i.a.set(t.salt_+".cache-source",n,{expires:1/96}),e(n)})})}},{key:"fetch_",value:function(){throw new Error("fetch_(): Not implemented")}},{key:"format_",value:function(t){return t>1e4?(t/1e3).toFixed(0)+"k":t>1e3?(t/1e3).toFixed(1)+"k":t}},{key:"hash_",value:function(t){var e=0;if(0===t.length)return e;for(var n=0,r=t.length;n<r;n++)e=(e<<5)-e+t.charCodeAt(n),e|=0;return e}}]),t}();e.a=a},function(t,e,n){"use strict";function r(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}function o(t,e){if(!t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!e||"object"!=typeof e&&"function"!=typeof e?t:e}function i(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function, not "+typeof e);t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,enumerable:!1,writable:!0,configurable:!0}}),e&&(Object.setPrototypeOf?Object.setPrototypeOf(t,e):t.__proto__=e)}var s=n(87),a=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),c=function(t){function e(t){r(this,e);var n=o(this,(e.__proto__||Object.getPrototypeOf(e)).call(this,t));return n.base_=n.base_.replace("github.com/","api.github.com/repos/"),n}return i(e,t),a(e,[{key:"fetch_",value:function(){var 
t=this;return fetch(this.base_).then(function(t){return t.json()}).then(function(e){return[t.format_(e.stargazers_count)+" Stars",t.format_(e.forks_count)+" Forks"]})}}]),e}(s.a);e.a=c},function(t,e,n){"use strict";(function(t){function n(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}var r=function(){function t(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(e,n,r){return n&&t(e.prototype,n),r&&t(e,r),e}}(),o=function(){function e(t){n(this,e),this.el_="string"==typeof t?document.querySelector(t):t}return r(e,[{key:"initialize",value:function(e){e.length&&this.el_.children[this.el_.children.length-1].appendChild(t.createElement("ul",{class:"md-source__facts"},e.map(function(e){return t.createElement("li",{class:"md-source__fact"},e)}))),this.el_.dataset.mdState="done"}}]),e}();e.a=o}).call(e,n(22))},function(t,e,n){n(34),n(35),n(36),t.exports=n(37)}]);
\ No newline at end of file
diff --git a/material/assets/javascripts/modernizr-56ade86843.js b/material/assets/javascripts/modernizr-56ade86843.js
new file mode 100644
index 0000000000000000000000000000000000000000..cffa5835298d48afae768989c8fc17bc9500a191
--- /dev/null
+++ b/material/assets/javascripts/modernizr-56ade86843.js
@@ -0,0 +1 @@
+!function(e,n,t){function r(e,n){return typeof e===n}function o(){var e,n,t,o,i,s,f;for(var a in w)if(w.hasOwnProperty(a)){if(e=[],n=w[a],n.name&&(e.push(n.name.toLowerCase()),n.options&&n.options.aliases&&n.options.aliases.length))for(t=0;t<n.options.aliases.length;t++)e.push(n.options.aliases[t].toLowerCase());for(o=r(n.fn,"function")?n.fn():n.fn,i=0;i<e.length;i++)s=e[i],f=s.split("."),1===f.length?_[f[0]]=o:(!_[f[0]]||_[f[0]]instanceof Boolean||(_[f[0]]=new Boolean(_[f[0]])),_[f[0]][f[1]]=o),x.push((o?"":"no-")+f.join("-"))}}function i(e){var n=b.className,t=_._config.classPrefix||"";if(P&&(n=n.baseVal),_._config.enableJSClass){var r=new RegExp("(^|\\s)"+t+"no-js(\\s|$)");n=n.replace(r,"$1"+t+"js$2")}_._config.enableClasses&&(n+=" "+t+e.join(" "+t),P?b.className.baseVal=n:b.className=n)}function s(e,n){if("object"==typeof e)for(var t in e)S(e,t)&&s(t,e[t]);else{e=e.toLowerCase();var r=e.split("."),o=_[r[0]];if(2==r.length&&(o=o[r[1]]),"undefined"!=typeof o)return _;n="function"==typeof n?n():n,1==r.length?_[r[0]]=n:(!_[r[0]]||_[r[0]]instanceof Boolean||(_[r[0]]=new Boolean(_[r[0]])),_[r[0]][r[1]]=n),i([(n&&0!=n?"":"no-")+r.join("-")]),_._trigger(e,n)}return _}function f(e,n){return!!~(""+e).indexOf(n)}function a(){return"function"!=typeof n.createElement?n.createElement(arguments[0]):P?n.createElementNS.call(n,"http://www.w3.org/2000/svg",arguments[0]):n.createElement.apply(n,arguments)}function l(){var e=n.body;return e||(e=a(P?"svg":"body"),e.fake=!0),e}function u(e,t,r,o){var i,s,f,u,p="modernizr",d=a("div"),c=l();if(parseInt(r,10))for(;r--;)f=a("div"),f.id=o?o[r]:p+(r+1),d.appendChild(f);return 
i=a("style"),i.type="text/css",i.id="s"+p,(c.fake?c:d).appendChild(i),c.appendChild(d),i.styleSheet?i.styleSheet.cssText=e:i.appendChild(n.createTextNode(e)),d.id=p,c.fake&&(c.style.background="",c.style.overflow="hidden",u=b.style.overflow,b.style.overflow="hidden",b.appendChild(c)),s=t(d,e),c.fake?(c.parentNode.removeChild(c),b.style.overflow=u,b.offsetHeight):d.parentNode.removeChild(d),!!s}function p(e){return e.replace(/([A-Z])/g,function(e,n){return"-"+n.toLowerCase()}).replace(/^ms-/,"-ms-")}function d(n,r){var o=n.length;if("CSS"in e&&"supports"in e.CSS){for(;o--;)if(e.CSS.supports(p(n[o]),r))return!0;return!1}if("CSSSupportsRule"in e){for(var i=[];o--;)i.push("("+p(n[o])+":"+r+")");return i=i.join(" or "),u("@supports ("+i+") { #modernizr { position: absolute; } }",function(e){return"absolute"==getComputedStyle(e,null).position})}return t}function c(e){return e.replace(/([a-z])-([a-z])/g,function(e,n,t){return n+t.toUpperCase()}).replace(/^-/,"")}function h(e,n,o,i){function s(){u&&(delete E.style,delete E.modElem)}if(i=!r(i,"undefined")&&i,!r(o,"undefined")){var l=d(e,o);if(!r(l,"undefined"))return l}for(var u,p,h,m,v,y=["modernizr","tspan"];!E.style;)u=!0,E.modElem=a(y.shift()),E.style=E.modElem.style;for(h=e.length,p=0;p<h;p++)if(m=e[p],v=E.style[m],f(m,"-")&&(m=c(m)),E.style[m]!==t){if(i||r(o,"undefined"))return s(),"pfx"!=n||m;try{E.style[m]=o}catch(e){}if(E.style[m]!=v)return s(),"pfx"!=n||m}return s(),!1}function m(e,n){return function(){return e.apply(n,arguments)}}function v(e,n,t){var o;for(var i in e)if(e[i]in n)return t===!1?e[i]:(o=n[e[i]],r(o,"function")?m(o,t||n):o);return!1}function y(e,n,t,o,i){var s=e.charAt(0).toUpperCase()+e.slice(1),f=(e+" "+z.join(s+" ")+s).split(" ");return r(n,"string")||r(n,"undefined")?h(f,n,o,i):(f=(e+" "+k.join(s+" ")+s).split(" "),v(f,n,t))}function g(e,n,r){return y(e,t,t,n,r)}var w=[],C={_version:"3.3.1",_config:{classPrefix:"",enableClasses:!0,enableJSClass:!0,usePrefixes:!0},_q:[],on:function(e,n){var 
t=this;setTimeout(function(){n(t[e])},0)},addTest:function(e,n,t){w.push({name:e,fn:n,options:t})},addAsyncTest:function(e){w.push({name:null,fn:e})}},_=function(){};_.prototype=C,_=new _;var S,x=[],b=n.documentElement,P="svg"===b.nodeName.toLowerCase();!function(){var e={}.hasOwnProperty;S=r(e,"undefined")||r(e.call,"undefined")?function(e,n){return n in e&&r(e.constructor.prototype[n],"undefined")}:function(n,t){return e.call(n,t)}}(),C._l={},C.on=function(e,n){this._l[e]||(this._l[e]=[]),this._l[e].push(n),_.hasOwnProperty(e)&&setTimeout(function(){_._trigger(e,_[e])},0)},C._trigger=function(e,n){if(this._l[e]){var t=this._l[e];setTimeout(function(){var e,r;for(e=0;e<t.length;e++)(r=t[e])(n)},0),delete this._l[e]}},_._q.push(function(){C.addTest=s});var T="Moz O ms Webkit",z=C._config.usePrefixes?T.split(" "):[];C._cssomPrefixes=z;var j={elem:a("modernizr")};_._q.push(function(){delete j.elem});var E={style:j.elem.style};_._q.unshift(function(){delete E.style});var k=C._config.usePrefixes?T.toLowerCase().split(" "):[];C._domPrefixes=k,C.testAllProps=y,C.testAllProps=g;var N=C.testStyles=u,q="CSS"in e&&"supports"in e.CSS,A="supportsCSS"in e;_.addTest("supports",q||A),_.addTest("csstransforms3d",function(){var e=!!g("perspective","1px",!0),n=_._config.usePrefixes;if(e&&(!n||"webkitPerspective"in b.style)){var t,r="#modernizr{width:0;height:0}";_.supports?t="@supports (perspective: 1px)":(t="@media (transform-3d)",n&&(t+=",(-webkit-transform-3d)")),t+="{#modernizr{width:7px;height:18px;margin:0;padding:0;border:0}}",N(r+t,function(n){e=7===n.offsetWidth&&18===n.offsetHeight})}return e}),o(),i(x),delete C.addTest,delete C.addAsyncTest;for(var L=0;L<_._q.length;L++)_._q[L]();e.Modernizr=_}(window,document);
\ No newline at end of file
diff --git a/material/assets/javascripts/modernizr-facb31f4a3.js b/material/assets/javascripts/modernizr-facb31f4a3.js
deleted file mode 100644
index 922da1f62d7e3760aaca7bd221620bf7cab00f36..0000000000000000000000000000000000000000
--- a/material/assets/javascripts/modernizr-facb31f4a3.js
+++ /dev/null
@@ -1 +0,0 @@
-!function(e,t,n){function s(e,t){return typeof e===t}function r(){var e,t,n,r,o,i,a;for(var f in w)if(w.hasOwnProperty(f)){if(e=[],t=w[f],t.name&&(e.push(t.name.toLowerCase()),t.options&&t.options.aliases&&t.options.aliases.length))for(n=0;n<t.options.aliases.length;n++)e.push(t.options.aliases[n].toLowerCase());for(r=s(t.fn,"function")?t.fn():t.fn,o=0;o<e.length;o++)i=e[o],a=i.split("."),1===a.length?_[a[0]]=r:(!_[a[0]]||_[a[0]]instanceof Boolean||(_[a[0]]=new Boolean(_[a[0]])),_[a[0]][a[1]]=r),S.push((r?"":"no-")+a.join("-"))}}function o(e){var t=b.className,n=_._config.classPrefix||"";if(T&&(t=t.baseVal),_._config.enableJSClass){var s=new RegExp("(^|\\s)"+n+"no-js(\\s|$)");t=t.replace(s,"$1"+n+"js$2")}_._config.enableClasses&&(t+=" "+n+e.join(" "+n),T?b.className.baseVal=t:b.className=t)}function i(e,t){if("object"==typeof e)for(var n in e)C(e,n)&&i(n,e[n]);else{e=e.toLowerCase();var s=e.split("."),r=_[s[0]];if(2==s.length&&(r=r[s[1]]),"undefined"!=typeof r)return _;t="function"==typeof t?t():t,1==s.length?_[s[0]]=t:(!_[s[0]]||_[s[0]]instanceof Boolean||(_[s[0]]=new Boolean(_[s[0]])),_[s[0]][s[1]]=t),o([(t&&0!=t?"":"no-")+s.join("-")]),_._trigger(e,t)}return _}function a(){return"function"!=typeof t.createElement?t.createElement(arguments[0]):T?t.createElementNS.call(t,"http://www.w3.org/2000/svg",arguments[0]):t.createElement.apply(t,arguments)}function f(){var e=t.body;return e||(e=a(T?"svg":"body"),e.fake=!0),e}function l(e,n,s,r){var o,i,l,u,c="modernizr",p=a("div"),d=f();if(parseInt(s,10))for(;s--;)l=a("div"),l.id=r?r[s]:c+(s+1),p.appendChild(l);return o=a("style"),o.type="text/css",o.id="s"+c,(d.fake?d:p).appendChild(o),d.appendChild(p),o.styleSheet?o.styleSheet.cssText=e:o.appendChild(t.createTextNode(e)),p.id=c,d.fake&&(d.style.background="",d.style.overflow="hidden",u=b.style.overflow,b.style.overflow="hidden",b.appendChild(d)),i=n(p,e),d.fake?(d.parentNode.removeChild(d),b.style.overflow=u,b.offsetHeight):p.parentNode.removeChild(p),!!i}function 
u(e,t){return!!~(""+e).indexOf(t)}function c(e){return e.replace(/([A-Z])/g,function(e,t){return"-"+t.toLowerCase()}).replace(/^ms-/,"-ms-")}function p(t,s){var r=t.length;if("CSS"in e&&"supports"in e.CSS){for(;r--;)if(e.CSS.supports(c(t[r]),s))return!0;return!1}if("CSSSupportsRule"in e){for(var o=[];r--;)o.push("("+c(t[r])+":"+s+")");return o=o.join(" or "),l("@supports ("+o+") { #modernizr { position: absolute; } }",function(e){return"absolute"==getComputedStyle(e,null).position})}return n}function d(e){return e.replace(/([a-z])-([a-z])/g,function(e,t,n){return t+n.toUpperCase()}).replace(/^-/,"")}function h(e,t,r,o){function i(){l&&(delete A.style,delete A.modElem)}if(o=!s(o,"undefined")&&o,!s(r,"undefined")){var f=p(e,r);if(!s(f,"undefined"))return f}for(var l,c,h,m,g,v=["modernizr","tspan"];!A.style;)l=!0,A.modElem=a(v.shift()),A.style=A.modElem.style;for(h=e.length,c=0;c<h;c++)if(m=e[c],g=A.style[m],u(m,"-")&&(m=d(m)),A.style[m]!==n){if(o||s(r,"undefined"))return i(),"pfx"!=t||m;try{A.style[m]=r}catch(e){}if(A.style[m]!=g)return i(),"pfx"!=t||m}return i(),!1}function m(e,t){return function(){return e.apply(t,arguments)}}function g(e,t,n){var r;for(var o in e)if(e[o]in t)return n===!1?e[o]:(r=t[e[o]],s(r,"function")?m(r,n||t):r);return!1}function v(e,t,n,r,o){var i=e.charAt(0).toUpperCase()+e.slice(1),a=(e+" "+k.join(i+" ")+i).split(" ");return s(t,"string")||s(t,"undefined")?h(a,t,r,o):(a=(e+" "+R.join(i+" ")+i).split(" "),g(a,t,n))}function y(e,t,s){return v(e,n,n,t,s)}var w=[],x={_version:"3.3.1",_config:{classPrefix:"",enableClasses:!0,enableJSClass:!0,usePrefixes:!0},_q:[],on:function(e,t){var n=this;setTimeout(function(){t(n[e])},0)},addTest:function(e,t,n){w.push({name:e,fn:t,options:n})},addAsyncTest:function(e){w.push({name:null,fn:e})}},_=function(){};_.prototype=x,_=new _;var C,S=[],b=t.documentElement,T="svg"===b.nodeName.toLowerCase();!function(){var e={}.hasOwnProperty;C=s(e,"undefined")||s(e.call,"undefined")?function(e,t){return t in 
e&&s(e.constructor.prototype[t],"undefined")}:function(t,n){return e.call(t,n)}}(),x._l={},x.on=function(e,t){this._l[e]||(this._l[e]=[]),this._l[e].push(t),_.hasOwnProperty(e)&&setTimeout(function(){_._trigger(e,_[e])},0)},x._trigger=function(e,t){if(this._l[e]){var n=this._l[e];setTimeout(function(){var e,s;for(e=0;e<n.length;e++)(s=n[e])(t)},0),delete this._l[e]}},_._q.push(function(){x.addTest=i});var P=x._config.usePrefixes?" -webkit- -moz- -o- -ms- ".split(" "):[];x._prefixes=P,_.addTest("csscalc",function(){var e="width:",t="calc(10px);",n=a("a");return n.style.cssText=e+P.join(t+e),!!n.style.length});var z=x.testStyles=l,E=function(){var e=navigator.userAgent,t=e.match(/applewebkit\/([0-9]+)/gi)&&parseFloat(RegExp.$1),n=e.match(/w(eb)?osbrowser/gi),s=e.match(/windows phone/gi)&&e.match(/iemobile\/([0-9])+/gi)&&parseFloat(RegExp.$1)>=9,r=t<533&&e.match(/android/gi);return n||r||s}();E?_.addTest("fontface",!1):z('@font-face {font-family:"font";src:url("https://")}',function(e,n){var s=t.getElementById("smodernizr"),r=s.sheet||s.styleSheet,o=r?r.cssRules&&r.cssRules[0]?r.cssRules[0].cssText:r.cssText||"":"",i=/src/i.test(o)&&0===o.indexOf(n.split(" ")[0]);_.addTest("fontface",i)});var j="Moz O ms Webkit",k=x._config.usePrefixes?j.split(" "):[];x._cssomPrefixes=k;var N={elem:a("modernizr")};_._q.push(function(){delete N.elem});var A={style:N.elem.style};_._q.unshift(function(){delete A.style});var R=x._config.usePrefixes?j.toLowerCase().split(" "):[];x._domPrefixes=R,x.testAllProps=v,x.testAllProps=y;var q="CSS"in e&&"supports"in e.CSS,L="supportsCSS"in e;_.addTest("supports",q||L),_.addTest("csstransforms3d",function(){var e=!!y("perspective","1px",!0),t=_._config.usePrefixes;if(e&&(!t||"webkitPerspective"in b.style)){var n,s="#modernizr{width:0;height:0}";_.supports?n="@supports (perspective: 1px)":(n="@media 
(transform-3d)",t&&(n+=",(-webkit-transform-3d)")),n+="{#modernizr{width:7px;height:18px;margin:0;padding:0;border:0}}",z(s+n,function(t){e=7===t.offsetWidth&&18===t.offsetHeight})}return e}),r(),o(S),delete x.addTest,delete x.addAsyncTest;for(var O=0;O<_._q.length;O++)_._q[O]();e.Modernizr=_}(window,document);
\ No newline at end of file
diff --git a/material/assets/stylesheets/application-9c62e3c932.css b/material/assets/stylesheets/application-9c62e3c932.css
new file mode 100644
index 0000000000000000000000000000000000000000..7a2236d4b6887e1bf78d1731eb8e6611cd8a657c
--- /dev/null
+++ b/material/assets/stylesheets/application-9c62e3c932.css
@@ -0,0 +1 @@
+html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}html{-webkit-text-size-adjust:none;-ms-text-size-adjust:none;text-size-adjust:none}body{margin:0}hr{overflow:visible;box-sizing:content-box}a{-webkit-text-decoration-skip:objects}a,button,input,label{-webkit-tap-highlight-color:transparent}a{color:inherit;text-decoration:none}a:active,a:hover{outline-width:0}small,sub,sup{font-size:80%}sub,sup{position:relative;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}table{border-collapse:collapse;border-spacing:0}td,th{font-weight:400;vertical-align:top}button{padding:0;background:transparent;font-size:inherit}button,input{border:0;outline:0}.admonition:before,.md-icon,.md-nav__button,.md-nav__link:after,.md-nav__title:before,.md-typeset .critic.comment:before,.md-typeset .footnote-backref,.md-typeset .task-list-control .task-list-indicator:before{font-family:Material Icons;font-style:normal;font-variant:normal;font-weight:400;line-height:1;text-transform:none;white-space:nowrap;speak:none;word-wrap:normal;direction:ltr}.md-content__edit,.md-footer-nav__button,.md-header-nav__button,.md-nav__button,.md-nav__title:before{display:inline-block;margin:.4rem;padding:.8rem;font-size:2.4rem;cursor:pointer}.md-icon--arrow-back:before{content:"arrow_back"}.md-icon--arrow-forward:before{content:"arrow_forward"}.md-icon--menu:before{content:"menu"}.md-icon--search:before{content:"search"}.md-icon--home:before{content:"school"}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}body,input{color:rgba(0,0,0,.87);-webkit-font-feature-settings:"kern","onum","liga";font-feature-settings:"kern","onum","liga";font-family:Helvetica Neue,Helvetica,Arial,sans-serif;font-weight:400}code,kbd,pre{color:rgba(0,0,0,.87);-webkit-font-feature-settings:"kern","onum","liga";font-feature-settings:"kern","onum","liga";font-family:Courier 
New,Courier,monospace;font-weight:400}.md-typeset{font-size:1.6rem;line-height:1.6;-webkit-print-color-adjust:exact}.md-typeset blockquote,.md-typeset ol,.md-typeset p,.md-typeset ul{margin:1em 0}.md-typeset h1{margin:0 0 4rem;color:rgba(0,0,0,.54);font-size:3.125rem;line-height:1.3}.md-typeset h1,.md-typeset h2{font-weight:300;letter-spacing:-.01em}.md-typeset h2{margin:4rem 0 1.6rem;font-size:2.5rem;line-height:1.4}.md-typeset h3{margin:3.2rem 0 1.6rem;font-size:2rem;font-weight:400;letter-spacing:-.01em;line-height:1.5}.md-typeset h2+h3{margin-top:1.6rem}.md-typeset h4{font-size:1.6rem}.md-typeset h4,.md-typeset h5,.md-typeset h6{margin:1.6rem 0;font-weight:700;letter-spacing:-.01em}.md-typeset h5,.md-typeset h6{color:rgba(0,0,0,.54);font-size:1.28rem}.md-typeset h5{text-transform:uppercase}.md-typeset hr{margin:1.5em 0;border-bottom:.1rem dotted rgba(0,0,0,.26)}.md-typeset a{color:#3f51b5;word-break:break-word}.md-typeset a,.md-typeset a:before{-webkit-transition:color .125s;transition:color .125s}.md-typeset a:active,.md-typeset a:hover{color:#536dfe}.md-typeset code,.md-typeset pre{background-color:hsla(0,0%,93%,.5);color:#37474f;font-size:85%}.md-typeset code{margin:0 .29412em;padding:.07353em 0;border-radius:.2rem;box-shadow:.29412em 0 0 hsla(0,0%,93%,.5),-.29412em 0 0 hsla(0,0%,93%,.5);word-break:break-word;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset h1 code,.md-typeset h2 code,.md-typeset h3 code,.md-typeset h4 code,.md-typeset h5 code,.md-typeset h6 code{margin:0;background-color:transparent;box-shadow:none}.md-typeset a>code{margin:inherit;padding:inherit;border-radius:none;background-color:inherit;color:inherit;box-shadow:none}.md-typeset pre{margin:1em 0;padding:1rem 1.2rem;border-radius:.2rem;line-height:1.4;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset pre::-webkit-scrollbar{width:.4rem;height:.4rem}.md-typeset pre::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-typeset 
pre::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-typeset pre>code{margin:0;background-color:transparent;font-size:inherit;box-shadow:none;-webkit-box-decoration-break:none;box-decoration-break:none}.md-typeset kbd{padding:0 .29412em;border:.1rem solid #c9c9c9;border-radius:.2rem;border-bottom-color:#bcbcbc;background-color:#fcfcfc;color:#555;font-size:85%;box-shadow:0 .1rem 0 #b0b0b0;word-break:break-word}.md-typeset mark{margin:0 .25em;padding:.0625em 0;border-radius:.2rem;background-color:rgba(255,235,59,.5);box-shadow:.25em 0 0 rgba(255,235,59,.5),-.25em 0 0 rgba(255,235,59,.5);word-break:break-word;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset abbr{border-bottom:.1rem dotted rgba(0,0,0,.54);cursor:help}.md-typeset small{opacity:.75}.md-typeset sub,.md-typeset sup{margin-left:.07812em}.md-typeset blockquote{padding-left:1.2rem;border-left:.4rem solid rgba(0,0,0,.26);color:rgba(0,0,0,.54)}.md-typeset ul{list-style-type:disc}.md-typeset ol,.md-typeset ul{margin-left:.625em;padding:0}.md-typeset ol ol,.md-typeset ul ol{list-style-type:lower-alpha}.md-typeset ol ol ol,.md-typeset ul ol ol{list-style-type:lower-roman}.md-typeset ol li,.md-typeset ul li{margin-bottom:.5em;margin-left:1.25em}.md-typeset ol li blockquote,.md-typeset ol li p,.md-typeset ul li blockquote,.md-typeset ul li p{margin:.5em 0}.md-typeset ol li:last-child,.md-typeset ul li:last-child{margin-bottom:0}.md-typeset ol li ol,.md-typeset ol li ul,.md-typeset ul li ol,.md-typeset ul li ul{margin:.5em 0 .5em .625em}.md-typeset iframe,.md-typeset img,.md-typeset svg{max-width:100%}.md-typeset table:not([class]){box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);margin:2em 0;border-radius:.2rem;font-size:1.28rem;overflow:hidden}.no-js .md-typeset table:not([class]){display:inline-block;max-width:100%;margin:.8em 0;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset table:not([class]) 
td:not([align]),.md-typeset table:not([class]) th:not([align]){text-align:left}.md-typeset table:not([class]) th{min-width:10rem;padding:1.2rem 1.6rem;background-color:rgba(0,0,0,.54);color:#fff;vertical-align:top}.md-typeset table:not([class]) td{padding:1.2rem 1.6rem;border-top:.1rem solid rgba(0,0,0,.07);vertical-align:top}.md-typeset table:not([class]) tr:first-child td{border-top:0}.md-typeset table:not([class]) a{word-break:normal}.md-typeset .md-typeset__table{margin:1.6em -1.6rem;overflow-x:auto;-webkit-overflow-scrolling:touch}.md-typeset .md-typeset__table table{display:inline-block;margin:0 1.6rem}html{font-size:62.5%}body,html{height:100%}body{position:relative}hr{display:block;height:.1rem;padding:0;border:0}.md-svg{display:none}.md-grid{max-width:122rem;margin-right:auto;margin-left:auto}.md-container,.md-main{overflow:auto}.md-container{display:table;width:100%;height:100%;padding-top:5.6rem;table-layout:fixed}.md-main{display:table-row;height:100%}.md-main__inner{min-height:100%;padding-top:3rem;overflow:auto}.md-toggle{display:none}.md-overlay{position:fixed;top:0;width:0;height:0;-webkit-transition:width 0s .25s,height 0s .25s,opacity .25s;transition:width 0s .25s,height 0s .25s,opacity .25s;background-color:rgba(0,0,0,.54);opacity:0;z-index:2}.md-flex{display:table}.md-flex__cell{display:table-cell;position:relative;vertical-align:top}.md-flex__cell--shrink{width:0}.md-flex__cell--stretch{display:table;width:100%;table-layout:fixed}.md-flex__ellipsis{display:table-cell;text-overflow:ellipsis;white-space:nowrap;overflow:hidden}@page{margin:25mm}.md-content__inner{margin:2.4rem 1.6rem}.md-content__edit{float:right}.md-header{box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);position:fixed;top:0;right:0;left:0;height:5.6rem;-webkit-transition:background-color .25s;transition:background-color 
.25s;background-color:#3f51b5;color:#fff;z-index:1}.md-header-nav{padding:.4rem}.md-header-nav__button{position:relative;-webkit-transition:opacity .25s;transition:opacity .25s;z-index:1}.md-header-nav__button:hover{opacity:.7}.md-header-nav__button.md-logo img{display:block}.no-js .md-header-nav__button.md-icon--search{display:none}.md-header-nav__title{padding:0 2rem;font-size:1.8rem;line-height:4.8rem}.md-header-nav__parent{color:hsla(0,0%,100%,.7)}.md-header-nav__parent:after{display:inline;color:hsla(0,0%,100%,.3);content:"/"}.md-header-nav__source{display:none}.md-footer-nav{background-color:rgba(0,0,0,.87);color:#fff}.md-footer-nav__inner{padding:.4rem;overflow:auto}.md-footer-nav__link{padding-top:2.8rem;padding-bottom:.8rem;-webkit-transition:opacity .25s;transition:opacity .25s}.md-footer-nav__link:hover{opacity:.7}.md-footer-nav__link--prev{width:25%;float:left}.md-footer-nav__link--next{width:75%;float:right;text-align:right}.md-footer-nav__button{-webkit-transition:background .25s;transition:background .25s}.md-footer-nav__title{position:relative;padding:0 2rem;font-size:1.8rem;line-height:4.8rem}.md-footer-nav__direction{position:absolute;right:0;left:0;margin-top:-2rem;padding:0 2rem;color:hsla(0,0%,100%,.7);font-size:1.5rem}.md-footer-meta{background:rgba(0,0,0,.895)}.md-footer-meta__inner{padding:.4rem;overflow:auto}html .md-footer-meta.md-typeset a{color:hsla(0,0%,100%,.7)}.md-footer-copyright{margin:0 1.2rem;padding:.8rem 0;color:hsla(0,0%,100%,.3);font-size:1.28rem}.md-footer-copyright__highlight{color:hsla(0,0%,100%,.7)}.md-footer-social{margin:0 .8rem;padding:.4rem 0 1.2rem}.md-footer-social__link{display:inline-block;width:3.2rem;height:3.2rem;border:.1rem solid hsla(0,0%,100%,.12);border-radius:100%;color:hsla(0,0%,100%,.7);font-size:1.6rem;text-align:center}.md-footer-social__link:before{line-height:1.9}.md-nav{font-size:1.4rem;line-height:1.3}.md-nav--secondary{-webkit-transition:border-left .25s;transition:border-left 
.25s;border-left:.4rem solid #3f51b5}.md-nav__title{display:block;padding:1.2rem 1.2rem 0;font-weight:700;text-overflow:ellipsis;overflow:hidden}.md-nav__title:before{display:none;content:"arrow_back"}.md-nav__title .md-nav__button{display:none}.md-nav__list{margin:0;padding:0;list-style:none}.md-nav__item{padding:.625em 1.2rem 0}.md-nav__item:last-child{padding-bottom:1.2rem}.md-nav__item .md-nav__item{padding-right:0}.md-nav__item .md-nav__item:last-child{padding-bottom:0}.md-nav__button img{width:100%;height:auto}.md-nav__link{display:block;-webkit-transition:color .125s;transition:color .125s;text-overflow:ellipsis;cursor:pointer;overflow:hidden}.md-nav__item--nested>.md-nav__link:after{content:"keyboard_arrow_down"}html .md-nav__link[for=toc],html .md-nav__link[for=toc]+.md-nav__link:after,html .md-nav__link[for=toc]~.md-nav{display:none}.md-nav__link[data-md-state=blur]{color:rgba(0,0,0,.54)}.md-nav__link--active,.md-nav__link:active{color:#3f51b5}.md-nav__link:focus,.md-nav__link:hover{color:#536dfe}.md-nav__source,.no-js .md-search{display:none}.md-search__overlay{display:none;pointer-events:none}.md-search__inner{width:100%}.md-search__form{position:relative}.md-search__input{position:relative;padding:0 1.6rem 0 7.2rem;text-overflow:ellipsis;z-index:1}.md-search__input+.md-search__icon,.md-search__input::-webkit-input-placeholder{color:rgba(0,0,0,.54)}.md-search__input+.md-search__icon,.md-search__input::-moz-placeholder{color:rgba(0,0,0,.54)}.md-search__input+.md-search__icon,.md-search__input:-ms-input-placeholder{color:rgba(0,0,0,.54)}.md-search__input+.md-search__icon,.md-search__input::placeholder{color:rgba(0,0,0,.54)}.md-search__input::-ms-clear{display:none}.md-search__icon{position:absolute;top:.8rem;left:1.2rem;-webkit-transition:color .25s;transition:color .25s;font-size:2.4rem;cursor:pointer;z-index:1}.md-search__icon:before{content:"search"}.md-search__output{position:absolute;width:100%;border-radius:0 0 .2rem 
.2rem;overflow:hidden}.md-search__scrollwrap{height:100%;background:-webkit-linear-gradient(top,#fff 10%,hsla(0,0%,100%,0)),-webkit-linear-gradient(top,rgba(0,0,0,.26),rgba(0,0,0,.07) 35%,transparent 60%);background:linear-gradient(180deg,#fff 10%,hsla(0,0%,100%,0)),linear-gradient(180deg,rgba(0,0,0,.26),rgba(0,0,0,.07) 35%,transparent 60%);background-attachment:local,scroll;background-color:#fff;background-repeat:no-repeat;background-size:100% 2rem,100% 1rem;box-shadow:inset 0 .1rem 0 rgba(0,0,0,.07);overflow-y:auto;-webkit-overflow-scrolling:touch}.md-search-result__meta{padding:0 1.6rem;background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.54);font-size:1.28rem;line-height:4rem}.md-search-result__list{margin:0;padding:0;border-top:.1rem solid rgba(0,0,0,.07);list-style:none}.md-search-result__item{box-shadow:0 -.1rem 0 rgba(0,0,0,.07)}.md-search-result__link{display:block;padding:0 1.6rem;-webkit-transition:background .25s;transition:background .25s;overflow:auto}.md-search-result__link:hover{background-color:rgba(83,109,254,.1)}.md-search-result__article{margin:1em 0}.md-search-result__title{margin-top:.5em;margin-bottom:0;color:rgba(0,0,0,.87);font-size:1.6rem;font-weight:400;line-height:1.4}.md-search-result__teaser{margin:.5em 0;color:rgba(0,0,0,.54);font-size:1.28rem;line-height:1.4;word-break:break-word}.md-sidebar{position:relative;width:24.2rem;padding:2.4rem 0;float:left;overflow:visible}.md-sidebar[data-md-state=lock]{position:fixed;top:5.6rem;-webkit-backface-visibility:hidden;backface-visibility:hidden}.md-sidebar--secondary{display:none}.md-sidebar__scrollwrap{max-height:100%;margin:0 .4rem;overflow-y:auto}.md-sidebar__scrollwrap::-webkit-scrollbar{width:.4rem;height:.4rem}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}@-webkit-keyframes a{0%{height:0}to{height:1.3rem}}@keyframes a{0%{height:0}to{height:1.3rem}}@-webkit-keyframes 
b{0%{-webkit-transform:translateY(100%);transform:translateY(100%);opacity:0}50%{opacity:0}to{-webkit-transform:translateY(0);transform:translateY(0);opacity:1}}@keyframes b{0%{-webkit-transform:translateY(100%);transform:translateY(100%);opacity:0}50%{opacity:0}to{-webkit-transform:translateY(0);transform:translateY(0);opacity:1}}.md-source{display:block;-webkit-transition:opacity .25s;transition:opacity .25s;font-size:1.3rem;line-height:1.2;white-space:nowrap}.md-source:hover{opacity:.7}.md-source:after,.md-source__icon{display:inline-block;height:4.8rem;content:"";vertical-align:middle}.md-source__icon{width:4.8rem}.md-source__icon svg{margin-top:1.2rem;margin-left:1.2rem}.md-source__icon+.md-source__repository{margin-left:-4.4rem;padding-left:4rem}.md-source__repository{display:inline-block;max-width:100%;margin-left:1.2rem;font-weight:700;text-overflow:ellipsis;overflow:hidden;vertical-align:middle}.md-source__facts{margin:0;padding:0;font-size:1.1rem;font-weight:700;list-style-type:none;opacity:.75;overflow:hidden}[data-md-state=done] .md-source__facts{-webkit-animation:a .25s ease-in;animation:a .25s ease-in}.md-source__fact{float:left}[data-md-state=done] .md-source__fact{-webkit-animation:b .4s ease-out;animation:b .4s ease-out}.md-source__fact:before{margin:0 .2rem;content:"\00B7"}.md-source__fact:first-child:before{display:none}.admonition{position:relative;margin:1.5625em 0;padding:.8rem 1.2rem;border-left:3.2rem solid rgba(68,138,255,.4);border-radius:.2rem;background-color:rgba(68,138,255,.15);font-size:1.28rem}.admonition:before{position:absolute;left:-2.6rem;color:#fff;font-size:2rem;content:"edit";vertical-align:-.25em}.admonition :first-child{margin-top:0}.admonition 
:last-child{margin-bottom:0}.admonition.summary,.admonition.tldr{border-color:rgba(0,176,255,.4);background-color:rgba(0,176,255,.15)}.admonition.summary:before,.admonition.tldr:before{content:"subject"}.admonition.hint,.admonition.important,.admonition.tip{border-color:rgba(0,191,165,.4);background-color:rgba(0,191,165,.15)}.admonition.hint:before,.admonition.important:before,.admonition.tip:before{content:"whatshot"}.admonition.check,.admonition.done,.admonition.success{border-color:rgba(0,230,118,.4);background-color:rgba(0,230,118,.15)}.admonition.check:before,.admonition.done:before,.admonition.success:before{content:"done"}.admonition.attention,.admonition.caution,.admonition.warning{border-color:rgba(255,145,0,.4);background-color:rgba(255,145,0,.15)}.admonition.attention:before,.admonition.caution:before,.admonition.warning:before{content:"warning"}.admonition.fail,.admonition.failure,.admonition.missing{border-color:rgba(255,82,82,.4);background-color:rgba(255,82,82,.15)}.admonition.fail:before,.admonition.failure:before,.admonition.missing:before{content:"clear"}.admonition.danger,.admonition.error{border-color:rgba(255,23,68,.4);background-color:rgba(255,23,68,.15)}.admonition.danger:before,.admonition.error:before{content:"flash_on"}.admonition.bug{border-color:rgba(245,0,87,.4);background-color:rgba(245,0,87,.15)}.admonition.bug:before{content:"bug_report"}.admonition-title{font-weight:700}html .admonition-title{margin-bottom:0}html .admonition-title+*{margin-top:0}.codehilite .o,.codehilite .ow{color:inherit}.codehilite .ge{color:#000}.codehilite .gr{color:#a00}.codehilite .gh{color:#999}.codehilite .go{color:#888}.codehilite .gp{color:#555}.codehilite .gs{color:inherit}.codehilite .gu{color:#aaa}.codehilite .gt{color:#a00}.codehilite .gd{background-color:#fdd}.codehilite .gi{background-color:#dfd}.codehilite .k{color:#3b78e7}.codehilite .kc{color:#a71d5d}.codehilite .kd,.codehilite .kn{color:#3b78e7}.codehilite .kp{color:#a71d5d}.codehilite 
.kr,.codehilite .kt{color:#3e61a2}.codehilite .c,.codehilite .cm{color:#999}.codehilite .cp{color:#666}.codehilite .c1,.codehilite .ch,.codehilite .cs{color:#999}.codehilite .na,.codehilite .nb{color:#c2185b}.codehilite .bp{color:#3e61a2}.codehilite .nc{color:#c2185b}.codehilite .no{color:#3e61a2}.codehilite .nd,.codehilite .ni{color:#666}.codehilite .ne,.codehilite .nf{color:#c2185b}.codehilite .nl{color:#3b5179}.codehilite .nn{color:#ec407a}.codehilite .nt{color:#3b78e7}.codehilite .nv,.codehilite .vc,.codehilite .vg,.codehilite .vi{color:#3e61a2}.codehilite .nx{color:#ec407a}.codehilite .il,.codehilite .m,.codehilite .mf,.codehilite .mh,.codehilite .mi,.codehilite .mo{color:#e74c3c}.codehilite .s,.codehilite .sb,.codehilite .sc{color:#0d904f}.codehilite .sd{color:#999}.codehilite .s2{color:#0d904f}.codehilite .se,.codehilite .sh,.codehilite .si,.codehilite .sx{color:#183691}.codehilite .sr{color:#009926}.codehilite .s1,.codehilite .ss{color:#0d904f}.codehilite .err{color:#a61717}.codehilite .w{color:transparent}.codehilite .hll{display:block;margin:0 -1.2rem;padding:0 1.2rem;background-color:rgba(255,235,59,.5)}.md-typeset .codehilite{margin:1em 0;padding:1rem 1.2rem .8rem;border-radius:.2rem;background-color:hsla(0,0%,93%,.5);color:#37474f;line-height:1.4;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset .codehilite::-webkit-scrollbar{width:.4rem;height:.4rem}.md-typeset .codehilite::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-typeset .codehilite::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-typeset .codehilite pre{display:inline-block;min-width:100%;margin:0;padding:0;background-color:transparent;overflow:visible;vertical-align:top}.md-typeset .codehilitetable{display:block;margin:1em 0;border-radius:.2em;font-size:1.6rem;overflow:hidden}.md-typeset .codehilitetable tbody,.md-typeset .codehilitetable td{display:block;padding:0}.md-typeset .codehilitetable 
tr{display:-webkit-box;display:-ms-flexbox;display:flex}.md-typeset .codehilitetable .codehilite,.md-typeset .codehilitetable .linenodiv{margin:0;border-radius:0}.md-typeset .codehilitetable .linenodiv{padding:1rem 1.2rem .8rem}.md-typeset .codehilitetable .linenodiv,.md-typeset .codehilitetable .linenodiv>pre{height:100%}.md-typeset .codehilitetable .linenos{background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.26);-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.md-typeset .codehilitetable .linenos pre{margin:0;padding:0;background-color:transparent;color:inherit;text-align:right}.md-typeset .codehilitetable .code{-webkit-box-flex:1;-ms-flex:1;flex:1;overflow:hidden}.md-typeset>.codehilitetable{box-shadow:none}.md-typeset .footnote{color:rgba(0,0,0,.54);font-size:1.28rem}.md-typeset .footnote ol{margin-left:0}.md-typeset .footnote li{-webkit-transition:color .25s;transition:color .25s}.md-typeset .footnote li:before{display:block;height:0}.md-typeset .footnote li:target{color:rgba(0,0,0,.87)}.md-typeset .footnote li:target:before{margin-top:-9rem;padding-top:9rem;pointer-events:none}.md-typeset .footnote li :first-child{margin-top:0}.md-typeset .footnote li:hover .footnote-backref,.md-typeset .footnote li:target .footnote-backref{-webkit-transform:translateX(0);transform:translateX(0);opacity:1}.md-typeset .footnote li:hover .footnote-backref:hover,.md-typeset .footnote li:target .footnote-backref{color:#536dfe}.md-typeset .footnote-backref{display:inline-block;-webkit-transform:translateX(.5rem);transform:translateX(.5rem);-webkit-transition:color .25s,opacity .125s .125s,-webkit-transform .25s .125s;transition:color .25s,opacity .125s .125s,-webkit-transform .25s .125s;transition:transform .25s .125s,color .25s,opacity .125s .125s;transition:transform .25s .125s,color .25s,opacity .125s .125s,-webkit-transform .25s .125s;color:rgba(0,0,0,.26);font-size:0;opacity:0;vertical-align:text-bottom}.md-typeset 
.footnote-backref:before{font-size:1.6rem;content:"keyboard_return"}.md-typeset .headerlink{display:inline-block;margin-left:1rem;-webkit-transform:translateY(.5rem);transform:translateY(.5rem);-webkit-transition:color .25s,opacity .125s .25s,-webkit-transform .25s .25s;transition:color .25s,opacity .125s .25s,-webkit-transform .25s .25s;transition:transform .25s .25s,color .25s,opacity .125s .25s;transition:transform .25s .25s,color .25s,opacity .125s .25s,-webkit-transform .25s .25s;opacity:0}html body .md-typeset .headerlink{color:rgba(0,0,0,.26)}.md-typeset [id]:before{display:inline-block;content:""}.md-typeset [id]:target:before{margin-top:-9.8rem;padding-top:9.8rem}.md-typeset [id] .headerlink:focus,.md-typeset [id]:hover .headerlink,.md-typeset [id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset [id] .headerlink:focus,.md-typeset [id]:hover .headerlink:hover,.md-typeset [id]:target .headerlink{color:#536dfe}.md-typeset h1[id]{padding-top:.8rem}.md-typeset h1[id].headerlink{display:none}.md-typeset h2[id]:before{display:block;margin-top:-.4rem;padding-top:.4rem}.md-typeset h2[id]:target:before{margin-top:-8.4rem;padding-top:8.4rem}.md-typeset h3[id]:before{display:block;margin-top:-.7rem;padding-top:.7rem}.md-typeset h3[id]:target:before{margin-top:-8.7rem;padding-top:8.7rem}.md-typeset h4[id]:before{display:block;margin-top:-.8rem;padding-top:.8rem}.md-typeset h4[id]:target:before{margin-top:-8.8rem;padding-top:8.8rem}.md-typeset h5[id]:before{display:block;margin-top:-1.1rem;padding-top:1.1rem}.md-typeset h5[id]:target:before{margin-top:-9.1rem;padding-top:9.1rem}.md-typeset h6[id]:before{display:block;margin-top:-1.1rem;padding-top:1.1rem}.md-typeset h6[id]:target:before{margin-top:-9.1rem;padding-top:9.1rem}.md-typeset .MJXc-display{margin:.75em 0;padding:.25em 0;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset .MathJax_CHTML{outline:0}.md-typeset .comment.critic,.md-typeset del.critic,.md-typeset 
ins.critic{margin:0 .25em;padding:.0625em 0;border-radius:.2rem;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset del.critic{background-color:#fdd;box-shadow:.25em 0 0 #fdd,-.25em 0 0 #fdd}.md-typeset ins.critic{background-color:#dfd;box-shadow:.25em 0 0 #dfd,-.25em 0 0 #dfd}.md-typeset .critic.comment{background-color:hsla(0,0%,93%,.5);color:#37474f;box-shadow:.25em 0 0 hsla(0,0%,93%,.5),-.25em 0 0 hsla(0,0%,93%,.5)}.md-typeset .critic.comment:before{padding-right:.125em;color:rgba(0,0,0,.26);content:"chat";vertical-align:-.125em}.md-typeset .critic.block{display:block;margin:1em 0;padding-right:1.6rem;padding-left:1.6rem;box-shadow:none}.md-typeset .critic.block :first-child{margin-top:.5em}.md-typeset .critic.block :last-child{margin-bottom:.5em}.md-typeset .emojione{width:2rem;vertical-align:text-top}.md-typeset code.codehilite{margin:0 .29412em;padding:.07353em 0}.md-typeset .task-list-item{position:relative;list-style-type:none}.md-typeset .task-list-item [type=checkbox]{position:absolute;top:.45em;left:-2em}.md-typeset .task-list-control .task-list-indicator:before{position:absolute;top:.05em;left:-1.25em;color:rgba(0,0,0,.26);font-size:1.5em;content:"check_box_outline_blank";vertical-align:-.25em}.md-typeset .task-list-control [type=checkbox]:checked+.task-list-indicator:before{content:"check_box"}.md-typeset .task-list-control [type=checkbox]{opacity:0;z-index:-1}@media print{.md-typeset a:after{color:rgba(0,0,0,.54);content:" [" attr(href) "]"}.md-typeset code{box-shadow:none;-webkit-box-decoration-break:initial;box-decoration-break:slice}.md-content__edit,.md-footer,.md-header,.md-sidebar,.md-typeset .headerlink{display:none}}@media only screen and (max-width:44.9375em){.md-typeset pre{margin:1em -1.6rem;padding:1rem 1.6rem;border-radius:0}.md-footer-nav__link--prev .md-footer-nav__title{display:none}.codehilite .hll{margin:0 -1.6rem;padding:0 1.6rem}.md-typeset>.codehilite{padding:1rem 1.6rem 
.8rem}.md-typeset>.codehilite,.md-typeset>.codehilitetable{margin:1em -1.6rem;border-radius:0}.md-typeset>.codehilitetable .codehilite,.md-typeset>.codehilitetable .linenodiv{padding:1rem 1.6rem}.md-typeset>p>.MJXc-display{margin:.75em -1.6rem;padding:.25em 1.6rem}}@media only screen and (min-width:100em){html{font-size:68.75%}}@media only screen and (min-width:125em){html{font-size:75%}}@media only screen and (max-width:59.9375em){body[data-md-state=lock]{overflow:hidden}.ios body[data-md-state=lock] .md-container{display:none}.md-content__edit{margin-right:-.8rem}.md-nav--secondary{border-left:0}html .md-nav__link[for=toc]{display:block;padding-right:4.8rem}html .md-nav__link[for=toc]:after{color:inherit;content:"toc"}html .md-nav__link[for=toc]+.md-nav__link{display:none}html .md-nav__link[for=toc]~.md-nav{display:-webkit-box;display:-ms-flexbox;display:flex}.md-nav__source{display:block;padding:.4rem;background-color:rgba(50,64,144,.9675);color:#fff}.md-search__overlay{display:block;position:absolute;top:.4rem;left:.4rem;width:4rem;height:4rem;-webkit-transform-origin:center;transform-origin:center;-webkit-transition:opacity .2s .2s,-webkit-transform .3s .1s;transition:opacity .2s .2s,-webkit-transform .3s .1s;transition:transform .3s .1s,opacity .2s .2s;transition:transform .3s .1s,opacity .2s .2s,-webkit-transform .3s .1s;border-radius:2rem;background-color:#fff;opacity:0;overflow:hidden;z-index:1}[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transition:opacity .1s,-webkit-transform .4s;transition:opacity .1s,-webkit-transform .4s;transition:transform .4s,opacity .1s;transition:transform .4s,opacity .1s,-webkit-transform .4s;opacity:1}.md-search__inner{position:fixed;top:0;left:100%;height:100%;-webkit-transform:translateX(5%);transform:translateX(5%);-webkit-transition:left 0s .3s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.4,0,.2,1) .15s;transition:left 0s .3s,opacity .15s .15s,-webkit-transform .15s 
cubic-bezier(.4,0,.2,1) .15s;transition:left 0s .3s,transform .15s cubic-bezier(.4,0,.2,1) .15s,opacity .15s .15s;transition:left 0s .3s,transform .15s cubic-bezier(.4,0,.2,1) .15s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.4,0,.2,1) .15s;opacity:0;z-index:2}[data-md-toggle=search]:checked~.md-header .md-search__inner{left:0;-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:left 0s 0s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;transition:left 0s 0s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;transition:left 0s 0s,transform .15s cubic-bezier(.1,.7,.1,1) .15s,opacity .15s .15s;transition:left 0s 0s,transform .15s cubic-bezier(.1,.7,.1,1) .15s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;opacity:1}.md-search__input{width:100%;height:5.6rem;font-size:1.8rem}.md-search__icon{top:1.6rem;left:1.6rem}.md-search__icon:before{content:"arrow_back"}.md-search__output{top:5.6rem;bottom:0}}@media only screen and (max-width:76.1875em){[data-md-toggle=drawer]:checked~.md-overlay{width:100%;height:100%;-webkit-transition:width 0s,height 0s,opacity .25s;transition:width 0s,height 0s,opacity .25s;opacity:1}.md-header-nav__button.md-icon--home,.md-header-nav__button.md-logo{display:none}.md-nav{background-color:#fff}.md-nav--primary,.md-nav--primary .md-nav{display:-webkit-box;display:-ms-flexbox;display:flex;position:absolute;top:0;right:0;left:0;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;height:100%;z-index:1}.md-nav--primary .md-nav__item,.md-nav--primary .md-nav__title{font-size:1.6rem;line-height:1.5}html .md-nav--primary .md-nav__title{position:relative;height:11.2rem;padding:6rem 1.6rem .4rem;background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.54);font-weight:400;line-height:4.8rem;white-space:nowrap;cursor:pointer}html .md-nav--primary 
.md-nav__title:before{display:block;position:absolute;top:.4rem;left:.4rem;width:4rem;height:4rem;color:rgba(0,0,0,.54)}html .md-nav--primary .md-nav__title~.md-nav__list{background:-webkit-linear-gradient(top,#fff 10%,hsla(0,0%,100%,0)),-webkit-linear-gradient(top,rgba(0,0,0,.26),rgba(0,0,0,.07) 35%,transparent 60%);background:linear-gradient(180deg,#fff 10%,hsla(0,0%,100%,0)),linear-gradient(180deg,rgba(0,0,0,.26),rgba(0,0,0,.07) 35%,transparent 60%);background-attachment:local,scroll;background-color:#fff;background-repeat:no-repeat;background-size:100% 2rem,100% 1rem;box-shadow:inset 0 .1rem 0 rgba(0,0,0,.07)}html .md-nav--primary .md-nav__title~.md-nav__list>.md-nav__item:first-child{border-top:0}html .md-nav--primary .md-nav__title--site{position:relative;background-color:#3f51b5;color:#fff}html .md-nav--primary .md-nav__title--site .md-nav__button{display:block;position:absolute;top:.4rem;left:.4rem;width:6.4rem;height:6.4rem;font-size:4.8rem}html .md-nav--primary .md-nav__title--site:before{display:none}.md-nav--primary .md-nav__list{-webkit-box-flex:1;-ms-flex:1;flex:1;overflow-y:auto}.md-nav--primary .md-nav__item{padding:0;border-top:.1rem solid rgba(0,0,0,.07)}.md-nav--primary .md-nav__item--nested>.md-nav__link{padding-right:4.8rem}.md-nav--primary .md-nav__item--nested>.md-nav__link:after{content:"keyboard_arrow_right"}.md-nav--primary .md-nav__link{position:relative;padding:1.6rem}.md-nav--primary .md-nav__link:after{position:absolute;top:50%;right:1.2rem;margin-top:-1.2rem;color:rgba(0,0,0,.54);font-size:2.4rem}.md-nav--primary .md-nav__link:focus:after,.md-nav--primary .md-nav__link:hover:after{color:inherit}.md-nav--primary .md-nav--secondary .md-nav{position:static}.md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-left:2.8rem}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-left:4rem}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-left:5.2rem}.md-nav--primary 
.md-nav--secondary .md-nav .md-nav .md-nav .md-nav .md-nav__link{padding-left:6.4rem}.md-nav__toggle~.md-nav{display:none}.csstransforms3d .md-nav__toggle~.md-nav{-webkit-transform:translateX(100%);transform:translateX(100%);-webkit-transition:opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);transition:opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);transition:transform .25s cubic-bezier(.8,0,.6,1),opacity .125s .05s;transition:transform .25s cubic-bezier(.8,0,.6,1),opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);opacity:0}.csstransforms3d .md-nav__toggle~.md-nav,.md-nav__toggle:checked~.md-nav{display:-webkit-box;display:-ms-flexbox;display:flex}.csstransforms3d .md-nav__toggle:checked~.md-nav{-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .125s .125s;transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);opacity:1}.md-sidebar--primary{position:fixed;top:0;left:-24.2rem;width:24.2rem;height:100%;-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:box-shadow .25s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:box-shadow .25s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:transform .25s cubic-bezier(.4,0,.2,1),box-shadow .25s;transition:transform .25s cubic-bezier(.4,0,.2,1),box-shadow .25s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);background-color:#fff;z-index:2}.no-csstransforms3d .md-sidebar--primary{display:none}[data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{box-shadow:0 8px 10px 1px rgba(0,0,0,.14),0 3px 14px 2px rgba(0,0,0,.12),0 5px 5px -3px rgba(0,0,0,.4);-webkit-transform:translateX(24.2rem);transform:translateX(24.2rem)}.no-csstransforms3d 
[data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{display:block}.md-sidebar--primary .md-sidebar__scrollwrap{overflow:hidden;position:absolute;top:0;right:0;bottom:0;left:0;margin:0}}@media only screen and (min-width:60em){.md-content{margin-right:24.2rem}.md-header-nav__button.md-icon--search{display:none}.md-header-nav__source{display:block;width:23rem;max-width:23rem;padding-right:1.2rem}.md-search{margin-right:2.8rem;padding:.4rem}.md-search__inner{display:table;position:relative;clear:both}.md-search__form{width:23rem;float:right;-webkit-transition:width .25s cubic-bezier(.1,.7,.1,1);transition:width .25s cubic-bezier(.1,.7,.1,1);border-radius:.2rem}.md-search__input{width:100%;height:4rem;padding-left:4.8rem;-webkit-transition:background-color .25s,color .25s;transition:background-color .25s,color .25s;border-radius:.2rem;background-color:rgba(0,0,0,.26);color:#fff;font-size:1.6rem}.md-search__input+.md-search__icon,.md-search__input::-webkit-input-placeholder{-webkit-transition:color .25s;transition:color .25s;color:#fff}.md-search__input+.md-search__icon,.md-search__input::-moz-placeholder{-webkit-transition:color .25s;transition:color .25s;color:#fff}.md-search__input+.md-search__icon,.md-search__input:-ms-input-placeholder{-webkit-transition:color .25s;transition:color .25s;color:#fff}.md-search__input+.md-search__icon,.md-search__input::placeholder{-webkit-transition:color .25s;transition:color .25s;color:#fff}.md-search__input:hover{background-color:hsla(0,0%,100%,.12)}[data-md-toggle=search]:checked~.md-header .md-search__input{border-radius:.2rem .2rem 0 0;background-color:#fff;color:rgba(0,0,0,.87);text-overflow:none}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input::-webkit-input-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header 
.md-search__input::-moz-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input:-ms-input-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input::placeholder{color:rgba(0,0,0,.54)}.md-search__output{top:4rem;-webkit-transition:opacity .4s;transition:opacity .4s;opacity:0}[data-md-toggle=search]:checked~.md-header .md-search__output{box-shadow:0 6px 10px 0 rgba(0,0,0,.14),0 1px 18px 0 rgba(0,0,0,.12),0 3px 5px -1px rgba(0,0,0,.4);opacity:1}.md-search__scrollwrap{max-height:0}[data-md-toggle=search]:checked~.md-header .md-search__scrollwrap{max-height:75vh}.md-search__scrollwrap::-webkit-scrollbar{width:.4rem;height:.4rem}.md-search__scrollwrap::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-search-result__link,.md-search-result__meta{padding-left:4.8rem}.md-sidebar--secondary{display:block;float:right}.md-sidebar--secondary[data-md-state=lock]{margin-left:100%;-webkit-transform:translate(-100%);transform:translate(-100%)}}@media only screen and (min-width:76.25em){.md-content{margin-left:24.2rem;overflow:auto}.md-content__inner{margin:2.4rem}.md-content__inner :last-child{margin-bottom:0}.md-header-nav__button.md-icon--menu{display:none}.md-nav[data-md-state=animate]{-webkit-transition:max-height .25s cubic-bezier(.86,0,.07,1);transition:max-height .25s cubic-bezier(.86,0,.07,1)}.md-nav__toggle~.md-nav{max-height:0;overflow:hidden}.md-nav[data-md-state=expand],.md-nav__toggle:checked~.md-nav{max-height:100%}.md-nav__item--nested>.md-nav>.md-nav__title{display:none}.md-nav__item--nested>.md-nav__link:after{display:inline-block;-webkit-transform-origin:.45em .45em;transform-origin:.45em 
.45em;-webkit-transform-style:preserve-3d;transform-style:preserve-3d;vertical-align:-.125em}.js .md-nav__item--nested>.md-nav__link:after{-webkit-transition:-webkit-transform .4s;transition:-webkit-transform .4s;transition:transform .4s;transition:transform .4s,-webkit-transform .4s}.md-nav__item--nested .md-nav__toggle:checked~.md-nav__link:after{-webkit-transform:rotateX(180deg);transform:rotateX(180deg)}.md-search__scrollwrap,[data-md-toggle=search]:checked~.md-header .md-search__form{width:68.8rem}.md-sidebar__inner{border-right:.1rem solid rgba(0,0,0,.07)}}@media only screen and (max-width:29.9375em){.md-header-nav__parent{display:none}[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transform:scale(45);transform:scale(45)}}@media only screen and (min-width:45em){.md-footer-nav__link{width:50%}.md-footer-copyright{max-width:75%;float:left}.md-footer-social{padding:1.2rem 0;float:right}}@media only screen and (min-width:30em) and (max-width:44.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transform:scale(60);transform:scale(60)}}@media only screen and (min-width:45em) and (max-width:59.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transform:scale(75);transform:scale(75)}}@media only screen and (min-width:60em) and (max-width:76.1875em){.md-search__scrollwrap,[data-md-toggle=search]:checked~.md-header .md-search__form{width:46.8rem}}@media only screen and (min-width:60em) and (min-width:76.25em){.md-sidebar--secondary[data-md-state=lock]{margin-left:122rem}}
\ No newline at end of file
diff --git a/material/assets/stylesheets/application-f3ab63f78a.css b/material/assets/stylesheets/application-f3ab63f78a.css
deleted file mode 100644
index 6f3ab671b8bfab22469f28dc3403b166373c7d73..0000000000000000000000000000000000000000
--- a/material/assets/stylesheets/application-f3ab63f78a.css
+++ /dev/null
@@ -1 +0,0 @@
-html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}html{-webkit-text-size-adjust:none;-ms-text-size-adjust:none;text-size-adjust:none}body{margin:0}hr{overflow:visible;box-sizing:content-box}a{-webkit-text-decoration-skip:objects}a,button,input,label{-webkit-tap-highlight-color:transparent}a{color:inherit;text-decoration:none}a:active,a:hover{outline-width:0}small,sub,sup{font-size:80%}sub,sup{position:relative;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}table{border-collapse:collapse;border-spacing:0}td,th{font-weight:400;vertical-align:top}button{padding:0;background:transparent;font-size:inherit}button,input{border:0;outline:0}.admonition:before,.md-icon,.md-nav__button,.md-nav__link:after,.md-nav__title:before,.md-typeset .critic.comment:before,.md-typeset .footnote-backref,.md-typeset .task-list-control .task-list-indicator:before{font-family:Material Icons;font-style:normal;font-variant:normal;font-weight:400;line-height:1;text-transform:none;white-space:nowrap;speak:none;word-wrap:normal;direction:ltr}.md-content__edit,.md-footer-nav__button,.md-header-nav__button,.md-nav__button,.md-nav__title:before{display:inline-block;margin:.4rem;padding:.8rem;font-size:2.4rem;cursor:pointer}.md-icon--arrow-back:before{content:"arrow_back"}.md-icon--arrow-forward:before{content:"arrow_forward"}.md-icon--menu:before{content:"menu"}.md-icon--search:before{content:"search"}.md-icon--home:before{content:"school"}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}body,input{color:rgba(0,0,0,.87);-webkit-font-feature-settings:"kern","onum","liga";font-feature-settings:"kern","onum","liga";font-weight:400}.no-fontface body,.no-fontface input{font-family:Helvetica Neue,Helvetica,Arial,sans-serif}code,kbd,pre{color:rgba(0,0,0,.87);-webkit-font-feature-settings:"kern","onum","liga";font-feature-settings:"kern","onum","liga";font-weight:400}.no-fontface code,.no-fontface kbd,.no-fontface 
pre{font-family:Courier New,Courier,monospace}.md-typeset{font-size:1.6rem;line-height:1.6;-webkit-print-color-adjust:exact}.md-typeset blockquote,.md-typeset ol,.md-typeset p,.md-typeset ul{margin:1em 0}.md-typeset h1{margin:0 0 4rem;color:rgba(0,0,0,.54);font-size:3.125rem;line-height:1.3}.md-typeset h1,.md-typeset h2{font-weight:300;letter-spacing:-.01em}.md-typeset h2{margin:4rem 0 1.6rem;font-size:2.5rem;line-height:1.4}.md-typeset h3{margin:3.2rem 0 1.6rem;font-size:2rem;font-weight:400;letter-spacing:-.01em;line-height:1.5}.md-typeset h2+h3{margin-top:1.6rem}.md-typeset h4{font-size:1.6rem}.md-typeset h4,.md-typeset h5,.md-typeset h6{margin:1.6rem 0;font-weight:700;letter-spacing:-.01em}.md-typeset h5,.md-typeset h6{color:rgba(0,0,0,.54);font-size:1.28rem}.md-typeset h5{text-transform:uppercase}.md-typeset hr{margin:1.5em 0;border-bottom:.1rem dotted rgba(0,0,0,.26)}.md-typeset a{color:#3f51b5;word-break:break-word}.md-typeset a,.md-typeset a:before{-webkit-transition:color .125s;transition:color .125s}.md-typeset a:active,.md-typeset a:hover{color:#536dfe}.md-typeset code,.md-typeset pre{background-color:hsla(0,0%,93%,.5);color:#37474f;font-size:85%}.md-typeset code{margin:0 .29412em;padding:.07353em 0;border-radius:.2rem;box-shadow:.29412em 0 0 hsla(0,0%,93%,.5),-.29412em 0 0 hsla(0,0%,93%,.5);word-break:break-word;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset h1 code,.md-typeset h2 code,.md-typeset h3 code,.md-typeset h4 code,.md-typeset h5 code,.md-typeset h6 code{margin:0;background-color:transparent;box-shadow:none}.md-typeset a>code{margin:inherit;padding:inherit;border-radius:none;background-color:inherit;color:inherit;box-shadow:none}.md-typeset pre{margin:1em 0;padding:1rem 1.2rem;border-radius:.2rem;line-height:1.4;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset pre::-webkit-scrollbar{width:.4rem;height:.4rem}.md-typeset pre::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-typeset 
pre::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-typeset pre>code{margin:0;background-color:transparent;font-size:inherit;box-shadow:none;-webkit-box-decoration-break:none;box-decoration-break:none}.md-typeset kbd{padding:0 .29412em;border:.1rem solid #c9c9c9;border-radius:.2rem;border-bottom-color:#bcbcbc;background-color:#fcfcfc;color:#555;font-size:85%;box-shadow:0 .1rem 0 #b0b0b0;word-break:break-word}.md-typeset mark{margin:0 .25em;padding:.0625em 0;border-radius:.2rem;background-color:rgba(255,235,59,.5);box-shadow:.25em 0 0 rgba(255,235,59,.5),-.25em 0 0 rgba(255,235,59,.5);word-break:break-word;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset abbr{border-bottom:.1rem dotted rgba(0,0,0,.54);cursor:help}.md-typeset small{opacity:.75}.md-typeset sub,.md-typeset sup{margin-left:.07812em}.md-typeset blockquote{padding-left:1.2rem;border-left:.4rem solid rgba(0,0,0,.26);color:rgba(0,0,0,.54)}.md-typeset ul{list-style-type:disc}.md-typeset ol,.md-typeset ul{margin-left:.625em;padding:0}.md-typeset ol ol,.md-typeset ul ol{list-style-type:lower-alpha}.md-typeset ol ol ol,.md-typeset ul ol ol{list-style-type:lower-roman}.md-typeset ol li,.md-typeset ul li{margin-bottom:.5em;margin-left:1.25em}.md-typeset ol li blockquote,.md-typeset ol li p,.md-typeset ul li blockquote,.md-typeset ul li p{margin:.5em 0}.md-typeset ol li:last-child,.md-typeset ul li:last-child{margin-bottom:0}.md-typeset ol li ol,.md-typeset ol li ul,.md-typeset ul li ol,.md-typeset ul li ul{margin:.5em 0 .5em .625em}.md-typeset iframe,.md-typeset img,.md-typeset svg{max-width:100%}.md-typeset table:not([class]){box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);margin:2em 0;border-radius:.2rem;font-size:1.28rem;overflow:hidden}.no-js .md-typeset table:not([class]){display:inline-block;max-width:100%;margin:.8em 0;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset table:not([class]) 
td:not([align]),.md-typeset table:not([class]) th:not([align]){text-align:left}.md-typeset table:not([class]) th{min-width:10rem;padding:1.2rem 1.6rem;background-color:rgba(0,0,0,.54);color:#fff;vertical-align:top}.md-typeset table:not([class]) td{padding:1.2rem 1.6rem;border-top:.1rem solid rgba(0,0,0,.07);vertical-align:top}.md-typeset table:not([class]) tr:first-child td{border-top:0}.md-typeset table:not([class]) a{word-break:normal}.md-typeset .md-typeset__table{margin:1.6em -1.6rem;overflow-x:auto;-webkit-overflow-scrolling:touch}.md-typeset .md-typeset__table table{display:inline-block;margin:0 1.6rem}html{font-size:62.5%}body,html{height:100%}body{position:relative}hr{display:block;height:.1rem;padding:0;border:0}.md-svg{display:none}.md-grid{max-width:122rem;margin-right:auto;margin-left:auto}.md-container,.md-main{overflow:auto}.md-container{display:table;width:100%;height:100%;table-layout:fixed}.md-main{display:table-row;height:100%}.md-main__inner{margin-top:5.6rem;padding-top:3rem;overflow:auto}.csscalc .md-main__inner{min-height:calc(100% - 2.6rem)}@-moz-document url-prefix(){.csscalc .md-main__inner{min-height:calc(100% - 5.6rem)}}.md-toggle{display:none}.md-overlay{position:fixed;top:0;width:0;height:0;-webkit-transition:width 0s .25s,height 0s .25s,opacity .25s;transition:width 0s .25s,height 0s .25s,opacity .25s;background-color:rgba(0,0,0,.54);opacity:0;z-index:2}.md-flex{display:table}.md-flex__cell{display:table-cell;position:relative;vertical-align:top}.md-flex__cell--shrink{width:0}.md-flex__cell--stretch{display:table;width:100%;table-layout:fixed}.md-flex__ellipsis{display:table-cell;text-overflow:ellipsis;white-space:nowrap;overflow:hidden}@page{margin:25mm}.md-content__inner{margin:2.4rem 1.6rem}.md-content__edit{float:right}.md-header{box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);position:fixed;top:0;right:0;left:0;height:5.6rem;-webkit-transition:background-color 
.25s;transition:background-color .25s;background-color:#3f51b5;color:#fff;z-index:1}.md-header-nav{padding:.4rem}.md-header-nav__button{position:relative;-webkit-transition:opacity .25s;transition:opacity .25s;z-index:1}.md-header-nav__button:hover{opacity:.7}.md-header-nav__button.md-logo img{display:block}.no-js .md-header-nav__button.md-icon--search{display:none}.md-header-nav__title{padding:0 2rem;font-size:1.8rem;line-height:4.8rem}.md-header-nav__parent{color:hsla(0,0%,100%,.7)}.md-header-nav__parent:after{display:inline;color:hsla(0,0%,100%,.3);content:"/"}.md-header-nav__source{display:none}.md-footer-nav{background-color:rgba(0,0,0,.87);color:#fff}.md-footer-nav__inner{padding:.4rem;overflow:auto}.md-footer-nav__link{padding-top:2.8rem;padding-bottom:.8rem;-webkit-transition:opacity .25s;transition:opacity .25s}.md-footer-nav__link:hover{opacity:.7}.md-footer-nav__link--prev{width:25%;float:left}.md-footer-nav__link--next{width:75%;float:right;text-align:right}.md-footer-nav__button{-webkit-transition:background .25s;transition:background .25s}.md-footer-nav__title{position:relative;padding:0 2rem;font-size:1.8rem;line-height:4.8rem}.md-footer-nav__direction{position:absolute;right:0;left:0;margin-top:-2rem;padding:0 2rem;color:hsla(0,0%,100%,.7);font-size:1.5rem}.md-footer-meta{background:rgba(0,0,0,.895)}.md-footer-meta__inner{padding:.4rem;overflow:auto}html .md-footer-meta.md-typeset a{color:hsla(0,0%,100%,.7)}.md-footer-copyright{margin:0 1.2rem;padding:.8rem 0;color:hsla(0,0%,100%,.3);font-size:1.28rem}.md-footer-copyright__highlight{color:hsla(0,0%,100%,.7)}.md-footer-social{margin:0 .8rem;padding:.4rem 0 1.2rem}.md-footer-social__link{display:inline-block;width:3.2rem;height:3.2rem;border:.1rem solid hsla(0,0%,100%,.12);border-radius:100%;color:hsla(0,0%,100%,.7);font-size:1.6rem;text-align:center}.md-footer-social__link:before{line-height:1.9}.md-nav{font-size:1.28rem;line-height:1.3}.md-nav--secondary{-webkit-transition:border-left 
.25s;transition:border-left .25s;border-left:.4rem solid #3f51b5}.md-nav__title{display:block;padding:1.2rem 1.2rem 0;font-weight:700;text-overflow:ellipsis;overflow:hidden}.md-nav__title:before{display:none;content:"arrow_back"}.md-nav__title .md-nav__button{display:none}.md-nav__list{margin:0;padding:0;list-style:none}.md-nav__item{padding:.625em 1.2rem 0}.md-nav__item:last-child{padding-bottom:1.2rem}.md-nav__item .md-nav__item{padding-right:0}.md-nav__item .md-nav__item:last-child{padding-bottom:0}.md-nav__button img{width:100%;height:auto}.md-nav__link{display:block;-webkit-transition:color .125s;transition:color .125s;text-overflow:ellipsis;cursor:pointer;overflow:hidden}.md-nav__item--nested>.md-nav__link:after{content:"keyboard_arrow_down"}html .md-nav__link[for=toc],html .md-nav__link[for=toc]+.md-nav__link:after,html .md-nav__link[for=toc]~.md-nav{display:none}.md-nav__link[data-md-state=blur]{color:rgba(0,0,0,.54)}.md-nav__link--active,.md-nav__link:active{color:#3f51b5}.md-nav__link:hover{color:#536dfe}.md-nav__source,.no-js .md-search{display:none}.md-search__overlay{display:none;pointer-events:none}.md-search__inner{width:100%}.md-search__form{position:relative}.md-search__input{position:relative;padding:0 1.6rem 0 7.2rem;text-overflow:ellipsis;z-index:1}.md-search__input+.md-search__icon,.md-search__input::-webkit-input-placeholder{color:rgba(0,0,0,.54)}.md-search__input+.md-search__icon,.md-search__input::-moz-placeholder{color:rgba(0,0,0,.54)}.md-search__input+.md-search__icon,.md-search__input:-ms-input-placeholder{color:rgba(0,0,0,.54)}.md-search__input+.md-search__icon,.md-search__input::placeholder{color:rgba(0,0,0,.54)}.md-search__input::-ms-clear{display:none}.md-search__icon{position:absolute;top:.8rem;left:1.2rem;-webkit-transition:color .25s;transition:color .25s;font-size:2.4rem;cursor:pointer;z-index:1}.md-search__icon:before{content:"search"}.md-search__output{position:absolute;width:100%;border-radius:0 0 .2rem 
.2rem;overflow:hidden}.md-search__scrollwrap{height:100%;background:-webkit-linear-gradient(top,#fff 10%,hsla(0,0%,100%,0)),-webkit-linear-gradient(top,rgba(0,0,0,.26),rgba(0,0,0,.07) 35%,transparent 60%);background:linear-gradient(180deg,#fff 10%,hsla(0,0%,100%,0)),linear-gradient(180deg,rgba(0,0,0,.26),rgba(0,0,0,.07) 35%,transparent 60%);background-attachment:local,scroll;background-color:#fff;background-repeat:no-repeat;background-size:100% 2rem,100% 1rem;box-shadow:inset 0 .1rem 0 rgba(0,0,0,.07);overflow-y:auto;-webkit-overflow-scrolling:touch}.md-search-result__meta{padding:0 1.6rem;background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.54);font-size:1.28rem;line-height:4rem}.md-search-result__list{margin:0;padding:0;border-top:.1rem solid rgba(0,0,0,.07);list-style:none}.md-search-result__item{box-shadow:0 -.1rem 0 rgba(0,0,0,.07)}.md-search-result__link{display:block;padding:0 1.6rem;-webkit-transition:background .25s;transition:background .25s;overflow:auto}.md-search-result__link:hover{background-color:rgba(83,109,254,.1)}.md-search-result__article{margin:1em 0}.md-search-result__title{margin-top:.5em;margin-bottom:0;color:rgba(0,0,0,.87);font-size:1.6rem;font-weight:400;line-height:1.4}.md-search-result__teaser{margin:.5em 0;color:rgba(0,0,0,.54);font-size:1.28rem;line-height:1.4;word-break:break-word}.md-sidebar{position:relative;width:24.2rem;padding:2.4rem 0;float:left;overflow:visible}.md-sidebar[data-md-state=lock]{position:fixed;top:5.6rem;-webkit-backface-visibility:hidden;backface-visibility:hidden}.md-sidebar--secondary{display:none}.md-sidebar__scrollwrap{max-height:100%;margin:0 .4rem;overflow-y:auto}.md-sidebar__scrollwrap::-webkit-scrollbar{width:.4rem;height:.4rem}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}@-webkit-keyframes a{0%{height:0}to{height:1.3rem}}@keyframes a{0%{height:0}to{height:1.3rem}}@-webkit-keyframes 
b{0%{-webkit-transform:translateY(100%);transform:translateY(100%);opacity:0}50%{opacity:0}to{-webkit-transform:translateY(0);transform:translateY(0);opacity:1}}@keyframes b{0%{-webkit-transform:translateY(100%);transform:translateY(100%);opacity:0}50%{opacity:0}to{-webkit-transform:translateY(0);transform:translateY(0);opacity:1}}.md-source{display:block;-webkit-transition:opacity .25s;transition:opacity .25s;font-size:1.3rem;line-height:1.2;white-space:nowrap}.md-source:hover{opacity:.7}.md-source:after,.md-source__icon{display:inline-block;height:4.8rem;content:"";vertical-align:middle}.md-source__icon{width:4.8rem}.md-source__icon svg{margin-top:1.2rem;margin-left:1.2rem}.md-source__icon+.md-source__repository{margin-left:-4.4rem;padding-left:4rem}.md-source__repository{display:inline-block;max-width:100%;margin-left:1.2rem;font-weight:700;text-overflow:ellipsis;overflow:hidden;vertical-align:middle}.md-source__facts{margin:0;padding:0;font-size:1.1rem;font-weight:700;list-style-type:none;opacity:.75;overflow:hidden}[data-md-state=done] .md-source__facts{-webkit-animation:a .25s ease-in;animation:a .25s ease-in}.md-source__fact{float:left}[data-md-state=done] .md-source__fact{-webkit-animation:b .4s ease-out;animation:b .4s ease-out}.md-source__fact:before{margin:0 .2rem;content:"\00B7"}.md-source__fact:first-child:before{display:none}.admonition{position:relative;margin:1.5625em 0;padding:.8rem 1.2rem;border-left:3.2rem solid rgba(68,138,255,.4);border-radius:.2rem;background-color:rgba(68,138,255,.15);font-size:1.28rem}.admonition:before{position:absolute;left:-2.6rem;color:#fff;font-size:2rem;content:"edit";vertical-align:-.25em}.admonition :first-child{margin-top:0}.admonition 
:last-child{margin-bottom:0}.admonition.summary,.admonition.tldr{border-color:rgba(0,176,255,.4);background-color:rgba(0,176,255,.15)}.admonition.summary:before,.admonition.tldr:before{content:"subject"}.admonition.hint,.admonition.important,.admonition.tip{border-color:rgba(0,191,165,.4);background-color:rgba(0,191,165,.15)}.admonition.hint:before,.admonition.important:before,.admonition.tip:before{content:"whatshot"}.admonition.check,.admonition.done,.admonition.success{border-color:rgba(0,230,118,.4);background-color:rgba(0,230,118,.15)}.admonition.check:before,.admonition.done:before,.admonition.success:before{content:"done"}.admonition.attention,.admonition.caution,.admonition.warning{border-color:rgba(255,145,0,.4);background-color:rgba(255,145,0,.15)}.admonition.attention:before,.admonition.caution:before,.admonition.warning:before{content:"warning"}.admonition.fail,.admonition.failure,.admonition.missing{border-color:rgba(255,82,82,.4);background-color:rgba(255,82,82,.15)}.admonition.fail:before,.admonition.failure:before,.admonition.missing:before{content:"clear"}.admonition.danger,.admonition.error{border-color:rgba(255,23,68,.4);background-color:rgba(255,23,68,.15)}.admonition.danger:before,.admonition.error:before{content:"flash_on"}.admonition.bug{border-color:rgba(245,0,87,.4);background-color:rgba(245,0,87,.15)}.admonition.bug:before{content:"bug_report"}.admonition-title{font-weight:700}html .admonition-title{margin-bottom:0}html .admonition-title+*{margin-top:0}.codehilite .o,.codehilite .ow{color:inherit}.codehilite .ge{color:#000}.codehilite .gr{color:#a00}.codehilite .gh{color:#999}.codehilite .go{color:#888}.codehilite .gp{color:#555}.codehilite .gs{color:inherit}.codehilite .gu{color:#aaa}.codehilite .gt{color:#a00}.codehilite .gd{background-color:#fdd}.codehilite .gi{background-color:#dfd}.codehilite .k{color:#3b78e7}.codehilite .kc{color:#a71d5d}.codehilite .kd,.codehilite .kn{color:#3b78e7}.codehilite .kp{color:#a71d5d}.codehilite 
.kr,.codehilite .kt{color:#3e61a2}.codehilite .c,.codehilite .cm{color:#999}.codehilite .cp{color:#666}.codehilite .c1,.codehilite .ch,.codehilite .cs{color:#999}.codehilite .na,.codehilite .nb{color:#c2185b}.codehilite .bp{color:#3e61a2}.codehilite .nc{color:#c2185b}.codehilite .no{color:#3e61a2}.codehilite .nd,.codehilite .ni{color:#666}.codehilite .ne,.codehilite .nf{color:#c2185b}.codehilite .nl{color:#3b5179}.codehilite .nn{color:#ec407a}.codehilite .nt{color:#3b78e7}.codehilite .nv,.codehilite .vc,.codehilite .vg,.codehilite .vi{color:#3e61a2}.codehilite .nx{color:#ec407a}.codehilite .il,.codehilite .m,.codehilite .mf,.codehilite .mh,.codehilite .mi,.codehilite .mo{color:#e74c3c}.codehilite .s,.codehilite .sb,.codehilite .sc{color:#0d904f}.codehilite .sd{color:#999}.codehilite .s2{color:#0d904f}.codehilite .se,.codehilite .sh,.codehilite .si,.codehilite .sx{color:#183691}.codehilite .sr{color:#009926}.codehilite .s1,.codehilite .ss{color:#0d904f}.codehilite .err{color:#a61717}.codehilite .w{color:transparent}.codehilite .hll{display:block;margin:0 -1.2rem;padding:0 1.2rem;background-color:rgba(255,235,59,.5)}.md-typeset .codehilite{margin:1em 0;padding:1rem 1.2rem .8rem;border-radius:.2rem;background-color:hsla(0,0%,93%,.5);color:#37474f;line-height:1.4;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset .codehilite::-webkit-scrollbar{width:.4rem;height:.4rem}.md-typeset .codehilite::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-typeset .codehilite::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-typeset .codehilite pre{display:inline-block;min-width:100%;margin:0;padding:0;background-color:transparent;overflow:visible;vertical-align:top}.md-typeset .codehilitetable{display:block;margin:1em 0;border-radius:.2em;font-size:1.6rem;overflow:hidden}.md-typeset .codehilitetable tbody,.md-typeset .codehilitetable td{display:block;padding:0}.md-typeset .codehilitetable 
tr{display:-webkit-box;display:-ms-flexbox;display:flex}.md-typeset .codehilitetable .codehilite,.md-typeset .codehilitetable .linenodiv{margin:0;border-radius:0}.md-typeset .codehilitetable .linenodiv{padding:1rem 1.2rem .8rem}.md-typeset .codehilitetable .linenodiv,.md-typeset .codehilitetable .linenodiv>pre{height:100%}.md-typeset .codehilitetable .linenos{background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.26);-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.md-typeset .codehilitetable .linenos pre{margin:0;padding:0;background-color:transparent;color:inherit;text-align:right}.md-typeset .codehilitetable .code{-webkit-box-flex:1;-ms-flex:1;flex:1;overflow:hidden}.md-typeset>.codehilitetable{box-shadow:none}.md-typeset .footnote{color:rgba(0,0,0,.54);font-size:1.28rem}.md-typeset .footnote ol{margin-left:0}.md-typeset .footnote li{-webkit-transition:color .25s;transition:color .25s}.md-typeset .footnote li:before{display:block;height:0}.md-typeset .footnote li:target{color:rgba(0,0,0,.87)}.md-typeset .footnote li:target:before{margin-top:-9rem;padding-top:9rem;pointer-events:none}.md-typeset .footnote li :first-child{margin-top:0}.md-typeset .footnote li:hover .footnote-backref,.md-typeset .footnote li:target .footnote-backref{-webkit-transform:translateX(0);transform:translateX(0);opacity:1}.md-typeset .footnote li:hover .footnote-backref:hover,.md-typeset .footnote li:target .footnote-backref{color:#536dfe}.md-typeset .footnote-backref{display:inline-block;-webkit-transform:translateX(.5rem);transform:translateX(.5rem);-webkit-transition:color .25s,opacity .125s .125s,-webkit-transform .25s .125s;transition:color .25s,opacity .125s .125s,-webkit-transform .25s .125s;transition:transform .25s .125s,color .25s,opacity .125s .125s;transition:transform .25s .125s,color .25s,opacity .125s .125s,-webkit-transform .25s .125s;color:rgba(0,0,0,.26);font-size:0;opacity:0;vertical-align:text-bottom}.md-typeset 
.footnote-backref:before{font-size:1.6rem;content:"keyboard_return"}.md-typeset .headerlink{display:inline-block;margin-left:1rem;-webkit-transform:translateY(.5rem);transform:translateY(.5rem);-webkit-transition:color .25s,opacity .125s .25s,-webkit-transform .25s .25s;transition:color .25s,opacity .125s .25s,-webkit-transform .25s .25s;transition:transform .25s .25s,color .25s,opacity .125s .25s;transition:transform .25s .25s,color .25s,opacity .125s .25s,-webkit-transform .25s .25s;opacity:0}html body .md-typeset .headerlink{color:rgba(0,0,0,.26)}.md-typeset [id]:before{display:inline-block;content:""}.md-typeset [id]:target:before{margin-top:-9.8rem;padding-top:9.8rem}.md-typeset [id] .headerlink:focus,.md-typeset [id]:hover .headerlink,.md-typeset [id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset [id] .headerlink:focus,.md-typeset [id]:hover .headerlink:hover,.md-typeset [id]:target .headerlink{color:#536dfe}.md-typeset h1[id] .headerlink{display:none}.md-typeset h2[id]:before{display:block;margin-top:-.2rem;padding-top:.2rem}.md-typeset h2[id]:target:before{margin-top:-8.2rem;padding-top:8.2rem}.md-typeset h3[id]:before{display:block;margin-top:-.4rem;padding-top:.4rem}.md-typeset h3[id]:target:before{margin-top:-8.4rem;padding-top:8.4rem}.md-typeset h4[id]:before{display:block;margin-top:-.6rem;padding-top:.6rem}.md-typeset h4[id]:target:before{margin-top:-8.6rem;padding-top:8.6rem}.md-typeset h5[id]:before{display:block;margin-top:-1rem;padding-top:1rem}.md-typeset h5[id]:target:before{margin-top:-9rem;padding-top:9rem}.md-typeset h6[id]:before{display:block;margin-top:-1rem;padding-top:1rem}.md-typeset h6[id]:target:before{margin-top:-9rem;padding-top:9rem}.md-typeset .MJXc-display{margin:.75em 0;padding:.25em 0;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset .MathJax_CHTML{outline:0}.md-typeset .comment.critic,.md-typeset del.critic,.md-typeset ins.critic{margin:0 .25em;padding:.0625em 
0;border-radius:.2rem;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset del.critic{background-color:#fdd;box-shadow:.25em 0 0 #fdd,-.25em 0 0 #fdd}.md-typeset ins.critic{background-color:#dfd;box-shadow:.25em 0 0 #dfd,-.25em 0 0 #dfd}.md-typeset .critic.comment{background-color:hsla(0,0%,93%,.5);color:#37474f;box-shadow:.25em 0 0 hsla(0,0%,93%,.5),-.25em 0 0 hsla(0,0%,93%,.5)}.md-typeset .critic.comment:before{padding-right:.125em;color:rgba(0,0,0,.26);content:"chat";vertical-align:-.125em}.md-typeset .critic.block{display:block;margin:1em 0;padding-right:1.6rem;padding-left:1.6rem;box-shadow:none}.md-typeset .critic.block :first-child{margin-top:.5em}.md-typeset .critic.block :last-child{margin-bottom:.5em}.md-typeset .emojione{width:2rem;vertical-align:text-top}.md-typeset code.codehilite{margin:0 .29412em;padding:.07353em 0}.md-typeset .task-list-item{position:relative;list-style-type:none}.md-typeset .task-list-item [type=checkbox]{position:absolute;top:.45em;left:-2em}.md-typeset .task-list-control .task-list-indicator:before{position:absolute;top:.05em;left:-1.25em;color:rgba(0,0,0,.26);font-size:1.5em;content:"check_box_outline_blank";vertical-align:-.25em}.md-typeset .task-list-control [type=checkbox]:checked+.task-list-indicator:before{content:"check_box"}.md-typeset .task-list-control [type=checkbox]{opacity:0;z-index:-1}@media print{.md-typeset a:after{color:rgba(0,0,0,.54);content:" [" attr(href) "]"}.md-typeset code{box-shadow:none;-webkit-box-decoration-break:initial;box-decoration-break:slice}.md-content__edit,.md-footer,.md-header,.md-sidebar,.md-typeset .headerlink{display:none}}@media only screen and (max-width:44.9375em){.md-typeset pre{margin:1em -1.6rem;padding:1rem 1.6rem;border-radius:0}.codehilite .hll{margin:0 -1.6rem;padding:0 1.6rem}.md-typeset>.codehilite{padding:1rem 1.6rem .8rem}.md-typeset>.codehilite,.md-typeset>.codehilitetable{margin:1em -1.6rem;border-radius:0}.md-typeset>.codehilitetable 
.codehilite,.md-typeset>.codehilitetable .linenodiv{padding:1rem 1.6rem}.md-typeset>p>.MJXc-display{margin:.75em -1.6rem;padding:.25em 1.6rem}}@media only screen and (min-width:100em){html{font-size:68.75%}}@media only screen and (min-width:125em){html{font-size:75%}}@media only screen and (max-width:59.9375em){body[data-md-state=lock]{overflow:hidden}.ios body[data-md-state=lock] .md-container{display:none}.md-content__edit{margin-right:-.8rem}.md-nav--secondary{border-left:0}html .md-nav__link[for=toc]{display:block;padding-right:4.8rem}html .md-nav__link[for=toc]:after{color:inherit;content:"toc"}html .md-nav__link[for=toc]+.md-nav__link{display:none}html .md-nav__link[for=toc]~.md-nav{display:-webkit-box;display:-ms-flexbox;display:flex}.md-nav__source{display:block;padding:.4rem;background-color:rgba(50,64,144,.9675);color:#fff}.md-search__overlay{display:block;position:absolute;top:.4rem;left:.4rem;width:4rem;height:4rem;-webkit-transform-origin:center;transform-origin:center;-webkit-transition:opacity .2s .2s,-webkit-transform .3s .1s;transition:opacity .2s .2s,-webkit-transform .3s .1s;transition:transform .3s .1s,opacity .2s .2s;transition:transform .3s .1s,opacity .2s .2s,-webkit-transform .3s .1s;border-radius:2rem;background-color:#fff;opacity:0;overflow:hidden;z-index:1}[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transition:opacity .1s,-webkit-transform .4s;transition:opacity .1s,-webkit-transform .4s;transition:transform .4s,opacity .1s;transition:transform .4s,opacity .1s,-webkit-transform .4s;opacity:1}.md-search__inner{position:fixed;top:0;left:100%;height:100%;-webkit-transform:translateX(5%);transform:translateX(5%);-webkit-transition:left 0s .3s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.4,0,.2,1) .15s;transition:left 0s .3s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.4,0,.2,1) .15s;transition:left 0s .3s,transform .15s cubic-bezier(.4,0,.2,1) .15s,opacity .15s .15s;transition:left 0s 
.3s,transform .15s cubic-bezier(.4,0,.2,1) .15s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.4,0,.2,1) .15s;opacity:0;z-index:2}[data-md-toggle=search]:checked~.md-header .md-search__inner{left:0;-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:left 0s 0s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;transition:left 0s 0s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;transition:left 0s 0s,transform .15s cubic-bezier(.1,.7,.1,1) .15s,opacity .15s .15s;transition:left 0s 0s,transform .15s cubic-bezier(.1,.7,.1,1) .15s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;opacity:1}.md-search__input{width:100%;height:5.6rem;font-size:1.8rem}.md-search__icon{top:1.6rem;left:1.6rem}.md-search__icon:before{content:"arrow_back"}.md-search__output{top:5.6rem;bottom:0}}@media only screen and (max-width:76.1875em){[data-md-toggle=drawer]:checked~.md-overlay{width:100%;height:100%;-webkit-transition:width 0s,height 0s,opacity .25s;transition:width 0s,height 0s,opacity .25s;opacity:1}.md-header-nav__button.md-icon--home,.md-header-nav__button.md-logo{display:none}.md-nav--primary,.md-nav--primary .md-nav{display:-webkit-box;display:-ms-flexbox;display:flex;position:absolute;top:0;right:0;left:0;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;height:100%;z-index:1}.md-nav--primary{background-color:#fff}.md-nav--primary .md-nav__toggle~.md-nav{box-shadow:0 4px 5px 0 rgba(0,0,0,.14),0 1px 10px 0 rgba(0,0,0,.12),0 2px 4px -1px rgba(0,0,0,.4);background-color:#fff}html .md-nav--primary .md-nav__title{position:relative;height:11.2rem;padding:6rem 1.6rem .4rem;background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.54);font-weight:400;line-height:4.8rem;white-space:nowrap;cursor:pointer}html .md-nav--primary .md-nav__title:before{display:block;position:absolute;top:.4rem;left:.4rem;width:4rem;height:4rem;color:rgba(0,0,0,.54)}html 
.md-nav--primary .md-nav__title~.md-nav__list{background:-webkit-linear-gradient(top,#fff 10%,hsla(0,0%,100%,0)),-webkit-linear-gradient(top,rgba(0,0,0,.26),rgba(0,0,0,.07) 35%,transparent 60%);background:linear-gradient(180deg,#fff 10%,hsla(0,0%,100%,0)),linear-gradient(180deg,rgba(0,0,0,.26),rgba(0,0,0,.07) 35%,transparent 60%);background-attachment:local,scroll;background-color:#fff;background-repeat:no-repeat;background-size:100% 2rem,100% 1rem;box-shadow:inset 0 .1rem 0 rgba(0,0,0,.07)}html .md-nav--primary .md-nav__title~.md-nav__list>.md-nav__item:first-child{border-top:0}html .md-nav--primary .md-nav__title--site{position:relative;background-color:#3f51b5;color:#fff}html .md-nav--primary .md-nav__title--site .md-nav__button{display:block;position:absolute;top:.4rem;left:.4rem;width:6.4rem;height:6.4rem;font-size:4.8rem}html .md-nav--primary .md-nav__title--site:before{display:none}.md-nav--primary .md-nav__list{-webkit-box-flex:1;-ms-flex:1;flex:1;overflow-y:auto}.md-nav--primary .md-nav__item{padding:0;border-top:.1rem solid rgba(0,0,0,.07)}.md-nav--primary .md-nav__item--nested>.md-nav__link{padding-right:4.8rem}.md-nav--primary .md-nav__item--nested>.md-nav__link:after{content:"keyboard_arrow_right"}.md-nav--primary .md-nav__link{position:relative;padding:1.6rem}.md-nav--primary .md-nav__link:after{position:absolute;top:50%;right:1.2rem;margin-top:-1.2rem;color:rgba(0,0,0,.54);font-size:2.4rem}.md-nav--primary .md-nav__link:hover:after{color:inherit}.md-nav--primary .md-nav--secondary .md-nav{position:static}.md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-left:2.8rem}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-left:4rem}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-left:5.2rem}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav .md-nav__link{padding-left:6.4rem}.md-nav__toggle~.md-nav{display:none}.csstransforms3d 
.md-nav__toggle~.md-nav{-webkit-transform:translateX(100%);transform:translateX(100%);-webkit-transition:opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);transition:opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);transition:transform .25s cubic-bezier(.8,0,.6,1),opacity .125s .05s;transition:transform .25s cubic-bezier(.8,0,.6,1),opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);opacity:0}.csstransforms3d .md-nav__toggle~.md-nav,.md-nav__toggle:checked~.md-nav{display:-webkit-box;display:-ms-flexbox;display:flex}.csstransforms3d .md-nav__toggle:checked~.md-nav{-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .125s .125s;transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);opacity:1}.md-nav .md-nav__item,.md-nav .md-nav__title{font-size:1.6rem;line-height:1.5}.md-sidebar--primary{position:fixed;top:0;left:-24.2rem;width:24.2rem;height:100%;-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:box-shadow .25s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:box-shadow .25s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:transform .25s cubic-bezier(.4,0,.2,1),box-shadow .25s;transition:transform .25s cubic-bezier(.4,0,.2,1),box-shadow .25s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);background-color:#fff;z-index:2}.no-csstransforms3d .md-sidebar--primary{display:none}[data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{box-shadow:0 8px 10px 1px rgba(0,0,0,.14),0 3px 14px 2px rgba(0,0,0,.12),0 5px 5px -3px rgba(0,0,0,.4);-webkit-transform:translateX(24.2rem);transform:translateX(24.2rem)}.no-csstransforms3d [data-md-toggle=drawer]:checked~.md-container 
.md-sidebar--primary{display:block}.md-sidebar--primary .md-sidebar__scrollwrap{overflow:hidden;position:absolute;top:0;right:0;bottom:0;left:0;margin:0}}@media only screen and (min-width:60em){.md-content{margin-right:24.2rem}.md-header-nav__button.md-icon--search{display:none}.md-header-nav__source{display:block;width:23rem;max-width:23rem;padding-right:1.2rem}.md-search{margin-right:2.8rem;padding:.4rem}.md-search__inner{display:table;position:relative;clear:both}.md-search__form{width:23rem;float:right;-webkit-transition:width .25s cubic-bezier(.1,.7,.1,1);transition:width .25s cubic-bezier(.1,.7,.1,1);border-radius:.2rem}.md-search__input{width:100%;height:4rem;padding-left:4.8rem;-webkit-transition:background-color .25s,color .25s;transition:background-color .25s,color .25s;border-radius:.2rem;background-color:rgba(0,0,0,.26);color:#fff;font-size:1.6rem}.md-search__input+.md-search__icon,.md-search__input::-webkit-input-placeholder{-webkit-transition:color .25s;transition:color .25s;color:#fff}.md-search__input+.md-search__icon,.md-search__input::-moz-placeholder{-webkit-transition:color .25s;transition:color .25s;color:#fff}.md-search__input+.md-search__icon,.md-search__input:-ms-input-placeholder{-webkit-transition:color .25s;transition:color .25s;color:#fff}.md-search__input+.md-search__icon,.md-search__input::placeholder{-webkit-transition:color .25s;transition:color .25s;color:#fff}.md-search__input:hover{background-color:hsla(0,0%,100%,.12)}[data-md-toggle=search]:checked~.md-header .md-search__input{border-radius:.2rem .2rem 0 0;background-color:#fff;color:rgba(0,0,0,.87);text-overflow:none}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input::-webkit-input-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header 
.md-search__input::-moz-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input:-ms-input-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input::placeholder{color:rgba(0,0,0,.54)}.md-search__output{top:4rem;-webkit-transition:opacity .4s;transition:opacity .4s;opacity:0}[data-md-toggle=search]:checked~.md-header .md-search__output{box-shadow:0 6px 10px 0 rgba(0,0,0,.14),0 1px 18px 0 rgba(0,0,0,.12),0 3px 5px -1px rgba(0,0,0,.4);opacity:1}.md-search__scrollwrap{max-height:0}[data-md-toggle=search]:checked~.md-header .md-search__scrollwrap{max-height:75vh}.md-search__scrollwrap::-webkit-scrollbar{width:.4rem;height:.4rem}.md-search__scrollwrap::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-search-result__link,.md-search-result__meta{padding-left:4.8rem}.md-sidebar--secondary{display:block;float:right}.md-sidebar--secondary[data-md-state=lock]{margin-left:100%;-webkit-transform:translate(-100%);transform:translate(-100%)}}@media only screen and (min-width:76.25em){.md-content{margin-left:24.2rem;overflow:auto}.md-content__inner{margin:2.4rem}.md-content__inner :last-child{margin-bottom:0}.md-header-nav__button.md-icon--menu{display:none}.md-nav[data-md-state=animate]{-webkit-transition:max-height .25s cubic-bezier(.86,0,.07,1);transition:max-height .25s cubic-bezier(.86,0,.07,1)}.md-nav__toggle~.md-nav{max-height:0;overflow:hidden}.md-nav[data-md-state=expand],.md-nav__toggle:checked~.md-nav{max-height:100%}.md-nav__item--nested>.md-nav>.md-nav__title{display:none}.md-nav__item--nested>.md-nav__link:after{display:inline-block;-webkit-transform-origin:.45em .45em;transform-origin:.45em 
.45em;-webkit-transform-style:preserve-3d;transform-style:preserve-3d;vertical-align:-.125em}.js .md-nav__item--nested>.md-nav__link:after{-webkit-transition:-webkit-transform .4s;transition:-webkit-transform .4s;transition:transform .4s;transition:transform .4s,-webkit-transform .4s}.md-nav__item--nested .md-nav__toggle:checked~.md-nav__link:after{-webkit-transform:rotateX(180deg);transform:rotateX(180deg)}.md-search__scrollwrap,[data-md-toggle=search]:checked~.md-header .md-search__form{width:68.8rem}.md-sidebar__inner{border-right:.1rem solid rgba(0,0,0,.07)}}@media only screen and (max-width:29.9375em){.md-footer-nav__link--prev .md-footer-nav__title,.md-header-nav__parent{display:none}[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transform:scale(45);transform:scale(45)}}@media only screen and (min-width:30em){.md-footer-nav__link{width:50%}}@media only screen and (min-width:45em){.md-footer-copyright{max-width:75%;float:left}.md-footer-social{padding:1.2rem 0;float:right}}@media only screen and (min-width:30em) and (max-width:44.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transform:scale(60);transform:scale(60)}}@media only screen and (min-width:45em) and (max-width:59.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transform:scale(75);transform:scale(75)}}@media only screen and (min-width:60em) and (max-width:76.1875em){.md-search__scrollwrap,[data-md-toggle=search]:checked~.md-header .md-search__form{width:46.8rem}}@media only screen and (min-width:60em) and (min-width:76.25em){.md-sidebar--secondary[data-md-state=lock]{margin-left:122rem}}
\ No newline at end of file
diff --git a/material/base.html b/material/base.html
index 046e7d67dbf7491c0af00d69c71f1cf029d148a2..33c137f982b3d2b116fd674e65cb2ac12769d187 100644
--- a/material/base.html
+++ b/material/base.html
@@ -19,7 +19,7 @@
       {% else %}
         <link rel="shortcut icon" href="{{ base_url }}/assets/images/favicon.ico">
       {% endif %}
-      <meta name="generator" content="mkdocs+mkdocs-material#1.0.2">
+      <meta name="generator" content="mkdocs-{{ mkdocs_version }}, mkdocs-material-1.0.5">
     {% endblock %}
     {% block htmltitle %}
       {% if page.title %}
@@ -31,7 +31,13 @@
       {% endif %}
     {% endblock %}
     {% block libs %}
-      <script src="{{ base_url }}/assets/javascripts/modernizr-facb31f4a3.js"></script>
+      <script src="{{ base_url }}/assets/javascripts/modernizr-56ade86843.js"></script>
+    {% endblock %}
+    {% block styles %}
+      <link rel="stylesheet" href="{{ base_url }}/assets/stylesheets/application-9c62e3c932.css">
+      {% if config.extra.palette %}
+        <link rel="stylesheet" href="{{ base_url }}/assets/stylesheets/application-02ce7adcc2.palette.css">
+      {% endif %}
     {% endblock %}
     {% block fonts %}
       {% if config.extra.font != "none" %}
@@ -44,15 +50,9 @@
       {% endif %}
       <link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons">
     {% endblock %}
-    {% block styles %}
-      <link rel="stylesheet" href="{{ base_url }}/assets/stylesheets/application-f3ab63f78a.css">
-      {% if config.extra.palette %}
-        <link rel="stylesheet" href="{{ base_url }}/assets/stylesheets/application-02ce7adcc2.palette.css">
-      {% endif %}
-      {% for path in extra_css %}
-        <link rel="stylesheet" href="{{ path }}">
-      {% endfor %}
-    {% endblock %}
+    {% for path in extra_css %}
+      <link rel="stylesheet" href="{{ path }}">
+    {% endfor %}
     {% block extrahead %}{% endblock %}
   </head>
   {% set palette = config.extra.get("palette", {}) %}
@@ -110,7 +110,7 @@
                  <a href="{{ page.edit_url }}" title="{{ lang.t('edit.link.title') }}" class="md-icon md-content__edit">edit</a>
                {% endif %}
               {% block content %}
-                {% if not "\x3ch1 id=" in page.content %}
+                {% if not "\x3ch1" in page.content %}
                   <h1>{{ page.title | default(config.site_name, true)}}</h1>
                 {% endif %}
                 {{ page.content }}
@@ -124,8 +124,8 @@
       {% endblock %}
     </div>
     {% block scripts %}
-      <script src="{{ base_url }}/assets/javascripts/application-16f434a21a.js"></script>
-      <script>var config={url:{base:"{{ base_url }}"}},app=new Application(config);app.initialize()</script>
+      <script src="{{ base_url }}/assets/javascripts/application-f7ac33b6fb.js"></script>
+      <script>app.initialize({url:{base:"{{ base_url }}"}})</script>
       {% for path in extra_javascript %}
         <script src="{{ path }}"></script>
       {% endfor %}
diff --git a/material/partials/nav-item.html b/material/partials/nav-item.html
index 651c6012013d18f04c950a92f7716b673bd36b76..be77f2f6bab516295ee7fa2a56c3985affb08463 100644
--- a/material/partials/nav-item.html
+++ b/material/partials/nav-item.html
@@ -25,10 +25,10 @@
   <li class="md-nav__item">
     {% set toc_ = page.toc %}
     <input class="md-toggle md-nav__toggle" data-md-toggle="toc" type="checkbox" id="toc">
-    {% if "\x3ch1 id=" in page.content %}
+    {% if toc_ | first is defined %}
       {% set toc_ = (toc_ | first).children %}
     {% endif %}
-    {% if toc_ and (toc_ | first) %}
+    {% if toc_ | first is defined %}
       <label class="md-nav__link md-nav__link--active" for="toc">
         {{ nav_item.title }}
       </label>
@@ -36,7 +36,7 @@
     <a href="{{ nav_item.url }}" title="{{ nav_item.title }}" class="md-nav__link md-nav__link--active">
       {{ nav_item.title }}
     </a>
-    {% if page.toc %}
+    {% if toc_ | first is defined %}
       {% include "partials/toc.html" %}
     {% endif %}
   </li>
diff --git a/material/partials/toc.html b/material/partials/toc.html
index 004067dda9103cf62ed2b27b9b50dc38f84b5b2f..5e44226b15829bf182ae58c126981cfdfe107281 100644
--- a/material/partials/toc.html
+++ b/material/partials/toc.html
@@ -1,10 +1,10 @@
 {% import "partials/language.html" as lang %}
 <nav class="md-nav md-nav--secondary">
   {% set toc_ = page.toc %}
-  {% if "\x3ch1 id=" in page.content %}
+  {% if toc_ | first is defined and "\x3ch1 id=" in page.content %}
     {% set toc_ = (toc_ | first).children %}
   {% endif %}
-  {% if toc_ and (toc_ | first) %}
+  {% if toc_ | first is defined %}
     <label class="md-nav__title" for="toc">{{ lang.t('toc.title') }}</label>
     <ul class="md-nav__list" data-md-scrollfix>
       {% for toc_item in toc_ %}
diff --git a/mkdocs.yml b/mkdocs.yml
index 216f2b10943eb0dd0405d3ce908ee6d10e703821..6f09d5b97c336be8bdccfa7d91cdf97b0e67dd7a 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -7,8 +7,8 @@ extra_css:
    - src/css.css
 
 # Repository
-#repo_name: it4i-admins/docs.it4i
-#repo_url: https://gitlab.it4i.cz/it4i-admins/docs.it4i
+repo_name: sccs/docs.it4i.cz
+repo_url: https://code.it4i.cz/sccs/docs.it4i.cz
 
 # Copyright
 copyright: Copyright (c) 2017 IT4Innovations__VERSION__
@@ -41,7 +41,7 @@ pages:
     - Compute Nodes: salomon/compute-nodes.md
     - Network:
       - InfiniBand Network: salomon/network.md
-      - IB Single-plane Topology: salomon/ib-single-plane-topology.md
+      - IB Single-Plane Topology: salomon/ib-single-plane-topology.md
       - 7D Enhanced Hypercube: salomon/7d-enhanced-hypercube.md
     - Storage: salomon/storage.md
     - PRACE User Support: salomon/prace.md
@@ -63,6 +63,8 @@ pages:
   - 'Software':
     - Lmod Environment: software/lmod.md
     - Modules Matrix: modules-matrix.md
+    - Singularity Container: software/singularity.md
+    - EasyBuild: software/easybuild.md
     - Salomon Software:
       - Available Modules: modules-salomon.md
       - Available Modules on UV: modules-salomon-uv.md
@@ -119,6 +121,7 @@ pages:
         - 'Fortran':
           - OpenCoarrays: salomon/software/numerical-languages/opencoarrays.md
       - Operating System: salomon/software/operating-system.md
+      - ParaView: salomon/software/paraview.md
     - Anselm Software:
       - Available Modules: modules-anselm.md
       - 'ANSYS':
@@ -195,6 +198,7 @@ pages:
 
 extra:
   #logo: img/logo2.png
+  repo_icon: gitlab
   palette:
     primary: 'grey'
     accent: 'yellow'
@@ -206,13 +210,19 @@ extra:
     next: 'Next'
   social:
     - type: 'gitlab'
-      link: 'https://code.it4i.cz'
+      link: 'https://code.it4i.cz/sccs/docs.it4i.cz'
+    - type: globe
+      link: https://www.it4i.cz
 
 markdown_extensions:
   - codehilite
-  - admonition
-  - toc:
-      permalink: '(H)'
+  - markdown.extensions.admonition:
+  - pymdownx.arithmatex:
+  - markdown.extensions.toc:
+      slugify: !!python/name:pymdownx.slugs.uslugify
+      permalink: î…—
+  - pymdownx.snippets:
+      base_path: docs.it4i/snippets
 
 google_analytics:
   - 'UA-90498826-1'
diff --git a/scripts/check_version.rb b/scripts/check_version.rb
new file mode 100644
index 0000000000000000000000000000000000000000..9d64e39e338c22ba49b51187591f11ba5021a630
--- /dev/null
+++ b/scripts/check_version.rb
@@ -0,0 +1,23 @@
+require 'faraday'
+require 'json'
+
+#print ARGV[0].downcase
+puts "Checking the ver for #{ARGV[0]}"
+
+response = Faraday.get "https://release-monitoring.org/api/projects/?pattern=#{ARGV[0].downcase}"
+
+fedora_j = JSON.parse( response.body )['projects']
+fedora = fedora_j.select { |h| h['name'] == ARGV[0].downcase }.first['version']
+
+response = Faraday.get "https://docs.it4i.cz/modules-matrix.json"
+
+it4i = JSON.parse( response.body )['projects'][ARGV[0]]
+it4i_short = it4i.split('-')[0]
+puts "IT4Innovations: #{it4i_short} (#{it4i})"
+puts "Upstream      : #{fedora}"
+
+if it4i_short.eql? fedora
+  puts "Identical"
+else
+  puts "Update available: upstream #{fedora} vs IT4I #{it4i_short}"
+end
diff --git a/scripts/modules-json.py b/scripts/modules-json.py
new file mode 100755
index 0000000000000000000000000000000000000000..f5abe70efbc797b62ec0f642bc11d29351e0ab6c
--- /dev/null
+++ b/scripts/modules-json.py
@@ -0,0 +1,67 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+import csv
+import collections
+import json
+from distutils.version import LooseVersion
+
+def get_data(filename):
+    '''function to read the data from the input csv file to use in the analysis'''
+    reader = [] # Just in case the file open fails
+    with open(filename, 'rb') as f:
+        reader = csv.reader(f,delimiter=',')
+        #returns all the data from the csv file in list form
+        #f.close() # May need to close the file when done
+        return list(reader)  # only return the reader when you have finished.
+
+your_list = []
+your_list += get_data('./scripts/modules-anselm.csv')
+your_list += get_data('./scripts/modules-salomon.csv')
+your_list += get_data('./scripts/modules-salomon-uv.csv')
+#print your_list
+
+#a=[["python/2.8.1",1],["python/2.9.1",2],["python/2.8.1",4],["python/3.0.1",4]]
+counts = dict()
+for i in your_list:
+  #print i[0]
+  #print int(i[1])
+  counts[i[0]]=counts.get(i[0], 0) + int(i[1])
+
+#print sorted(counts.items())
+
+c=[
+"---",
+"--A",
+"-S-",
+"-SA",
+"U--",
+"U-A",
+"US-",
+"USA",
+]
+
+software = dict()
+versions = ''
+clusters = ''
+prev = ''
+
+for m,i in sorted(counts.items()):
+  #print m
+  split =  m.split('/')
+  #print split
+  if len(split) > 1:
+    a = split[0]
+    b = split[1]
+    if split[0] != prev:
+      software[a] = {}
+    software[a][b] = '`' + c[i] + '`'
+    prev = a
+
+
+packages = {}
+
+for m in sorted(software.items(), key=lambda i: i[0].lower()):
+  packages[m[0]]=sorted(m[1], key=LooseVersion)[-1]
+
+data = {'total': len(packages), 'projects': packages } 
+print json.dumps(data)
diff --git a/scripts/modules-matrix.py b/scripts/modules-matrix.py
index 736f6a9b15f48e82b290e1dc2ff2034a72325a07..eab86f22b7953ab637699f35c7b441bb7038c0d6 100755
--- a/scripts/modules-matrix.py
+++ b/scripts/modules-matrix.py
@@ -1,6 +1,8 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
 import csv
 import collections
+from distutils.version import LooseVersion
 
 def get_data(filename):
     '''function to read the data form the input csv file to use in the analysis'''
@@ -38,11 +40,9 @@ c=[
 ]
 
 print '!!! Hint "Cluster Acronyms"'
-print '    \* A - Anselm'
-print '    \* S - Salomon'
-print '    \* U - uv1 at Salomon'
-
-print "| Module | Versions | Clusters |"
+print '    A - Anselm • S - Salomon • U - uv1 at Salomon'
+print
+print '| Module </br><input id="searchInput" placeholder="🔍 Filter" style="width: 8rem; border-radius: 0.2rem; color: black; padding-left: .2rem;"> | Versions | Clusters |'
 print "| ------ | -------- | -------- |"
 
 software = dict()
@@ -66,8 +66,12 @@ for m,i in sorted(counts.items()):
 
 for m in sorted(software.items(), key=lambda i: i[0].lower()):
   software = m[0]
-  versions = ''
-  clusters = ''
-  #print '</br>'.join(m[1].keys())
-  #print '</br>'.join(m[1].values())
-  print "| %s | %s | %s |" % (software, '</br>'.join(m[1].keys()), '</br>'.join(m[1].values()))
+  versions = []
+  clusters = []
+  for key in sorted(m[1], key=LooseVersion ):
+    versions.append(key)
+    clusters.append(m[1][key])
+  print "| %s | %s | %s |" % (software, '</br>'.join(versions), '</br>'.join(clusters))
+
+print
+print '---8<--- "modules_matrix_search.md"'
diff --git a/scripts/titlemd_test.py b/scripts/titlemd_test.py
index 38ad036adb514ac8b857b831155a62d5ef5f7b2e..49287c0ec98e78539157fdb4a8b27b1b550616a9 100755
--- a/scripts/titlemd_test.py
+++ b/scripts/titlemd_test.py
@@ -26,16 +26,18 @@ def main(location):
       # Loop through the list of lines and titlecase
       # any line beginning with '#'.
       return_value = 0
-      prev_line = lines.pop(0)
+      prev_line = lines[0]
       disabled = 0
       echo_filename = False
+      if location.find("mkdocs.yml") != -1:
+          disabled = 1
       for line in lines:
-          if line.startswith("``") and disabled == 0:
+          if (line.startswith("``") or line.startswith("extra:")) and disabled == 0:
               disabled = 1
           else:
-              if line.startswith("``") and disabled == 1:
+              if (line.startswith("``") or prev_line.startswith("pages:")) and disabled == 1:
                   disabled = 0
-          if line.startswith('#') and disabled == 0:
+          if line.startswith('#') and (disabled == 0) and (location.find("mkdocs.yml") == -1):
             if line != titlecase(line[:(line.find("]"))], callback=abbreviations)+line[(line.find("]")):]:
               if return_value == 0 and echo_filename == False:
                 print("%s" % location)
@@ -53,6 +55,15 @@ def main(location):
               print("+"+titlecase(prev_line[:(prev_line.find("]"))], callback=abbreviations)+prev_line[(prev_line.find("]")):],end="")
               print()
               return_value = 1
+          if ((location.find("mkdocs.yml") != -1) and not line.startswith('#') and disabled == 0):
+            if line != titlecase(line[:(line.find(":"))], callback=abbreviations)+line[(line.find(":")):]:
+              if return_value == 0 and echo_filename == False:
+                print("%s" % location)
+                echo_filename = True
+              print("-"+line,end="")
+              print("+"+titlecase(line[:(line.find(":"))], callback=abbreviations)+line[(line.find(":")):],end="")
+              print()
+              return_value = 1
           prev_line = line
       exit(return_value)
 if __name__ == "__main__":