diff --git a/.spelling b/.spelling
index 1cb0368c8bf540de371e775f49e4848e7a78d425..64de0b4058261377677213f466750d4cc34dd95d 100644
--- a/.spelling
+++ b/.spelling
@@ -33,6 +33,7 @@ TotalView
 Valgrind
 ParaView
 OpenFOAM
+MAX_FAIRSHARE
 MPI4Py
 MPICH2
 PETSc
@@ -86,6 +87,173 @@ AnyConnect
 X11
 backfilling
 backfilled
+SCP
+Lustre
+QDR
+TFLOP
+ncpus
+myjob
+pernode
+mpiprocs
+ompthreads
+qprace
+runtime
+SVS
+ppn
+Multiphysics
+aeroacoustics
+turbomachinery
+CFD
+LS-DYNA
+APDL
+MAPDL
+multiphysics
+AUTODYN
+RSM
+Molpro
+initio
+parallelization
+NWChem
+SCF
+ISV
+profiler
+Pthreads
+profilers
+OTF
+PAPI
+PCM
+uncore
+pre-processing
+prepend
+CXX
+prepended
+POMP2
+Memcheck
+unaddressable
+OTF2
+GPI-2
+GASPI
+GPI
+MKL
+IPP
+TBB
+GSL
+Omics
+VNC
+Scalasca
+IFORT
+interprocedural
+IDB
+cloop
+qcow
+qcow2
+vmdk
+vdi
+virtio
+paravirtualized
+Gbit
+tap0
+UDP
+TCP
+preload
+qfat
+Rmpi
+DCT
+datasets
+dataset
+preconditioners
+partitioners
+PARDISO
+PaStiX
+SuiteSparse
+SuperLU
+ExodusII
+NetCDF
+ParMETIS
+multigrid
+HYPRE
+SPAI
+Epetra
+EpetraExt
+Tpetra
+64-bit
+Belos
+GMRES
+Amesos
+IFPACK
+preconditioner
+Teuchos
+Makefiles
+SAXPY
+NVCC
+VCF
+HGMD
+HUMSAVAR
+ClinVar
+indels
+CIBERER
+exomes
+tmp
+SSHFS
+RSYNC
+unmount
+Cygwin
+CygwinX
+RFB
+TightVNC
+TigerVNC
+GUIs
+XLaunch
+UTF-8
+numpad
+PuTTYgen
+OpenSSH
+IE11
+x86
+r21u01n577
+7120P
+interprocessor
+IPN
+toolchains
+toolchain
+APIs
+easyblocks
+GM200
+GeForce
+GTX
+IRUs
+ASIC
+backplane
+ICEX
+IRU
+PFLOP
+T950B
+ifconfig
+inet
+addr
+checkbox
+appfile
+programmatically
+http
+https
+filesystem
+phono3py
+HDF
+splitted
+automize
+llvm
+PGI
+GUPC
+BUPC
+IBV
+Aislinn
+nondeterminism
+stdout
+stderr
+i.e.
+pthreads
+uninitialised
+broadcasted
  - docs.it4i/anselm-cluster-documentation/environment-and-modules.md
 MODULEPATH
 bashrc
@@ -119,6 +287,7 @@ Rmax
 E5-2665
 E5-2470
 P5110
+isw
  - docs.it4i/anselm-cluster-documentation/introduction.md
 RedHat
  - docs.it4i/anselm-cluster-documentation/job-priority.md
@@ -126,6 +295,8 @@ walltime
 qexp
 _List.fairshare
 _time
+_FAIRSHARE
+1E6
  - docs.it4i/anselm-cluster-documentation/job-submission-and-execution.md
 15209.srv11
 qsub
@@ -146,6 +317,15 @@ jobscript
 cn108
 cn109
 cn110
+Name0
+cn17
+_NODEFILE
+_O
+_WORKDIR
+mympiprog.x
+_JOBID
+myprog.x
+openmpi
  - docs.it4i/anselm-cluster-documentation/network.md
 ib0
  - docs.it4i/anselm-cluster-documentation/prace.md
@@ -153,14 +333,19 @@ PRACE
 qfree
 it4ifree
 it4i.portal.clients
+prace
+1h
  - docs.it4i/anselm-cluster-documentation/shell-and-data-access.md
 VPN
  - docs.it4i/anselm-cluster-documentation/software/ansys/ansys-cfx.md
 ANSYS
 CFX
 cfx.pbs
+_r
+ane3fl
  - docs.it4i/anselm-cluster-documentation/software/ansys/ansys-mechanical-apdl.md
 mapdl.pbs
+_dy
  - docs.it4i/anselm-cluster-documentation/software/ansys/ls-dyna.md
 HPC
 lsdyna.pbs
@@ -175,9 +360,25 @@ Makefile
  - docs.it4i/anselm-cluster-documentation/software/gpi2.md
 gcc
 cn79
+helloworld
+_gpi.c
+ibverbs
+gaspi
+_logger
  - docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-compilers.md
 Haswell
 CPUs
+ipo
+O3
+vec
+xAVX
+omp
+simd
+ivdep
+pragmas
+openmp
+xCORE-AVX2
+axCORE-AVX2
  - docs.it4i/anselm-cluster-documentation/software/kvirtualization.md
 rc.local
 runlevel
@@ -189,6 +390,8 @@ VDE
 smb.conf
 TMPDIR
 run.bat.
+slirp
+NATs
  - docs.it4i/anselm-cluster-documentation/software/mpi/mpi4py-mpi-for-python.md
 NumPy
  - docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab_1314.md
@@ -197,33 +400,73 @@ matlabcode.m
 output.out
 matlabcodefile
 sched
+_feature
  - docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab.md
 UV2000
+maxNumCompThreads
+SalomonPBSPro
  - docs.it4i/anselm-cluster-documentation/software/numerical-languages/octave.md
 _THREADS
+_NUM
  - docs.it4i/anselm-cluster-documentation/software/numerical-libraries/trilinos.md
 CMake-aware
 Makefile.export
+_PACKAGE
+_CXX
+_COMPILER
+_INCLUDE
+_DIRS
+_LIBRARY
  - docs.it4i/anselm-cluster-documentation/software/ansys/ansys-ls-dyna.md
 ansysdyna.pbs
  - docs.it4i/anselm-cluster-documentation/software/ansys/ansys.md
 svsfem.cz
+_
  - docs.it4i/anselm-cluster-documentation/software/debuggers/valgrind.md
 libmpiwrap-amd64-linux
+O0
+valgrind
+malloc
+_PRELOAD
  - docs.it4i/anselm-cluster-documentation/software/numerical-libraries/magma-for-intel-xeon-phi.md
 cn204
+_LIBS
+MAGMAROOT
+_magma
+_server
+_anselm
+_from
+_mic.sh
+_dgetrf
+_mic
+_03.pdf
  - docs.it4i/anselm-cluster-documentation/software/paraview.md
 cn77
 localhost
+v4.0.1
  - docs.it4i/anselm-cluster-documentation/storage.md
 ssh.du1.cesnet.cz
 Plzen
 ssh.du2.cesnet.cz
 ssh.du3.cesnet.cz
+tier1
+_home
+_cache
+_tape
  - docs.it4i/salomon/environment-and-modules.md
 icc
+ictce
+ifort
+imkl
+intel
+gompi
+goolf
+BLACS
+iompi
+iccifort
  - docs.it4i/salomon/hardware-overview.md
 HW
+E5-4627v2
  - docs.it4i/salomon/job-submission-and-execution.md
 15209.isrv5
 r21u01n577
@@ -248,6 +491,7 @@ mkdir
 mympiprog.x
 mpiexec
 myprog.x
+r4i7n0.ib0.smc.salomon.it4i.cz
  - docs.it4i/salomon/7d-enhanced-hypercube.md
 cns1
 cns576
@@ -256,7 +500,165 @@ r4i7n17
 cns577
 cns1008
 r37u31n1008
+7D
  - docs.it4i/anselm-cluster-documentation/resources-allocation-policy.md
 qsub
 it4ifree
 it4i.portal.clients
+ - docs.it4i/anselm-cluster-documentation/software/ansys/ansys-fluent.md
+anslic
+_admin
+ - docs.it4i/anselm-cluster-documentation/software/chemistry/nwchem.md
+_DIR
+ - docs.it4i/anselm-cluster-documentation/software/comsol-multiphysics.md
+EDU
+comsol
+_matlab.pbs
+_job.m
+mphstart
+ - docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-performance-reports.md
+perf-report
+perf
+txt
+html
+mympiprog
+_32p
+ - docs.it4i/anselm-cluster-documentation/software/debuggers/intel-vtune-amplifier.md
+Hotspots
+ - docs.it4i/anselm-cluster-documentation/software/debuggers/scalasca.md
+scorep
+ - docs.it4i/anselm-cluster-documentation/software/isv_licenses.md
+edu
+ansys
+_features
+_state.txt
+f1
+matlab
+acfd
+_ansys
+_acfd
+_aa
+_comsol
+HEATTRANSFER
+_HEATTRANSFER
+COMSOLBATCH
+_COMSOLBATCH
+STRUCTURALMECHANICS
+_STRUCTURALMECHANICS
+_matlab
+_Toolbox
+_Image
+_Distrib
+_Comp
+_Engine
+_Acquisition
+pmode
+matlabpool
+ - docs.it4i/anselm-cluster-documentation/software/mpi/mpi.md
+mpirun
+BLAS1
+FFT
+KMP
+_AFFINITY
+GOMP
+_CPU
+bullxmpi-1
+mpich2
+ - docs.it4i/anselm-cluster-documentation/software/mpi/Running_OpenMPI.md
+bysocket
+bycore
+ - docs.it4i/anselm-cluster-documentation/software/numerical-libraries/fftw.md
+gcc3.3.3
+pthread
+fftw3
+lfftw3
+_threads-lfftw3
+_omp
+icc3.3.3
+FFTW2
+gcc2.1.5
+fftw2
+lfftw
+_threads
+icc2.1.5
+fftw-mpi3
+_mpi
+fftw3-mpi
+fftw2-mpi
+IntelMPI
+ - docs.it4i/anselm-cluster-documentation/software/numerical-libraries/gsl.md
+dwt.c
+mkl
+lgsl
+ - docs.it4i/anselm-cluster-documentation/software/numerical-libraries/hdf5.md
+icc
+hdf5
+_INC
+_SHLIB
+_CPP
+_LIB
+_F90
+gcc49
+ - docs.it4i/anselm-cluster-documentation/software/numerical-libraries/petsc.md
+_Dist
+ - docs.it4i/anselm-cluster-documentation/software/nvidia-cuda.md
+lcublas
+ - docs.it4i/anselm-cluster-documentation/software/operating-system.md
+6.x
+ - docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/cygwin-and-x11-forwarding.md
+startxwin
+cygwin64binXWin.exe
+tcp
+ - docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system.md
+Xming
+XWin.exe.
+ - docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/pageant.md
+_rsa.ppk
+ - docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/puttygen.md
+_keys
+organization.example.com
+_rsa
+ - docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md
+vpnui.exe
+ - docs.it4i/salomon/ib-single-plane-topology.md
+36-port
+Mcell.pdf
+r21-r38
+nodes.pdf
+ - docs.it4i/salomon/introduction.md
+E5-2680v3
+ - docs.it4i/salomon/network.md
+r4i1n0
+r4i1n1
+r4i1n2
+r4i1n3
+ip
+ - docs.it4i/salomon/software/ansys/setting-license-preferences.md
+ansys161
+ - docs.it4i/salomon/software/ansys/workbench.md
+mpifile.txt
+solvehandlers.xml
+ - docs.it4i/salomon/software/chemistry/phono3py.md
+vasprun.xml
+disp-XXXXX
+disp
+_fc3.yaml
+ir
+_grid
+_points.yaml
+gofree-cond1
+ - docs.it4i/salomon/software/compilers.md
+HPF
+ - docs.it4i/salomon/software/comsol/licensing-and-available-versions.md
+ver
+ - docs.it4i/salomon/software/debuggers/aislinn.md
+test.cpp
+ - docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md
+vtune
+_update1
+ - docs.it4i/salomon/software/debuggers/valgrind.md
+EBROOTVALGRIND
+ - docs.it4i/salomon/software/intel-suite/intel-advisor.md
+O2
+ - docs.it4i/salomon/software/intel-suite/intel-compilers.md
+UV1
diff --git a/docs.it4i/anselm/environment-and-modules.md b/docs.it4i/anselm/environment-and-modules.md
index 21230e9e07911f4632105d0f4c8006aff7393254..a0aa6d0b0e830a3d8b0fdd37fea869be99548540 100644
--- a/docs.it4i/anselm/environment-and-modules.md
+++ b/docs.it4i/anselm/environment-and-modules.md
@@ -24,7 +24,7 @@ fi
 ```
 
 !!! note
-    Do not run commands outputting to standard output (echo, module list, etc) in .bashrc for non-interactive SSH sessions. It breaks fundamental functionality (scp, PBS) of your account! Conside utilization of SSH session interactivity for such commands as stated in the previous example.
+    Do not run commands outputting to standard output (echo, module list, etc) in .bashrc for non-interactive SSH sessions. It breaks fundamental functionality (SCP, PBS) of your account! Consider utilization of SSH session interactivity for such commands as stated in the previous example.
 
 ## Application Modules
 
diff --git a/docs.it4i/anselm/job-submission-and-execution.md b/docs.it4i/anselm/job-submission-and-execution.md
index b0ea19bd17cecb7fb2199c6112e0e0340a1d0b1a..f0b88f594e293dd794f0c8b30895c446274f50a1 100644
--- a/docs.it4i/anselm/job-submission-and-execution.md
+++ b/docs.it4i/anselm/job-submission-and-execution.md
@@ -322,7 +322,7 @@ cd $SCRDIR || exit
 cp $PBS_O_WORKDIR/input .
 cp $PBS_O_WORKDIR/mympiprog.x .
 
-# load the mpi module
+# load the MPI module
 module load openmpi
 
 # execute the calculation
@@ -360,7 +360,7 @@ Example jobscript for an MPI job with preloaded inputs and executables, options
 SCRDIR=/scratch/$USER/myjob
 cd $SCRDIR || exit
 
-# load the mpi module
+# load the MPI module
 module load openmpi
 
 # execute the calculation
diff --git a/docs.it4i/anselm/software/ansys/ansys-cfx.md b/docs.it4i/anselm/software/ansys/ansys-cfx.md
index b816f026430e7573ddffb81d5ced15770994435b..343bb267036c43c00c1e7020b91e304b3ed8cde5 100644
--- a/docs.it4i/anselm/software/ansys/ansys-cfx.md
+++ b/docs.it4i/anselm/software/ansys/ansys-cfx.md
@@ -49,7 +49,7 @@ echo Machines: $hl
 
 Header of the PBS file (above) is common and description can be find on [this site](../../job-submission-and-execution/). SVS FEM recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources.
 
-Working directory has to be created before sending PBS job into the queue. Input file should be in working directory or full path to input file has to be specified. >Input file has to be defined by common CFX def file which is attached to the cfx solver via parameter -def
+Working directory has to be created before sending PBS job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common CFX def file which is attached to the CFX solver via parameter -def
 
 **License** should be selected by parameter -P (Big letter **P**). Licensed products are the following: aa_r (ANSYS **Academic** Research), ane3fl (ANSYS Multiphysics)-**Commercial**.
 [More about licensing here](licensing/)
diff --git a/docs.it4i/anselm/software/debuggers/allinea-ddt.md b/docs.it4i/anselm/software/debuggers/allinea-ddt.md
index 6c1c664fb22163d3f9eadd023486494870f2a0a9..a5dd069ba07728662ce817cb713eeb7325002998 100644
--- a/docs.it4i/anselm/software/debuggers/allinea-ddt.md
+++ b/docs.it4i/anselm/software/debuggers/allinea-ddt.md
@@ -4,7 +4,7 @@ Allinea Forge consist of two tools - debugger DDT and profiler MAP.
 
 Allinea DDT, is a commercial debugger primarily for debugging parallel MPI or OpenMP programs. It also has a support for GPU (CUDA) and Intel Xeon Phi accelerators. DDT provides all the standard debugging features (stack trace, breakpoints, watches, view variables, threads etc.) for every thread running as part of your program, or for every process - even if these processes are distributed across a cluster using an MPI implementation.
 
-Allinea MAP is a profiler for C/C++/Fortran HPC codes. It is designed for profiling parallel code, which uses pthreads, OpenMP or MPI.
+Allinea MAP is a profiler for C/C++/Fortran HPC codes. It is designed for profiling parallel code, which uses Pthreads, OpenMP or MPI.
 
 ## License and Limitations for Anselm Users
 
diff --git a/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md b/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md
index 614e6277ba5fcb8401b9a68668626709aa143ede..7b519b85c094122ff7866ad8a48961ee89dbb4b9 100644
--- a/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md
+++ b/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md
@@ -29,7 +29,7 @@ Instead of [running your MPI program the usual way](../mpi/), use the the perf r
     $ perf-report mpirun ./mympiprog.x
 ```
 
-The mpi program will run as usual. The perf-report creates two additional files, in \*.txt and \*.html format, containing the performance report. Note that [demanding MPI codes should be run within the queue system](../../job-submission-and-execution/).
+The MPI program will run as usual. The perf-report creates two additional files, in \*.txt and \*.html format, containing the performance report. Note that [demanding MPI codes should be run within the queue system](../../job-submission-and-execution/).
 
 ## Example
 
diff --git a/docs.it4i/anselm/software/debuggers/cube.md b/docs.it4i/anselm/software/debuggers/cube.md
index a7f88955e78159f5800a37e603f91fa09e3ccdbe..29bd38627031493948b74610deac1c66f3378f1f 100644
--- a/docs.it4i/anselm/software/debuggers/cube.md
+++ b/docs.it4i/anselm/software/debuggers/cube.md
@@ -30,7 +30,7 @@ CUBE is a graphical application. Refer to Graphical User Interface documentation
 !!! note
     Analyzing large data sets can consume large amount of CPU and RAM. Do not perform large analysis on login nodes.
 
-After loading the appropriate module, simply launch cube command, or alternatively you can use scalasca -examine command to launch the GUI. Note that for Scalasca datasets, if you do not analyze the data with scalasca -examine before to opening them with CUBE, not all performance data will be available.
+After loading the appropriate module, simply launch the cube command, or alternatively you can use the `scalasca -examine` command to launch the GUI. Note that for Scalasca data sets, if you do not analyze the data with `scalasca -examine` before opening them with CUBE, not all performance data will be available.
 
 References
 1\.  <http://www.scalasca.org/software/cube-4.x/download.html>
diff --git a/docs.it4i/anselm/software/debuggers/papi.md b/docs.it4i/anselm/software/debuggers/papi.md
index bc36923e83e2d464b40e41b3b43ce4316289c3f4..2ea155932905439904335bd5712df30dd19f5b64 100644
--- a/docs.it4i/anselm/software/debuggers/papi.md
+++ b/docs.it4i/anselm/software/debuggers/papi.md
@@ -10,13 +10,13 @@ PAPI can be used with parallel as well as serial programs.
 
 ## Usage
 
-To use PAPI, load [module](../../environment-and-modules/) papi:
+To use PAPI, load the [module](../../environment-and-modules/) `papi`:
 
 ```bash
     $ module load papi
 ```
 
-This will load the default version. Execute module avail papi for a list of installed versions.
+This will load the default version. Execute `module avail papi` for a list of installed versions.
 
 ## Utilities
 
diff --git a/docs.it4i/anselm/software/debuggers/scalasca.md b/docs.it4i/anselm/software/debuggers/scalasca.md
index 19daec04e24247f40721c8ef61632d17290daa80..c01271a5349cc7d90a9044165246c0051b99b7c0 100644
--- a/docs.it4i/anselm/software/debuggers/scalasca.md
+++ b/docs.it4i/anselm/software/debuggers/scalasca.md
@@ -23,13 +23,13 @@ Profiling a parallel application with Scalasca consists of three steps:
 
 ### Instrumentation
 
-Instrumentation via " scalasca -instrument" is discouraged. Use [Score-P instrumentation](score-p/).
+Instrumentation via `scalasca -instrument` is discouraged. Use [Score-P instrumentation](score-p/).
 
 ### Runtime Measurement
 
 After the application is instrumented, runtime measurement can be performed with the `scalasca -analyze` command. The syntax is:
 
-`scalasca -analyze [scalasca options][launcher] [launcher options][program] [program options]`
+`scalasca -analyze [scalasca options] [launcher] [launcher options] [program] [program options]`
 
 An example :
 
@@ -61,7 +61,7 @@ If you do not wish to launch the GUI tool, use the "-s" option :
 scalasca -examine -s <experiment_directory>
 ```
 
-Alternatively you can open CUBE and load the data directly from here. Keep in mind that in that case the preprocessing is not done and not all metrics will be shown in the viewer.
+Alternatively you can open CUBE and load the data directly from here. Keep in mind that in that case the pre-processing is not done and not all metrics will be shown in the viewer.
 
 Refer to [CUBE documentation](cube/) on usage of the GUI viewer.
 
diff --git a/docs.it4i/anselm/software/debuggers/valgrind.md b/docs.it4i/anselm/software/debuggers/valgrind.md
index 2602fdbf24c9bdf16503740541ed81c536628b5a..0f704ebcde028d4acea42c8f474230c41d12325c 100644
--- a/docs.it4i/anselm/software/debuggers/valgrind.md
+++ b/docs.it4i/anselm/software/debuggers/valgrind.md
@@ -259,4 +259,4 @@ Prints this output : (note that there is output printed for every launched MPI p
     ==31319== ERROR SUMMARY: 1 errors from 1 contexts (suppressed: 4 from 4)
 ```
 
-We can see that Valgrind has reported use of unitialised memory on the master process (which reads the array to be broadcast) and use of unaddresable memory on both processes.
+We can see that Valgrind has reported use of uninitialized memory on the master process (which reads the array to be broadcast) and use of unaddressable memory on both processes.
diff --git a/docs.it4i/anselm/software/gpi2.md b/docs.it4i/anselm/software/gpi2.md
index ec96e2653a3bfeb9614be13b969ff3273b3ee255..2bf869695196b533c41ee3669e4223123135bcde 100644
--- a/docs.it4i/anselm/software/gpi2.md
+++ b/docs.it4i/anselm/software/gpi2.md
@@ -155,7 +155,7 @@ Submit the job and run the GPI-2 application
     Hello from rank 0 of 2
 ```
 
-At the same time, in another session, you may start the gaspi logger:
+At the same time, in another session, you may start the GASPI logger:
 
 ```bash
     $ ssh cn79
diff --git a/docs.it4i/anselm/software/intel-suite/intel-compilers.md b/docs.it4i/anselm/software/intel-suite/intel-compilers.md
index 66de3b77a06d7333464336ada10d68cd3a899aa8..9cc1f1e0af6abf09cc00d39f7c7dd1c37d78c9e1 100644
--- a/docs.it4i/anselm/software/intel-suite/intel-compilers.md
+++ b/docs.it4i/anselm/software/intel-suite/intel-compilers.md
@@ -1,6 +1,6 @@
 # Intel Compilers
 
-The Intel compilers version 13.1.1 are available, via module intel. The compilers include the icc C and C++ compiler and the ifort fortran 77/90/95 compiler.
+The Intel compilers version 13.1.1 are available, via the module `intel`. The compilers include the `icc` C and C++ compiler and the `ifort` Fortran 77/90/95 compiler.
 
 ```bash
     $ module load intel
@@ -8,7 +8,7 @@ The Intel compilers version 13.1.1 are available, via module intel. The compiler
     $ ifort -v
 ```
 
-The intel compilers provide for vectorization of the code, via the AVX instructions and support threading parallelization via OpenMP
+The Intel compilers provide for vectorization of the code, via the AVX instructions and support threading parallelization via OpenMP
 
 For maximum performance on the Anselm cluster, compile your programs using the AVX instructions, with reporting where the vectorization was used. We recommend following compilation options for high performance
 
diff --git a/docs.it4i/anselm/software/intel-suite/intel-debugger.md b/docs.it4i/anselm/software/intel-suite/intel-debugger.md
index f13086df7431676a95a75b5258a10667a3464c57..d3acd2c51a364c9a48a7b26c0fcfb96256e0bc5f 100644
--- a/docs.it4i/anselm/software/intel-suite/intel-debugger.md
+++ b/docs.it4i/anselm/software/intel-suite/intel-debugger.md
@@ -70,4 +70,4 @@ Run the idb debugger in GUI mode. The menu Parallel contains number of tools for
 
 ## Further Information
 
-Exhaustive manual on idb features and usage is published at [Intel website](http://software.intel.com/sites/products/documentation/doclib/stdxe/2013/composerxe/debugger/user_guide/index.htm)
+Exhaustive manual on IDB features and usage is published at [Intel website](http://software.intel.com/sites/products/documentation/doclib/stdxe/2013/composerxe/debugger/user_guide/index.htm)
diff --git a/docs.it4i/anselm/software/isv_licenses.md b/docs.it4i/anselm/software/isv_licenses.md
index 56270b51feca30fe2ec4f297da6cb0d6ee62d6e7..1baf941db8c8f8090fdf8f26cf60a336f0d71842 100644
--- a/docs.it4i/anselm/software/isv_licenses.md
+++ b/docs.it4i/anselm/software/isv_licenses.md
@@ -61,11 +61,13 @@ The general format of the name is `feature__APP__FEATURE`.
 
 Names of applications (APP):
 
-* ansys
-* comsol
-* comsol-edu
-* matlab
-* matlab-edu
+```bash
+    ansys
+    comsol
+    comsol-edu
+    matlab
+    matlab-edu
+```
 
 To get the FEATUREs of a license take a look into the corresponding state file ([see above](isv_licenses/#Licence)), or use:
 
diff --git a/docs.it4i/anselm/software/mpi/mpi.md b/docs.it4i/anselm/software/mpi/mpi.md
index bc60afb16ebee9968d942c0e4189f79705118276..08be4bce682e507968cbbc0a03b32854193d3352 100644
--- a/docs.it4i/anselm/software/mpi/mpi.md
+++ b/docs.it4i/anselm/software/mpi/mpi.md
@@ -111,7 +111,7 @@ Compile the above example with
     The MPI program executable must be compatible with the loaded MPI module.
     Always compile and execute using the very same MPI module.
 
-It is strongly discouraged to mix mpi implementations. Linking an application with one MPI implementation and running mpirun/mpiexec form other implementation may result in unexpected errors.
+It is strongly discouraged to mix MPI implementations. Linking an application with one MPI implementation and running mpirun/mpiexec from another implementation may result in unexpected errors.
 
 The MPI program executable must be available within the same path on all nodes. This is automatically fulfilled on the /home and /scratch file system. You need to preload the executable, if running on the local scratch /lscratch file system.
 
diff --git a/docs.it4i/anselm/software/numerical-languages/matlab.md b/docs.it4i/anselm/software/numerical-languages/matlab.md
index d7c3d907452ca38deea8f07235170ead3114c1eb..cfc958b7e93cde09f34e424d92717f53915097d4 100644
--- a/docs.it4i/anselm/software/numerical-languages/matlab.md
+++ b/docs.it4i/anselm/software/numerical-languages/matlab.md
@@ -274,8 +274,8 @@ You can use MATLAB on UV2000 in two parallel modes:
 
 ### Threaded Mode
 
-Since this is a SMP machine, you can completely avoid using Parallel Toolbox and use only MATLAB's threading. MATLAB will automatically detect the number of cores you have allocated and will set maxNumCompThreads accordingly and certain operations, such as fft, , eig, svd, etc. will be automatically run in threads. The advantage of this mode is that you don't need to modify your existing sequential codes.
+Since this is a SMP machine, you can completely avoid using Parallel Toolbox and use only MATLAB's threading. MATLAB will automatically detect the number of cores you have allocated and will set maxNumCompThreads accordingly and certain operations, such as `fft`, `eig`, `svd` etc. will be automatically run in threads. The advantage of this mode is that you don't need to modify your existing sequential codes.
 
 ### Local Cluster Mode
 
-You can also use Parallel Toolbox on UV2000. Use l[ocal cluster mode](matlab/#parallel-matlab-batch-job-in-local-mode), "SalomonPBSPro" profile will not work.
+You can also use Parallel Toolbox on UV2000. Use [local cluster mode](matlab/#parallel-matlab-batch-job-in-local-mode), "SalomonPBSPro" profile will not work.
diff --git a/docs.it4i/anselm/software/numerical-languages/matlab_1314.md b/docs.it4i/anselm/software/numerical-languages/matlab_1314.md
index 8c1012531c67f272907e154addb5f336e636eaf6..ca2a6261648efd0f9a2e29ff980b6b1b2fd6a361 100644
--- a/docs.it4i/anselm/software/numerical-languages/matlab_1314.md
+++ b/docs.it4i/anselm/software/numerical-languages/matlab_1314.md
@@ -84,7 +84,7 @@ Once this file is in place, user can request resources from PBS. Following examp
     -l feature__matlab__MATLAB=1
 ```
 
-This qsub command example shows how to run Matlab with 32 workers in following configuration: 2 nodes (use all 16 cores per node) and 16 workers = mpirocs per node (-l select=2:ncpus=16:mpiprocs=16). If user requires to run smaller number of workers per node then the "mpiprocs" parameter has to be changed.
+This qsub command example shows how to run Matlab with 32 workers in following configuration: 2 nodes (use all 16 cores per node) and 16 workers = mpiprocs per node (-l select=2:ncpus=16:mpiprocs=16). If the user requires running a smaller number of workers per node, then the "mpiprocs" parameter has to be changed.
 
 The second part of the command shows how to request all necessary licenses. In this case 1 Matlab-EDU license and 32 Distributed Computing Engines licenses.
 
diff --git a/docs.it4i/anselm/software/numerical-libraries/fftw.md b/docs.it4i/anselm/software/numerical-libraries/fftw.md
index 038e1223a44cde79a37f2f7fe59fab9f7e5a8e8e..ed14410d2201296856082e5d259ffbadd01c4031 100644
--- a/docs.it4i/anselm/software/numerical-libraries/fftw.md
+++ b/docs.it4i/anselm/software/numerical-libraries/fftw.md
@@ -4,7 +4,7 @@ The discrete Fourier transform in one or more dimensions, MPI parallel
 
 FFTW is a C subroutine library for computing the discrete Fourier transform in one or more dimensions, of arbitrary input size, and of both real and complex data (as well as of even/odd data, e.g. the discrete cosine/sine transforms or DCT/DST). The FFTW library allows for MPI parallel, in-place discrete Fourier transform, with data distributed over number of nodes.
 
-Two versions, **3.3.3** and **2.1.5** of FFTW are available on Anselm, each compiled for **Intel MPI** and **OpenMPI** using **intel** and **gnu** compilers. These are available via modules:
+Two versions, **3.3.3** and **2.1.5** of FFTW are available on Anselm, each compiled for **Intel MPI** and **OpenMPI** using **Intel** and **gnu** compilers. These are available via modules:
 
 | Version        | Parallelization | module              | linker options                      |
 | -------------- | --------------- | ------------------- | ----------------------------------- |
diff --git a/docs.it4i/anselm/software/numerical-libraries/hdf5.md b/docs.it4i/anselm/software/numerical-libraries/hdf5.md
index d9abd72c405ab3ff867203fbe7c9408e9e7c5d7c..5551864b7bfe17f83203890af0e6250fcf646642 100644
--- a/docs.it4i/anselm/software/numerical-libraries/hdf5.md
+++ b/docs.it4i/anselm/software/numerical-libraries/hdf5.md
@@ -4,7 +4,7 @@ Hierarchical Data Format library. Serial and MPI parallel version.
 
 [HDF5 (Hierarchical Data Format)](http://www.hdfgroup.org/HDF5/) is a general purpose library and file format for storing scientific data. HDF5 can store two primary objects: datasets and groups. A dataset is essentially a multidimensional array of data elements, and a group is a structure for organizing objects in an HDF5 file. Using these two basic objects, one can create and store almost any kind of scientific data structure, such as images, arrays of vectors, and structured and unstructured grids. You can also mix and match them in HDF5 files according to your needs.
 
-Versions **1.8.11** and **1.8.13** of HDF5 library are available on Anselm, compiled for **Intel MPI** and **OpenMPI** using **intel** and **gnu** compilers. These are available via modules:
+Versions **1.8.11** and **1.8.13** of HDF5 library are available on Anselm, compiled for **Intel MPI** and **OpenMPI** using **Intel** and **gnu** compilers. These are available via modules:
 
 | Version               | Parallelization                   | module                     | C linker options      | C++ linker options      | Fortran linker options  |
 | --------------------- | --------------------------------- | -------------------------- | --------------------- | ----------------------- | ----------------------- |
diff --git a/docs.it4i/anselm/software/numerical-libraries/petsc.md b/docs.it4i/anselm/software/numerical-libraries/petsc.md
index 528d13ddbcaffdc9f8b0a80bee379b05602317d7..beb0715cc4580eec6244ff7e809e3c1a881689e2 100644
--- a/docs.it4i/anselm/software/numerical-libraries/petsc.md
+++ b/docs.it4i/anselm/software/numerical-libraries/petsc.md
@@ -32,9 +32,7 @@ PETSc needs at least MPI, BLAS and LAPACK. These dependencies are currently sati
 PETSc can be linked with a plethora of [external numerical libraries](http://www.mcs.anl.gov/petsc/miscellaneous/external.html), extending PETSc functionality, e.g. direct linear system solvers, preconditioners or partitioners. See below a list of libraries currently included in Anselm `petsc` modules.
 
 All these libraries can be used also alone, without PETSc. Their static or shared program libraries are available in
-`$PETSC_DIR/$PETSC_ARCH/lib` and header files in `$PETSC_DIR/$PETSC_ARCH/include`. `PETSC_DIR` and `PETSC_ARCH` are environment variables pointing to a specific PETSc instance based on the petsc module loaded.
-
-### Libraries Linked to PETSc on Anselm (As of 11 April 2015)
+`$PETSC_DIR/$PETSC_ARCH/lib` and header files in `$PETSC_DIR/$PETSC_ARCH/include`. `PETSC_DIR` and `PETSC_ARCH` are environment variables pointing to a specific PETSc instance based on the PETSc module loaded.
 
 * dense linear algebra
   * [Elemental](http://libelemental.org/)
diff --git a/docs.it4i/anselm/software/numerical-libraries/trilinos.md b/docs.it4i/anselm/software/numerical-libraries/trilinos.md
index 42f8bc0dc4ca5318cca883193e5fc61eb207b9b1..67486ad6647af068722716b40dd7f876ac175dfe 100644
--- a/docs.it4i/anselm/software/numerical-libraries/trilinos.md
+++ b/docs.it4i/anselm/software/numerical-libraries/trilinos.md
@@ -34,7 +34,7 @@ First, load the appropriate module:
 
 For the compilation of CMake-aware project, Trilinos provides the FIND_PACKAGE( Trilinos ) capability, which makes it easy to build against Trilinos, including linking against the correct list of libraries. For details, see <http://trilinos.sandia.gov/Finding_Trilinos.txt>
 
-For compiling using simple makefiles, Trilinos provides Makefile.export system, which allows users to include important Trilinos variables directly into their makefiles. This can be done simply by inserting the following line into the makefile:
+For compiling using simple Makefiles, Trilinos provides Makefile.export system, which allows users to include important Trilinos variables directly into their Makefiles. This can be done simply by inserting the following line into the Makefile:
 
 ```bash
     include Makefile.export.Trilinos
@@ -46,4 +46,4 @@ or
     include Makefile.export.<package>
 ```
 
-if you are interested only in a specific Trilinos package. This will give you access to the variables such as Trilinos_CXX_COMPILER, Trilinos_INCLUDE_DIRS, Trilinos_LIBRARY_DIRS etc. For the detailed description and example makefile see <http://trilinos.sandia.gov/Export_Makefile.txt>.
+if you are interested only in a specific Trilinos package. This will give you access to the variables such as Trilinos_CXX_COMPILER, Trilinos_INCLUDE_DIRS, Trilinos_LIBRARY_DIRS etc. For the detailed description and example Makefile see <http://trilinos.sandia.gov/Export_Makefile.txt>.
diff --git a/docs.it4i/anselm/software/omics-master/diagnostic-component-team.md b/docs.it4i/anselm/software/omics-master/diagnostic-component-team.md
index d8d0c4fc4e26a25550cb96b6dbe16a7a587fecf5..6dc3cb9535a0b407d447c09834a5933745d34709 100644
--- a/docs.it4i/anselm/software/omics-master/diagnostic-component-team.md
+++ b/docs.it4i/anselm/software/omics-master/diagnostic-component-team.md
@@ -11,7 +11,7 @@ TEAM is available at the [following address](http://omics.it4i.cz/team/)
 
 VCF files are scanned by this diagnostic tool for known diagnostic disease-associated variants. When no diagnostic mutation is found, the file can be sent to the disease-causing gene discovery tool to see whether new disease associated variants can be found.
 
-TEAM (27) is an intuitive and easy-to-use web tool that fills the gap between the predicted mutations and the final diagnostic in targeted enrichment sequencing analysis. The tool searches for known diagnostic mutations, corresponding to a disease panel, among the predicted patient’s variants. Diagnostic variants for the disease are taken from four databases of disease-related variants (HGMD-public, HUMSAVAR , ClinVar and COSMIC) If no primary diagnostic variant is found, then a list of secondary findings that can help to establish a diagnostic is produced. TEAM also provides with an interface for the definition of and customization of panels, by means of which, genes and mutations can be added or discarded to adjust panel definitions.
+TEAM (27) is an intuitive and easy-to-use web tool that fills the gap between the predicted mutations and the final diagnostic in targeted enrichment sequencing analysis. The tool searches for known diagnostic mutations, corresponding to a disease panel, among the predicted patient’s variants. Diagnostic variants for the disease are taken from four databases of disease-related variants (HGMD, HUMSAVAR, ClinVar and COSMIC). If no primary diagnostic variant is found, then a list of secondary findings that can help to establish a diagnostic is produced. TEAM also provides an interface for the definition and customization of panels, by means of which, genes and mutations can be added or discarded to adjust panel definitions.
 
 ![Interface of the application. Panels for defining targeted regions of interest can be set up by just drag and drop known disease genes or disease definitions from the lists. Thus, virtual panels can be interactively improved as the knowledge of the disease increases.](../../../img/fig5.png)
 
diff --git a/docs.it4i/anselm/software/virtualization.md b/docs.it4i/anselm/software/virtualization.md
index a5c7c95aa5f2c1df601606ecc42ed2c8398fb249..2133f6b9ae0eb782346c5d0437c7284f4c0c92f3 100644
--- a/docs.it4i/anselm/software/virtualization.md
+++ b/docs.it4i/anselm/software/virtualization.md
@@ -73,7 +73,7 @@ QEMU currently supports these image types or formats:
 * vmdk - VMware 3 & 4, or 6 image format, for exchanging images with that product
 * vdi - VirtualBox 1.1 compatible image format, for exchanging images with VirtualBox.
 
-You can convert your existing image using qemu-img convert command. Supported formats of this command are: blkdebug blkverify bochs cloop cow dmg file ftp ftps host_cdrom host_device host_floppy http https nbd parallels qcow qcow2 qed raw sheepdog tftp vdi vhdx vmdk vpc vvfat.
+You can convert your existing image using `qemu-img convert` command. Supported formats of this command are: `blkdebug blkverify bochs cloop cow dmg file ftp ftps host_cdrom host_device host_floppy http https nbd parallels qcow qcow2 qed raw sheepdog tftp vdi vhdx vmdk vpc vvfat`.
 
 We recommend using advanced QEMU native image format qcow2.
 
@@ -89,7 +89,7 @@ Remove all unnecessary software and files.
 
 Remove all paging space, swap files, partitions, etc.
 
-Shrink your image. (It is recommended to zero all free space and reconvert image using qemu-img.)
+Shrink your image. (It is recommended to zero all free space and reconvert image using `qemu-img`.)
 
 ### Modify Your Image for Running Jobs
 
@@ -228,7 +228,7 @@ Run virtual machine (simple)
     $ qemu-system-x86_64 -hda win.img   -enable-kvm -cpu host -smp 16 -m 32768 -vga std -localtime -usb -usbdevice tablet -vnc :0
 ```
 
-You can access virtual machine by VNC viewer (option -vnc) connecting to IP address of compute node. For VNC you must use VPN network.
+You can access virtual machine by VNC viewer (option `-vnc`) connecting to IP address of compute node. For VNC you must use VPN network.
 
 Install virtual machine from ISO file
 
@@ -301,7 +301,7 @@ Run SLIRP daemon over SSH tunnel on login node and connect it to virtual network
     $ dpipe vde_plug /tmp/sw0 = ssh login1 $VDE2_DIR/bin/slirpvde -s - --dhcp &
 ```
 
-Run qemu using vde network back-end, connect to created virtual switch.
+Run QEMU using VDE network back-end, connect to created virtual switch.
 
 Basic setup (obsolete syntax)
 
@@ -323,11 +323,11 @@ Optimized setup
 
 #### TAP Interconnect
 
-Both user and vde network back-end have low performance. For fast interconnect (10 Gbit/s and more) of compute node (host) and virtual machine (guest) we suggest using Linux kernel TAP device.
+Both user and VDE network back-end have low performance. For fast interconnect (10 Gbit/s and more) of compute node (host) and virtual machine (guest) we suggest using Linux kernel TAP device.
 
 Cluster Anselm provides TAP device tap0 for your job. TAP interconnect does not provide any services (like NAT, DHCP, DNS, SMB, etc.) just raw networking, so you should provide your services if you need them.
 
-Run qemu with TAP network back-end:
+Run QEMU with TAP network back-end:
 
 ```bash
     $ qemu-system-x86_64 ... -device virtio-net-pci,netdev=net1
@@ -338,9 +338,9 @@ Interface tap0 has IP address 192.168.1.1 and network mask 255.255.255.0 (/24).
 
 Redirected ports:
 
-* DNS udp/53->udp/3053, tcp/53->tcp3053
-* DHCP udp/67->udp3067
-* SMB tcp/139->tcp3139, tcp/445->tcp3445).
+* DNS UDP/53-&gt;UDP/3053, TCP/53-&gt;TCP/3053
+* DHCP UDP/67-&gt;UDP/3067
+* SMB TCP/139-&gt;TCP/3139, TCP/445-&gt;TCP/3445).
 
 You can configure IP address of virtual machine statically or dynamically. For dynamic addressing provide your DHCP server on port 3067 of tap0 interface, you can also provide your DNS server on port 3053 of tap0 interface for example:
 
diff --git a/docs.it4i/anselm/storage.md b/docs.it4i/anselm/storage.md
index 7beb9678fb422baa514d9393af5d94539c8f000d..fa99fef03e7b3a426095241d6e6608f528b74c9d 100644
--- a/docs.it4i/anselm/storage.md
+++ b/docs.it4i/anselm/storage.md
@@ -1,6 +1,6 @@
 # Storage
 
-There are two main shared file systems on Anselm cluster, the [HOME](#home) and [SCRATCH](#scratch). All login and compute nodes may access same data on shared file systems. Compute nodes are also equipped with local (non-shared) scratch, ramdisk and tmp file systems.
+There are two main shared file systems on Anselm cluster, the [HOME](#home) and [SCRATCH](#scratch). All login and compute nodes may access same data on shared file systems. Compute nodes are also equipped with local (non-shared) scratch, RAM disk and tmp file systems.
 
 ## Archiving
 
@@ -357,7 +357,7 @@ First, create the mount point
     $ mkdir cesnet
 ```
 
-Mount the storage. Note that you can choose among the ssh.du1.cesnet.cz (Plzen), ssh.du2.cesnet.cz (Jihlava), ssh.du3.cesnet.cz (Brno) Mount tier1_home **(only 5120M !)**:
+Mount the storage. Note that you can choose among the ssh.du1.cesnet.cz (Plzen), ssh.du2.cesnet.cz (Jihlava), ssh.du3.cesnet.cz (Brno). Mount tier1_home **(only 5120 MB!)**:
 
 ```bash
     $ sshfs username@ssh.du1.cesnet.cz:. cesnet/
@@ -389,16 +389,16 @@ Once done, please remember to unmount the storage
     $ fusermount -u cesnet
 ```
 
-### Rsync Access
+### RSYNC Access
 
 !!! note
-    Rsync provides delta transfer for best performance, can resume interrupted transfers
+    RSYNC provides delta transfer for best performance, can resume interrupted transfers
 
-Rsync is a fast and extraordinarily versatile file copying tool. It is famous for its delta-transfer algorithm, which reduces the amount of data sent over the network by sending only the differences between the source files and the existing files in the destination.  Rsync is widely used for backups and mirroring and as an improved copy command for everyday use.
+RSYNC is a fast and extraordinarily versatile file copying tool. It is famous for its delta-transfer algorithm, which reduces the amount of data sent over the network by sending only the differences between the source files and the existing files in the destination.  RSYNC is widely used for backups and mirroring and as an improved copy command for everyday use.
 
-Rsync finds files that need to be transferred using a "quick check" algorithm (by default) that looks for files that have changed in size or in last-modified time.  Any changes in the other preserved attributes (as requested by options) are made on the destination file directly when the quick check indicates that the file's data does not need to be updated.
+RSYNC finds files that need to be transferred using a "quick check" algorithm (by default) that looks for files that have changed in size or in last-modified time.  Any changes in the other preserved attributes (as requested by options) are made on the destination file directly when the quick check indicates that the file's data does not need to be updated.
 
-[More about Rsync](https://du.cesnet.cz/en/navody/rsync/start#pro_bezne_uzivatele)
+[More about RSYNC](https://du.cesnet.cz/en/navody/rsync/start#pro_bezne_uzivatele)
 
 Transfer large files to/from CESNET storage, assuming membership in the Storage VO
 
diff --git a/docs.it4i/general/accessing-the-clusters/graphical-user-interface/x-window-system.md b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/x-window-system.md
index b9c6951295a6b4d96fceb53c6d383464bee6d5c1..ccaae883548d35058bcea80e72f0740b136cb4fc 100644
--- a/docs.it4i/general/accessing-the-clusters/graphical-user-interface/x-window-system.md
+++ b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/x-window-system.md
@@ -118,7 +118,7 @@ However this method does not seem to work with recent Linux distributions and yo
 
 ## Gnome on Windows
 
-Use Xlaunch to start the Xming server or run the XWin.exe. Select the "One window" mode.
+Use XLaunch to start the Xming server or run the XWin.exe. Select the "One window" mode.
 
 Log in to the cluster, using PuTTY. On the cluster, run the gnome-session command.
 
diff --git a/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md b/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md
index a2a4d429fc06d4943a0ab89df247f410ccdc4bd2..36dd1323fb528f540dd58ae4dc99ee1224b74751 100644
--- a/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md
+++ b/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md
@@ -107,4 +107,4 @@ In this example, we add an additional public key, stored in file additional_key.
 
 ## How to Remove Your Own Key
 
-Removing your key from authorized_keys can be done simply by deleting the corresponding public key which can be identified by a comment at the end of line (eg. _username@organization.example.com_).
+Removing your key from authorized_keys can be done simply by deleting the corresponding public key which can be identified by a comment at the end of line (e.g. _username@organization.example.com_).
diff --git a/docs.it4i/salomon/environment-and-modules.md b/docs.it4i/salomon/environment-and-modules.md
index 9671013566e7621e42b2d0cdf693eed783f13197..1da5d4e3ddc1fcf9306d9fbe7df43ab83d27872f 100644
--- a/docs.it4i/salomon/environment-and-modules.md
+++ b/docs.it4i/salomon/environment-and-modules.md
@@ -24,7 +24,7 @@ fi
 ```
 
 !!! note
-    Do not run commands outputting to standard output (echo, module list, etc) in .bashrc for non-interactive SSH sessions. It breaks fundamental functionality (scp, PBS) of your account! Take care for SSH session interactivity for such commands as stated in the previous example.
+    Do not run commands outputting to standard output (echo, module list, etc) in .bashrc for non-interactive SSH sessions. It breaks fundamental functionality (SCP, PBS) of your account! Take care for SSH session interactivity for such commands as stated in the previous example.
 
 ### Application Modules
 
diff --git a/docs.it4i/salomon/introduction.md b/docs.it4i/salomon/introduction.md
index 83ff79221fc01aadbf0cfa1258220778bc275308..bc466a8d89a4fb78292e076744ca35563511a646 100644
--- a/docs.it4i/salomon/introduction.md
+++ b/docs.it4i/salomon/introduction.md
@@ -1,6 +1,6 @@
 # Introduction
 
-Welcome to Salomon supercomputer cluster. The Salomon cluster consists of 1008 compute nodes, totaling 24192 compute cores with 129 TB RAM and giving over 2 Pflop/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 24 cores, at least 128 GB RAM. Nodes are interconnected by 7D Enhanced hypercube InfiniBand network and equipped with Intel Xeon E5-2680v3 processors. The Salomon cluster consists of 576 nodes without accelerators and 432 nodes equipped with Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](hardware-overview/).
+Welcome to Salomon supercomputer cluster. The Salomon cluster consists of 1008 compute nodes, totaling 24192 compute cores with 129 TB RAM and giving over 2 PFLOP/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 24 cores, at least 128 GB RAM. Nodes are interconnected by 7D Enhanced hypercube InfiniBand network and equipped with Intel Xeon E5-2680v3 processors. The Salomon cluster consists of 576 nodes without accelerators and 432 nodes equipped with Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](hardware-overview/).
 
 The cluster runs [CentOS Linux](http://www.bull.com/bullx-logiciels/systeme-exploitation.html) operating system, which is compatible with the RedHat [Linux family.](http://upload.wikimedia.org/wikipedia/commons/1/1b/Linux_Distribution_Timeline.svg)
 
diff --git a/docs.it4i/salomon/job-submission-and-execution.md b/docs.it4i/salomon/job-submission-and-execution.md
index e7a4c4ff0039815504804e9f5fcb30959e8713e6..db8080a2262c61046965a1f86b9d9a14527f81a6 100644
--- a/docs.it4i/salomon/job-submission-and-execution.md
+++ b/docs.it4i/salomon/job-submission-and-execution.md
@@ -422,7 +422,7 @@ cd $SCRDIR || exit
 cp $PBS_O_WORKDIR/input .
 cp $PBS_O_WORKDIR/mympiprog.x .
 
-# load the mpi module
+# load the MPI module
 module load OpenMPI
 
 # execute the calculation
@@ -460,7 +460,7 @@ Example jobscript for an MPI job with preloaded inputs and executables, options
 SCRDIR=/scratch/work/user/$USER/myjob
 cd $SCRDIR || exit
 
-# load the mpi module
+# load the MPI module
 module load OpenMPI
 
 # execute the calculation
diff --git a/docs.it4i/salomon/software/ansys/ansys-cfx.md b/docs.it4i/salomon/software/ansys/ansys-cfx.md
index 21ce8f93b16958a184d15af5235830e9d39406b9..2cf29101dd4ab162de9c7f52e2b19a58cc715f42 100644
--- a/docs.it4i/salomon/software/ansys/ansys-cfx.md
+++ b/docs.it4i/salomon/software/ansys/ansys-cfx.md
@@ -49,7 +49,7 @@ echo Machines: $hl
 
 Header of the pbs file (above) is common and description can be find on [this site](../../job-submission-and-execution/). SVS FEM recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources.
 
-Working directory has to be created before sending pbs job into the queue. Input file should be in working directory or full path to input file has to be specified. >Input file has to be defined by common CFX def file which is attached to the cfx solver via parameter -def
+Working directory has to be created before sending PBS job into the queue. Input file should be in working directory or full path to input file has to be specified. >Input file has to be defined by common CFX def file which is attached to the CFX solver via parameter -def
 
 **License** should be selected by parameter -P (Big letter **P**). Licensed products are the following: aa_r (ANSYS **Academic** Research), ane3fl (ANSYS Multiphysics)-**Commercial**.
 [More about licensing here](licensing/)
diff --git a/docs.it4i/salomon/software/ansys/ansys-fluent.md b/docs.it4i/salomon/software/ansys/ansys-fluent.md
index 33e711b285cc8066604c43ebb7c943dcb1294fb6..f4867b5c75bcaa7b3e52400b601dd1d0cce43fe3 100644
--- a/docs.it4i/salomon/software/ansys/ansys-fluent.md
+++ b/docs.it4i/salomon/software/ansys/ansys-fluent.md
@@ -3,7 +3,7 @@
 [ANSYS Fluent](http://www.ansys.com/products/fluids/ansys-fluent)
 software contains the broad physical modeling capabilities needed to model flow, turbulence, heat transfer, and reactions for industrial applications ranging from air flow over an aircraft wing to combustion in a furnace, from bubble columns to oil platforms, from blood flow to semiconductor manufacturing, and from clean room design to wastewater treatment plants. Special models that give the software the ability to model in-cylinder combustion, aeroacoustics, turbomachinery, and multiphase systems have served to broaden its reach.
 
-1. Common way to run Fluent over pbs file
+1. Common way to run Fluent over PBS file
 
 To run ANSYS Fluent in batch mode you can utilize/modify the default fluent.pbs script and execute it via the qsub command.
 
diff --git a/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md b/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md
index 8646c26665ea9f10d6d70405e961f1e2efe7fbb9..89f53c988715cee93da0af78988b42a49438f6e1 100644
--- a/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md
+++ b/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md
@@ -50,6 +50,6 @@ echo Machines: $hl
 /ansys_inc/v145/ansys/bin/ansys145 -dis -lsdynampp i=input.k -machines $hl
 ```
 
-Header of the pbs file (above) is common and description can be find on [this site](../../job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources.
+Header of the PBS file (above) is common and description can be find on [this site](../../job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources.
 
-Working directory has to be created before sending pbs job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common LS-DYNA .**k** file which is attached to the ansys solver via parameter i=
+Working directory has to be created before sending PBS job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common LS-DYNA .**k** file which is attached to the ansys solver via parameter i=
diff --git a/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md b/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md
index c1562c1c23ca09fe308536c45f1c903ab8384b3e..0bde6f3a1c8b5d729f0de715863186b189beb74d 100644
--- a/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md
+++ b/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md
@@ -49,8 +49,8 @@ echo Machines: $hl
 /ansys_inc/v145/ansys/bin/ansys145 -b -dis -p aa_r -i input.dat -o file.out -machines $hl -dir $WORK_DIR
 ```
 
-Header of the pbs file (above) is common and description can be find on [this site](../../resources-allocation-policy/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources.
+Header of the PBS file (above) is common and description can be find on [this site](../../resources-allocation-policy/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources.
 
-Working directory has to be created before sending pbs job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common APDL file which is attached to the ansys solver via parameter -i
+Working directory has to be created before sending PBS job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common APDL file which is attached to the ansys solver via parameter -i
 
 **License** should be selected by parameter -p. Licensed products are the following: aa_r (ANSYS **Academic** Research), ane3fl (ANSYS Multiphysics)-**Commercial**, aa_r_dy (ANSYS **Academic** AUTODYN) [More about licensing here](licensing/)
diff --git a/docs.it4i/salomon/software/ansys/ansys.md b/docs.it4i/salomon/software/ansys/ansys.md
index f93524a3e580f8a5c83302f8d1cd9997bb68c2be..a5cac322dbc77eb794401e4dd964dd79b9fbbc23 100644
--- a/docs.it4i/salomon/software/ansys/ansys.md
+++ b/docs.it4i/salomon/software/ansys/ansys.md
@@ -2,7 +2,7 @@
 
 **[SVS FEM](http://www.svsfem.cz/)** as **[ANSYS Channel partner](http://www.ansys.com/)** for Czech Republic provided all ANSYS licenses for ANSELM cluster and supports of all ANSYS Products (Multiphysics, Mechanical, MAPDL, CFX, Fluent, Maxwell, LS-DYNA...) to IT staff and ANSYS users. If you are challenging to problem of ANSYS functionality contact please [hotline@svsfem.cz](mailto:hotline@svsfem.cz?subject=Ostrava%20-%20ANSELM)
 
-Anselm provides as commercial as academic variants. Academic variants are distinguished by "**Academic...**" word in the name of license or by two letter preposition "**aa\_**" in the license feature name. Change of license is realized on command line respectively directly in user's pbs file (see individual products). [More about licensing here](licensing/)
+Anselm provides as commercial as academic variants. Academic variants are distinguished by "**Academic...**" word in the name of license or by two letter preposition "**aa\_**" in the license feature name. Change of license is realized on command line respectively directly in user's PBS file (see individual products). [More about licensing here](licensing/)
 
 To load the latest version of any ANSYS product (Mechanical, Fluent, CFX, MAPDL,...) load the module:
 
diff --git a/docs.it4i/salomon/software/ansys/licensing.md b/docs.it4i/salomon/software/ansys/licensing.md
index 04ff6513349ccede25a0846dd21227251e954732..1dec4471cc4e28fbb7b5b0aa6ff18dfa3d6d437a 100644
--- a/docs.it4i/salomon/software/ansys/licensing.md
+++ b/docs.it4i/salomon/software/ansys/licensing.md
@@ -1,6 +1,6 @@
 # Licensing and Available Versions
 
-## ANSYS Licence Can Be Used By:
+## ANSYS License Can Be Used By:
 
 * all persons in the carrying out of the CE IT4Innovations Project (In addition to the primary licensee, which is VSB - Technical University of Ostrava, users are CE IT4Innovations third parties - CE IT4Innovations project partners, particularly the University of Ostrava, the Brno University of Technology - Faculty of Informatics, the Silesian University in Opava, Institute of Geonics AS CR.)
 * all persons who have a valid license
diff --git a/docs.it4i/salomon/software/ansys/setting-license-preferences.md b/docs.it4i/salomon/software/ansys/setting-license-preferences.md
index fe14541d46b1fe4cab38eb7b883c58e40e03dd32..b0739a900b2a1aea4991a18f50006f33d157afa7 100644
--- a/docs.it4i/salomon/software/ansys/setting-license-preferences.md
+++ b/docs.it4i/salomon/software/ansys/setting-license-preferences.md
@@ -1,6 +1,6 @@
 # Setting license preferences
 
-Some ANSYS tools allow you to explicitly specify usage of academic or commercial licenses in the command line (eg. ansys161 -p aa_r to select Academic Research license). However, we have observed that not all tools obey this option and choose commercial license.
+Some ANSYS tools allow you to explicitly specify usage of academic or commercial licenses in the command line (e.g. ansys161 -p aa_r to select Academic Research license). However, we have observed that not all tools obey this option and choose commercial license.
 
 Thus you need to configure preferred license order with ANSLIC_ADMIN. Please follow these steps and move Academic Research license to the top or bottom of the list accordingly.
 
diff --git a/docs.it4i/salomon/software/ansys/workbench.md b/docs.it4i/salomon/software/ansys/workbench.md
index 8ed07d789dea69798e68c177ac1612a3e391ec88..fb37e6a7c4cdfd1731852ae175099da0a89df4cc 100644
--- a/docs.it4i/salomon/software/ansys/workbench.md
+++ b/docs.it4i/salomon/software/ansys/workbench.md
@@ -6,13 +6,13 @@ It is possible to run Workbench scripts in batch mode. You need to configure sol
 
 ![](../../../img/AMsetPar1.png)
 
-Enable Distribute Solution checkbox and enter number of cores (eg. 48 to run on two Salomon nodes). If you want the job to run on more then 1 node, you must also provide a so called MPI appfile. In the Additional Command Line Arguments input field, enter:
+Enable Distribute Solution checkbox and enter number of cores (e.g. 48 to run on two Salomon nodes). If you want the job to run on more then 1 node, you must also provide a so called MPI appfile. In the Additional Command Line Arguments input field, enter:
 
 ```bash
     -mpifile /path/to/my/job/mpifile.txt
 ```
 
-Where /path/to/my/job is the directory where your project is saved. We will create the file mpifile.txt programatically later in the batch script. For more information, refer to \*ANSYS Mechanical APDL Parallel Processing\* \*Guide\*.
+Where /path/to/my/job is the directory where your project is saved. We will create the file mpifile.txt programmatically later in the batch script. For more information, refer to \*ANSYS Mechanical APDL Parallel Processing\* \*Guide\*.
 
 Now, save the project and close Workbench. We will use this script to launch the job:
 
diff --git a/docs.it4i/salomon/software/chemistry/nwchem.md b/docs.it4i/salomon/software/chemistry/nwchem.md
index a26fc701ee44585dbab1f942685b92d9190adfa5..465adf2042cca69f801af48a2eb104f215a44187 100644
--- a/docs.it4i/salomon/software/chemistry/nwchem.md
+++ b/docs.it4i/salomon/software/chemistry/nwchem.md
@@ -40,4 +40,4 @@ The recommend to use version 6.5. Version 6.3 fails on Salomon nodes with accele
 Please refer to [the documentation](http://www.nwchem-sw.org/index.php/Release62:Top-level) and in the input file set the following directives :
 
 * MEMORY : controls the amount of memory NWChem will use
-* SCRATCH_DIR : set this to a directory in [SCRATCH filesystem](../../storage/storage/) (or run the calculation completely in a scratch directory). For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, eg. "scf direct"
+* SCRATCH_DIR : set this to a directory in [SCRATCH filesystem](../../storage/storage/) (or run the calculation completely in a scratch directory). For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, e.g. `scf direct`
diff --git a/docs.it4i/salomon/software/chemistry/phono3py.md b/docs.it4i/salomon/software/chemistry/phono3py.md
index 3f747d23bc9775f80137c0d6e4f1b4821d97439b..275c23a36b7664d2c8f9bf2251ca4e0d44ff812e 100644
--- a/docs.it4i/salomon/software/chemistry/phono3py.md
+++ b/docs.it4i/salomon/software/chemistry/phono3py.md
@@ -87,7 +87,7 @@ Once all jobs are finished and vasprun.xml is created in each disp-XXXXX directo
 $ phono3py --cf3 disp-{00001..00111}/vasprun.xml
 ```
 
-and `disp_fc2.yaml, FORCES_FC2`, `FORCES_FC3` and disp_fc3.yaml should appear and put into the hdf format by
+and `disp_fc2.yaml, FORCES_FC2`, `FORCES_FC3` and disp_fc3.yaml should appear and put into the HDF format by
 
 ```bash
 $ phono3py --dim="2 2 2" -c POSCAR
diff --git a/docs.it4i/salomon/software/compilers.md b/docs.it4i/salomon/software/compilers.md
index 8e62965ff71b3afbd4e178c5019a0101597401b5..f629a33981976dee744c2c296a28603c82bd651e 100644
--- a/docs.it4i/salomon/software/compilers.md
+++ b/docs.it4i/salomon/software/compilers.md
@@ -11,7 +11,7 @@ There are several compilers for different programming languages available on the
 
 The C/C++ and Fortran compilers are provided by:
 
-Opensource:
+Open source:
 
 * GNU GCC
 * Clang/LLVM
@@ -82,11 +82,11 @@ For more information about the possibilities of the compilers, please see the ma
 UPC is supported by two compiler/runtime implementations:
 
 * GNU - SMP/multi-threading support only
-* Berkley - multi-node support as well as SMP/multi-threading support
+* Berkeley - multi-node support as well as SMP/multi-threading support
 
 ### GNU UPC Compiler
 
-To use the GNU UPC compiler and run the compiled binaries use the module gupc
+To use the GNU UPC compiler and run the compiled binaries use the module `gupc`
 
 ```bash
     $ module add gupc
@@ -127,18 +127,18 @@ To run the example with 5 threads issue
 
 For more information see the man pages.
 
-### Berkley UPC Compiler
+### Berkeley UPC Compiler
 
-To use the Berkley UPC compiler and runtime environment to run the binaries use the module bupc
+To use the Berkeley UPC compiler and runtime environment to run the binaries use the module `BerkeleyUPC`
 
 ```bash
     $ module add BerkeleyUPC/2.16.2-gompi-2015b
     $ upcc -version
 ```
 
-As default UPC network the "smp" is used. This is very quick and easy way for testing/debugging, but limited to one node only.
+As the default UPC network, `smp` is used. This is a very quick and easy way for testing/debugging, but limited to one node only.
 
-For production runs, it is recommended to use the native InfiniBand implementation of UPC network "ibv". For testing/debugging using multiple nodes, the "mpi" UPC network is recommended.
+For production runs, it is recommended to use the native InfiniBand implementation of UPC network `ibv`. For testing/debugging using multiple nodes, the `mpi` UPC network is recommended.
 
 !!! warning
     Selection of the network is done at the compile time and not at runtime (as expected)!
@@ -162,7 +162,7 @@ Example UPC code:
     }
 ```
 
-To compile the example with the "ibv" UPC network use
+To compile the example with the `ibv` UPC network use
 
 ```bash
     $ upcc -network=ibv -o hello.upc.x hello.upc
diff --git a/docs.it4i/salomon/software/comsol/comsol-multiphysics.md b/docs.it4i/salomon/software/comsol/comsol-multiphysics.md
index 05a6d2944b2e8db354e134c8f506f87b70f0531a..febf08877a211c028d0acd78b3c193a1b65f390b 100644
--- a/docs.it4i/salomon/software/comsol/comsol-multiphysics.md
+++ b/docs.it4i/salomon/software/comsol/comsol-multiphysics.md
@@ -115,4 +115,4 @@ cd /apps/cae/COMSOL/51/mli
 matlab -nodesktop -nosplash -r "mphstart; addpath /scratch/work/user/$USER/work; test_job"
 ```
 
-This example shows how to run Livelink for MATLAB with following configuration: 3 nodes and 16 cores per node. Working directory has to be created before submitting (comsol_matlab.pbs) job script into the queue. Input file (test_job.m) has to be in working directory or full path to input file has to be specified. The Matlab command option (-r ”mphstart”) created a connection with a COMSOL server using the default port number.
+This example shows how to run LiveLink for MATLAB with the following configuration: 3 nodes and 16 cores per node. The working directory has to be created before submitting the job script (comsol_matlab.pbs) into the queue. The input file (test_job.m) has to be in the working directory, or the full path to the input file has to be specified. The MATLAB command option (-r "mphstart") creates a connection with a COMSOL server using the default port number.
diff --git a/docs.it4i/salomon/software/comsol/licensing-and-available-versions.md b/docs.it4i/salomon/software/comsol/licensing-and-available-versions.md
index 4358b930fedbfcdf3ea9277d2fa5c89e8a74ca37..be5a15f251c6b32dd3d432da682ef9651c3683ce 100644
--- a/docs.it4i/salomon/software/comsol/licensing-and-available-versions.md
+++ b/docs.it4i/salomon/software/comsol/licensing-and-available-versions.md
@@ -1,6 +1,6 @@
 # Licensing and Available Versions
 
-## Comsol Licence Can Be Used By:
+## Comsol License Can Be Used By:
 
 * all persons in the carrying out of the CE IT4Innovations Project (In addition to the primary licensee, which is VSB - Technical University of Ostrava, users are CE IT4Innovations third parties - CE IT4Innovations project partners, particularly the University of Ostrava, the Brno University of Technology - Faculty of Informatics, the Silesian University in Opava, Institute of Geonics AS CR.)
 * all persons who have a valid license
diff --git a/docs.it4i/salomon/software/debuggers/allinea-performance-reports.md b/docs.it4i/salomon/software/debuggers/allinea-performance-reports.md
index 3d0826e994bb6434b9cd0cd100249393191c03d3..52b484844c3e6636e1179f83abd341773370e8f7 100644
--- a/docs.it4i/salomon/software/debuggers/allinea-performance-reports.md
+++ b/docs.it4i/salomon/software/debuggers/allinea-performance-reports.md
@@ -28,7 +28,7 @@ Instead of [running your MPI program the usual way](../mpi/mpi/), use the the pe
     $ perf-report mpirun ./mympiprog.x
 ```
 
-The mpi program will run as usual. The perf-report creates two additional files, in \*.txt and \*.html format, containing the performance report. Note that demanding MPI codes should be run within [the queue system](../../job-submission-and-execution/).
+The MPI program will run as usual. The perf-report creates two additional files, in \*.txt and \*.html format, containing the performance report. Note that demanding MPI codes should be run within [the queue system](../../job-submission-and-execution/).
 
 ## Example
 
diff --git a/docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md b/docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md
index 2fdbd18e166d3e553a8ad5719f7945f902cbd73c..09a57296991332664f6d4f3ec9e81f25ec74fe8b 100644
--- a/docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md
+++ b/docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md
@@ -19,7 +19,7 @@ To profile an application with VTune Amplifier, special kernel modules need to b
     $ qsub -q qexp -A OPEN-0-0 -I -l select=1,vtune=2016_update1
 ```
 
-After that, you can verify the modules sep\*, pax and vtsspp are present in the kernel :
+After that, you can verify the modules `sep*`, `pax` and `vtsspp` are present in the kernel :
 
 ```bash
     $ lsmod | grep -e sep -e pax -e vtsspp
@@ -42,7 +42,7 @@ and launch the GUI :
 
 The GUI will open in new window. Click on "New Project..." to create a new project. After clicking OK, a new window with project properties will appear.  At "Application:", select the bath to your binary you want to profile (the binary should be compiled with -g flag). Some additional options such as command line arguments can be selected. At "Managed code profiling mode:" select "Native" (unless you want to profile managed mode .NET/Mono applications). After clicking OK, your project is created.
 
-To run a new analysis, click "New analysis...". You will see a list of possible analysis. Some of them will not be possible on the current CPU (eg. Intel Atom analysis is not possible on Sandy bridge CPU), the GUI will show an error box if you select the wrong analysis. For example, select "Advanced Hotspots". Clicking on Start will start profiling of the application.
+To run a new analysis, click "New analysis...". You will see a list of possible analyses. Some of them will not be possible on the current CPU (e.g. Intel Atom analysis is not possible on a Sandy Bridge CPU), and the GUI will show an error box if you select the wrong analysis. For example, select "Advanced Hotspots". Clicking on Start will start profiling of the application.
 
 ## Remote Analysis
 
diff --git a/docs.it4i/salomon/software/debuggers/valgrind.md b/docs.it4i/salomon/software/debuggers/valgrind.md
index 430118785a08bc43e67a4711396f9ac6b63c4afb..5e6a2c8e8246888a60e7026bb04ab433b921f658 100644
--- a/docs.it4i/salomon/software/debuggers/valgrind.md
+++ b/docs.it4i/salomon/software/debuggers/valgrind.md
@@ -262,4 +262,4 @@ Prints this output : (note that there is output printed for every launched MPI p
     ==31319== ERROR SUMMARY: 1 errors from 1 contexts (suppressed: 4 from 4)
 ```
 
-We can see that Valgrind has reported use of unitialised memory on the master process (which reads the array to be broadcasted) and use of unaddresable memory on both processes.
+We can see that Valgrind has reported use of uninitialised memory on the master process (which reads the array to be broadcasted) and use of unaddressable memory on both processes.
diff --git a/docs.it4i/salomon/software/debuggers/vampir.md b/docs.it4i/salomon/software/debuggers/vampir.md
index 99053546c14b43c51d5ab7728dfa3824f2016170..f93680b58b9c81d68c66cab796beaac343726785 100644
--- a/docs.it4i/salomon/software/debuggers/vampir.md
+++ b/docs.it4i/salomon/software/debuggers/vampir.md
@@ -1,6 +1,6 @@
 # Vampir
 
-Vampir is a commercial trace analysis and visualisation tool. It can work with traces in OTF and OTF2 formats. It does not have the functionality to collect traces, you need to use a trace collection tool (such as [Score-P](score-p/)) first to collect the traces.
+Vampir is a commercial trace analysis and visualization tool. It can work with traces in OTF and OTF2 formats. It does not have the functionality to collect traces, you need to use a trace collection tool (such as [Score-P](score-p/)) first to collect the traces.
 
 ![](../../../img/Snmekobrazovky20160708v12.33.35.png)
 
diff --git a/docs.it4i/salomon/software/intel-suite/intel-compilers.md b/docs.it4i/salomon/software/intel-suite/intel-compilers.md
index 63a05bd91e15c04afa6a3cc8d21231ba030437bc..29b1f39431cbf10c723d529f814e518107391cc0 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-compilers.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-compilers.md
@@ -1,6 +1,6 @@
 # Intel Compilers
 
-The Intel compilers in multiple versions are available, via module intel. The compilers include the icc C and C++ compiler and the ifort fortran 77/90/95 compiler.
+The Intel compilers in multiple versions are available, via module intel. The compilers include the icc C and C++ compiler and the ifort Fortran 77/90/95 compiler.
 
 ```bash
     $ module load intel
@@ -17,7 +17,7 @@ For maximum performance on the Salomon cluster compute nodes, compile your progr
     $ ifort -ipo -O3 -xCORE-AVX2 -qopt-report1 -qopt-report-phase=vec myprog.f mysubroutines.f -o myprog.x
 ```
 
-In this example, we compile the program enabling interprocedural optimizations between source files (-ipo), aggresive loop optimizations (-O3) and vectorization (-xCORE-AVX2)
+In this example, we compile the program enabling interprocedural optimizations between source files (-ipo), aggressive loop optimizations (-O3) and vectorization (-xCORE-AVX2)
 
 The compiler recognizes the omp, simd, vector and ivdep pragmas for OpenMP parallelization and AVX2 vectorization. Enable the OpenMP parallelization by the **-openmp** compiler switch.
 
diff --git a/docs.it4i/salomon/software/intel-suite/intel-debugger.md b/docs.it4i/salomon/software/intel-suite/intel-debugger.md
index d0fef6ab7fbe2e50e8e7f8238585521bb5cb9695..a1c2af521a53af775987e1ba1f5ebb9a8c42f129 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-debugger.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-debugger.md
@@ -71,6 +71,6 @@ Run the idb debugger from within the MPI debug option. This will cause the debug
 
 Run the idb debugger in GUI mode. The menu Parallel contains number of tools for debugging multiple threads. One of the most useful tools is the **Serialize Execution** tool, which serializes execution of concurrent threads for easy orientation and identification of concurrency related bugs.
 
-## Further Information
+## Further information
 
-Exhaustive manual on idb features and usage is published at Intel website, <https://software.intel.com/sites/products/documentation/doclib/iss/2013/compiler/cpp-lin/>
+Exhaustive manual on IDB features and usage is published at Intel website, <https://software.intel.com/sites/products/documentation/doclib/iss/2013/compiler/cpp-lin/>