```json
{"total": 415, "projects": {"MIKE": "default", "SIP": "4.17-Python-2.7.9", "HDF5": "1.10.0-patch1-intel-2016.01-mic", "p4vasp": "0.3.29-GNU-4.9.3-2.25", "Automake": "1.15-GNU-5.1.0-2.25", "netcdf": "4.3.0", "bullxde": "2.0", "APR-util": "1.5.4-foss-2015g", "ScaLAPACK": "2.0.2-OpenBLAS-0.2.14-LAPACK-3.5.0", "BerkeleyUPC": "2.16.2-gompi-2015b", "BWA": "0.7.5a-foss-2015g", "openmpi": "1.8.1-icc", "matlab": "R2014a-EDU", "sympy": "0.7.6-intel-2016.01-Python-2.7.9", "kbproto": "1.0.7-intel-2016a", "lsprepost": "4.2", "prace": "20160107-intel-2016.01", "mpt": "2.12", "Bison": "3.0.4-GCC-4.9.3", "totalview": "8.13", "Wine": "1.7.29-GNU-5.1.0-2.25", "opari2": "1.1.2-icc", "MAP": "5.0.1", "libyaml": "0.1.6-intel-2015b", "mercurial": "2.9.1", "beopest": "13.3", "perfsuite": "1a5.3", "PSBLAS-ext": "1.0-4-GCC-4.9.3-2.25", "OSPRay": "0.9.1", "S4MPLE": "1.0.0", "libxslt": "1.1.28-intel-2015b", "hwloc": "1.11.5-GCC-6.3.0-2.27", "libunistring": "0.9.3-intel-2015b", "QGIS": "2.12.3-foss-2015g", "ngsPipeline": "1.0.0", "boost": "1.56-icc-impi", "matplotlib": "1.4.3-intel-2015b-Python-2.7.9", "openfoam": "2.2.2-icc-openmpi1.8.1-DP", "Szip": "2.1-intel-2017a", "PROJ_4": "4.9.2-foss-2015g", "phono3py": "1.11.7.8-intel-2015b-Python-2.7.11", "CMake": "3.7.2-intel-2017a", "COMSOL": "51-EDU", "hdf5": "1.8.13", "gimkl": "2.11.5", "xineramaproto": "1.2.1-intel-2015b", "xextproto": "7.3.0-intel-2016a", "GLM": "0.9.7.2-intel-2017a", "SWIG": "3.0.7-Python-2.7.9", "tmux": "2.3", "ipm": "0.983-icc-impi", "SUMO": "0.27.1-foss-2015g", "ipp": "15.3.187", "hdf5-parallel": "1.8.13-gcc49", "PrgEnv-intel": "15.0.3", "libxcb": "1.11-Python-2.7.9", "MPI_NET": "1.2.0-intel-2016.01", "QEMU": "2.1.2-GCC-4.4.7-system-VDE2", "cp2k-mpi": "2.5.1-gcc", "OTF2": "2.0-intel-2015b-mic", "VirtualGL": "2.4.1", "Armadillo": "7.500.0-foss-2016a-Python-3.5.2", "netcdf-fortran": "4.2", "perfcatcher": "1.0", "tk": "8.5.15", "itac": "9.1.2.024", "LAPACKE": "3.5.0-LAPACK-3.5.0", "PrgEnv-gnu": "4.8.1", "libICE": "1.0.9-intel-2015b", "Rstudio": "0.97", "VisIt": "2.10.0", "virtualgl": "2.4", "Scipion": "1.0.1-Java-1.8.0_112-OpenMPI-1.10.2-GCC-5.3.0-2.26", "grace": "5.1.25-intel-2015b", "ANSYS": "18.0", "ATLAS": "3.10.1-GCC-4.9.3-2.25-LAPACK-3.4.2", "Scalasca": "2.3.1-intel-2015b", "BCFtools": "1.3-foss-2015g", "gcc": "5.4.0", "lxml": "3.4.4-intel-2015b-Python-2.7.9", "lsdyna": "7.x.x", "PGI": "16.10-GNU-4.9.3-2.25", "advisor_xe": "2015.1.10.380555", "CUDA": "8.0.44-intel-2017.00", "gatk": "2.6-4", "Spark": "1.5.2", "ifort": "2017.1.132-GCC-6.3.0-2.27", "lam": "7.1.4-icc", "PyYAML": "3.11-intel-2015b-Python-2.7.9", "tcsh": "6.19.00", "gperf": "3.0.4-intel-2016a", "METIS": "5.1.0-intel-2017.00", "Digimat": "5.0.1-EDU", "pigz": "2.3.3-GCC-6.2.0-2.27", "Autotools": "20150215-GNU-5.1.0-2.25", "parallel": "20150322-GNU-5.1.0-2.25", "bowtie2": "2.2.3", "QuantumESPRESSO": "5.4.0-intel-2017.00", "CP2K": "2.6.0-intel-2015b", "MATIO": "1.5.2-intel-2017a", "wine": "1.7.29", "libX11": "1.6.3-intel-2016a", "HyperWorks": "13.0", "hpg-aligner": "1.0.0", "PCRE": "8.39-intel-2017.00", "modflow-2005": "1.11.00", "EasyBuild": "3.1.0", "adios": "1.8.0", "GLOBUS": "globus", "picard": "2.1.0", "turbovnc": "1.2.3", "settarg": "7.2.2", "JOE": "4.2", "libSM": "1.2.2-intel-2015b", "pixman": "0.32.6-intel-2015b", "flex": "2.6.3-GCCcore-6.3.0", "libgdiplus": "3.12-GNU-5.1.0-2.25", "python": "3.4.2", "namd": "2.8", "APR": "1.5.2-foss-2015g", "aislinn": "20160105-Python-2.7.9-gompi-2015e", "inspector_xe": "2015.1.2.379161", "h5py": "2.4.0-ictce-7.3.5-Python-2.7.9-serial", "cURL": 
"7.51.0-intel-2017.00", "SIONlib": "1.6.1-tools", "bupc": "2.16.2", "PAPI": "5.4.3-pic", "PerfReports": "5.0.1", "cairo": "1.12.18-foss-2015b", "Harminv": "1.4-intel-2015b", "Perl": "5.24.0-GCC-4.9.3-2.25-bare", "Lua": "5.1.4-8", "fftw2-mpi": "2.1.5-icc", "mxml": "2.9", "Maven": "3.3.9", "GATK": "3.5-Java-1.7.0_79", "Trimmomatic": "0.35-Java-1.7.0_79", "GCCcore": "6.3.0", "GCC": "6.3.0-2.27", "xcb-proto": "1.11-Python-2.7.9", "hypermesh": "12.0.110", "imkl": "2017.1.132-iimpi-2017a", "Meep": "1.3-intel-2015b", "eudev": "3.1.5-intel-2016a", "Vampir": "9.0.0", "FastQC": "0.11.3", "PROJ": "4.9.2-intel-2017.00", "NASM": "2.11.08-intel-2017.00", "mvapich2": "1.9-icc", "iompi": "2017.01", "OpenCV": "3.0.0-intel-2015b", "ParaView": "5.0.0-binary", "ISL": "0.15-GNU-4.9.3-2.25", "intelpcm": "2.6", "Libint": "1.1.4-intel-2015b", "libreadline": "6.3-intel-2017a", "SpatiaLite": "4.3.0a-foss-2015g", "Clang": "3.7.0-GNU-5.1.0-2.25", "ParMETIS": "4.0.3-intel-2017a", "Mesa": "11.2.1-foss-2016a", "fftw3": "3.3.3-icc", "slepc": "3.7.2-icc16-impi5-mkl-opt", "MPFR": "3.1.5-intel-2017.00", "OpenCL-builder": "2015", "OpenCL-runtime": "15.1", "relion": "1.3", "XZ": "5.2.2-intel-2017.00", "libunwind": "1.1-GCC-5.4.0-2.26", "libevent": "2.1.8", "fftw2": "2.1.5-icc", "dytran": "2013.0.1", "ffmpeg": "2.4-intel-2015b", "M4": "1.4.18-GCCcore-6.3.0", "FFTW": "3.3.6-gompi-2017a", "PyQt": "4.11.4-foss-2015g-Python-2.7.9", "NWChem": "6.5.revision26243-intel-2015b-2014-09-10-Python-2.7.8", "hyperworks": "13.0", "ant": "1.9.3-Java-1.7.0_79", "Forge": "7.0", "arpack-ng": "3.4.0-intel-2017.00", "comsol": "50-EDU", "GEOS": "3.5.0-foss-2015g", "Singularity": "2.2-GCC-6.3.0-2.27", "VTune": "2016_update1", "digimat": "5.0.1", "LLVM": "3.9.0-intel-2017.00", "Qt": "4.8.6-foss-2015g", "fixesproto": "5.0-intel-2016a", "Molpro": "2010.1-patch-57-intel2015b", "libXdamage": "1.1.4-intel-2016a", "bullxmpi": "bullxmpi_1.2.4.1", "scalasca2": "2.0-icc-impi", "openssh-x509": "6.2p2", "mpi.net": "1.0.0-mono-3.12.1", "gimpi": "2.11.5", "R": "3.2.3-intel-2016.01", "Racket": "6.1.1-GNU-5.1.0-2.25", "SCOTCH": "6.0.4-intel-2017a", "fastqc": "0.11.2", "trilinos": "11.2.3-icc", "netcdf-parallel": "4.3.0", "chicken": "4.8.0.6", "OpenBLAS": "0.2.19-GCC-6.3.0-2.27-LAPACK-3.7.0", "blender": "2.71", "PCRE2": "10.22-intel-2017.00", "fontconfig": "2.11.94-intel-2017.00", "Octave": "4.0.1-gimkl-2.11.5", "DCW": "1.1.2", "Qwt": "6.1.2-foss-2015g", "Bash": "4.3", "freetype": "2.6.3-intel-2016a", "cube": "4.2.3-icc", "Valgrind": "3.11.0-intel-2015b", "iimpi": "2017a", "dhi-mike": "default", "tbb": "15.3.187", "guile": "1.8.8-intel-2015b", "PSBLAS": "3.3.4-3-GCC-4.9.3-2.25", "libXfont": "1.5.1-Python-2.7.9", "szip": "2.1", "memoryscape": "3.4", "vampir": "8.2", "libpciaccess": "0.13.4-intel-2016a", "JasPer": "1.900.1-intel-2015b", "racket": "6.0.1", "foss": "2017a", "Boost": "1.61.0-foss-2016a-serial", "FIAT": "1.6.0-intel-2016.01-Python-2.7.9", "PRACE": "prace", "gompi": "2017a", "lux": "1.3.1", "LibTIFF": "4.0.3-intel-2015b", "netCDF-Fortran": "4.4.0-intel-2016.01", "libpng": "1.6.16-intel-2015b", "SuiteSparse": "4.5.3-intel-2017a-ParMETIS-4.0.3", "FOX": "1.6.51-foss-2015g", "DDT": "5.0.1", "libctl": "3.2.2-intel-2015b", "mono": "3.12.1", "valgrind": "3.9.0-impi", "PLUMED": "2.3b-foss-2016a", "SnuCL": "1.3.3-gompi-2015e", "Tcl": "8.6.5-intel-2017a", "libXft": "2.3.2-intel-2015b", "binutils": "2.27-GCCcore-6.3.0", "GPI-2": "1.1.1-gompi-2015e-MPI", "xdrfile": "1.1.4-intel-2015b", "libGLU": "9.0.0-foss-2015g", "otf2": "1.4-icc", "util-linux": "2.28-intel-2016a", 
"lmod": "7.2.2", "MVAPICH2": "2.1-GNU-5.1.0-2.25", "byacc": "20150711-intel-2015b", "java": "1.7", "marc": "2013.1", "elmer": "7.0-r6695-opt", "HTSlib": "1.3-foss-2015g", "MATLAB": "2015b-EDU", "gupc": "4.8.0.3", "abinit": "7.10.1-icc-impi", "numpy": "1.9.1-intel-2015b-Python-2.7.9", "modflow-nwt": "1.0.9-aquaveo", "Adams": "2013.2", "ncurses": "6.0-intel-2017a", "MUMPS": "5.0.2-intel-2017a-parmetis", "Score-P": "3.0-intel-2015b", "ruby": "2.0.0-p247", "Subversion": "1.8.16-foss-2015g", "NAMD": "2.9-mpi", "zlib": "1.2.11-GCCcore-6.3.0", "xtrans": "1.3.5-intel-2016a", "snpEff": "3.6", "ABINIT": "7.10.1-intel-2015b", "libMesh": "0.9.5-intel-2016.01", "motif": "2.3.4-intel-2015b-libX11-1.6.2", "GNU": "5.1.0-2.25-intel-2015b", "almost": "2.1.0-intel-2015b", "libxml2": "2.9.3-intel-2017.00", "expat": "2.1.0-intel-2017.00", "Code_Saturne": "3.0.5", "opencl-rt": "4.5.0.8", "samtools": "0.1.19", "ictce": "8.3.5", "Python": "3.5.2-intel-2017.00", "make": "3.82-intel-2015b", "Mono": "4.2.2.10-intel-2016.01", "FreeFem++": "3.45-intel-2015b", "SAMtools": "1.3-foss-2015g", "SQLite": "3.13.0-intel-2017a", "HPL": "2.1-intel-2015b", "OpenDX": "4.4.4-foss-2015g", "Autoconf": "2.69-GNU-5.1.0-2.25", "RStudio": "0.98.1103", "globus": "globus", "fontsproto": "2.1.3-intel-2016a", "SDE": "7.41.0", "gzip": "1.6-intel-2015b", "gsl": "1.16-icc", "tcl": "8.5.15", "TotalView": "8.15.4-6-linux-x86-64", "MPICH": "3.2-GCC-5.3.1-snapshot-20160419-2.25", "GSL": "2.1-intel-2015b", "libXfixes": "5.0.1-intel-2016a", "OpenFOAM": "3.0.0-intel-2016.01", "SCons": "2.3.6-Python-2.7.9", "iccifort": "2017.1.132-GCC-6.3.0-2.27", "plasma": "2.6.0", "phonopy": "1.11.6.7-intel-2015b-Python-2.7.11", "libXdmcp": "1.1.2-intel-2016a", "Mercurial": "3.7.3-foss-2015g-Python-2.7.9", "xproto": "7.0.28-intel-2016a", "FLTK": "1.3.2-intel-2015b", "hpg-variant": "1.0.0", "libdrm": "2.4.68-intel-2016a", "intel": "2017a", "nwchem": "6.3-rev2-patch1-venus", "adams": "2013.2", "makedepend": "1.0.5-intel-2016a", "numactl": "2.0.11-GCC-6.3.0-2.27", "vtune_xe": "2015.3.0.403110", "Discovery_Studio": "4.0", "wien2k": "14.2", "help2man": "1.47.4-GCCcore-6.3.0", "xbitmaps": "1.1.1-intel-2015b", "Inspector": "2016_update1", "spGPU": "master-GCC-4.9.3-2.25", "JUnit": "4.11-Java-1.7.0_79", "nastran": "2013.1.1", "pkg-config": "0.29-intel-2016a", "Java": "1.8.0_112", "Marc": "2013.1.0", "magma": "1.3.0-mic", "libXrender": "0.9.8-intel-2015b", "inputproto": "2.3-intel-2015b", "libfontenc": "1.1.3-intel-2016a", "GDAL": "2.1.0-GNU-5.1.0-2.25-intel-2015b", "Cube": "4.3.4-intel-2015b", "icc": "2017.1.132-GCC-6.3.0-2.27", "qemu": "2.1.2-vde2", "fftw3-mpi": "3.3.3-icc", "Ruby": "2.3.1", "libXau": "1.0.8-intel-2016a", "Doxygen": "1.8.11-intel-2017a", "gpi2": "1.1.1", "dataspaces": "1.4.0", "RELION": "1.3-intel-2015b", "libXinerama": "1.1.3-intel-2015b", "Amber": "14", "MPC": "1.0.2-intel-2017.00", "LAMMPS": "28Jun14-intel-2015b", "libjpeg-turbo": "1.4.2-intel-2017.00", "perfboost": "1.0", "python-meep": "1.4.2-intel-2015b-Python-2.7.9-Meep-1.3", "petsc": "3.7.3-icc16-impi5-mkl-opt", "ScientificPython": "2.9.4-intel-2016.01-Python-2.7.9", "cuda": "7.5", "scorep": "1.2.3-icc-impi", "OPARI2": "2.0", "MLD2P4": "2.0-rc4-GCC-4.9.3-2.25", "maxwell": "3.0", "VampirServer": "9.0.0-intel-2015b", "spatialindex": "1.8.5-foss-2015g", "vde2": "2.3.2", "paraview": "4.0.1-gcc481-bullxmpi1.2.4.1-osmesa10.0", "Siesta": "4.1-b2-intel-2017.00", "VASP": "5.4.1-intel-2017.00-24Jun15", "git": "2.11.0-GNU-4.9.3-2.25", "lammps": "28Jun14", "mkl": "15.3.187", "xorg-macros": 
"1.19.0-intel-2016a", "likwid": "4.1.2-intel", "OpenCoarrays": "1.4.0-GCC-5.3.1-snapshot-20160419-2.25", "GROMACS": "5.1.2-intel-2016a-hybrid", "libXt": "1.1.5-foss-2015g", "tensorflow": "0.12.0", "libXext": "1.3.3-intel-2016a", "GMT": "5.2.1-foss-2015g", "molpro": "2010.1-p45-intel", "fds": "6.svn", "pest": "13.0", "PerformanceReports": "6.0.6", "netcdf-cxx": "4.2", "impi": "2017-BETA.ENG", "Lmod": "7.2.2", "libmatheval": "1.1.11-intel-2015b", "GLib": "2.40.0-GCC-4.4.7-system", "QCA": "2.1.0-foss-2015g", "scite": "3.4.3", "Tk": "8.6.5-intel-2017a", "hpg-fastq": "1.0.0", "SnpEff": "4.1_G", "libpthread-stubs": "0.3-intel-2016a", "bzip2": "1.0.6-intel-2017a", "cmake": "2.8.11-mic", "gnuplot": "4.6.5", "gettext": "0.19.6-intel-2017.00", "VDE2": "2.3.2-GCC-4.4.7-system", "Advisor": "2017", "glproto": "1.4.17-intel-2016a", "ORCA": "3_0_3-linux_x86-64", "llvm": "3.6.0", "papi": "5.4.0-mic", "Serf": "1.3.8-foss-2015g", "libxc": "2.2.1-intel-2015b", "libtool": "2.4.6-GCC-6.3.0-2.27", "libffi": "3.2.1-GCC-4.4.7-system", "libmesh": "0.9.3-petsc-3.4.4-icc-impi-mkl-opt", "opencl-sdk": "4.6.0.92", "GMP": "6.1.1-intel-2017.00", "PETSc": "3.6.3-intel-2015b-Python-2.7.11", "OpenMPI": "2.0.2-GCC-6.3.0-2.27", "netCDF": "4.4.1-intel-2017a", "Hypre": "2.10.1-intel-2015b", "renderproto": "0.11-intel-2015b", "oscar-modules": "1.0.3"}}
!!! Hint "Cluster Acronyms"
    A - Anselm • S - Salomon • U - uv1 at Salomon
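Each flag in the Clusters column is three characters in the fixed order U, S, A; a letter means the module is installed on that cluster and a dash means it is not, so `US-` reads "uv1 and Salomon, not Anselm" and `--A` reads "Anselm only". Modules are loaded by name and version through Lmod. A minimal sketch, assuming an interactive shell on one of the clusters (the abinit version is taken from the matrix below, but always confirm with `module avail` first):

```console
$ module avail abinit                 # list the versions installed on this cluster
$ module load abinit/7.10.1-icc-impi  # load one specific version
$ module list                         # verify what is currently loaded
```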
| Module </br><input id="searchInput" placeholder="🔍 Filter" style="width: 8rem; border-radius: 0.2rem; color: black; padding-left: .2rem;"> | Versions | Clusters |
| ------ | -------- | -------- |
| abinit | 7.6.2</br>7.10.1-gcc-openmpi</br>7.10.1-icc-impi | `--A`</br>`--A`</br>`--A` |
| ABINIT | 7.10.1-foss-2015b</br>7.10.1-intel-2015b | `US-`</br>`US-` |
| adams | 2013.2 | `--A` |
| Adams | 2013.2 | `-S-` |
| adios | 1.8.0 | `--A` |
| Advisor | 2016_update2</br>2017 | `-S-`</br>`-S-` |
| advisor_xe | 2013.5</br>2015.1.10.380555 | `--A`</br>`--A` |
| aislinn | 20160105-Python-2.7.9-gompi-2015e | `-S-` |
| almost | 2.1.0-foss-2015b</br>2.1.0-foss-2015g</br>2.1.0-foss-2016a</br>2.1.0-intel-2015b | `-S-`</br>`-SA`</br>`-SA`</br>`-S-` |
| Amber | 14 | `-S-` |
| ANSYS | 14.5.x</br>15.0.x</br>16.0.x</br>16.1</br>17.0</br>18.0 | `--A`</br>`--A`</br>`--A`</br>`US-`</br>`US-`</br>`-SA` |
| ant | 1.9.3-Java-1.7.0_79 | `-S-` |
| APR | 1.5.2</br>1.5.2-foss-2015g | `-SA`</br>`-SA` |
| APR-util | 1.5.4</br>1.5.4-foss-2015g | `-SA`</br>`-SA` |
| Armadillo | 7.500.0-foss-2016a-Python-3.5.2 | `-SA` |
| arpack-ng | 3.3.0-foss-2016a</br>3.3.0-intel-2015b</br>3.3.0-intel-2017.00</br>3.4.0-intel-2017.00 | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
| ATLAS | 3.10.1-GCC-4.9.3-2.25-LAPACK-3.4.2 | `--A` |
| Autoconf | 2.69</br>2.69-foss-2015g</br>2.69-foss-2016a</br>2.69-intel-2015b</br>2.69-intel-2016.01</br>2.69-intel-2016a</br>2.69-intel-2017.00</br>2.69-intel-2017a</br>2.69-GCC-4.9.3-2.25</br>2.69-GCC-6.3.0-2.27</br>2.69-GNU-4.9.3-2.25</br>2.69-GNU-5.1.0-2.25 | `USA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`USA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`USA`</br>`USA` |
| Automake | 1.15</br>1.15-foss-2015g</br>1.15-foss-2016a</br>1.15-intel-2015b</br>1.15-intel-2016.01</br>1.15-intel-2016a</br>1.15-intel-2017.00</br>1.15-intel-2017a</br>1.15-GCC-4.9.3-2.25</br>1.15-GCC-6.3.0-2.27</br>1.15-GNU-4.9.3-2.25</br>1.15-GNU-5.1.0-2.25 | `USA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`USA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`USA`</br>`USA` |
| Autotools | 20150215</br>20150215-foss-2016a</br>20150215-intel-2015b</br>20150215-intel-2016.01</br>20150215-intel-2016a</br>20150215-intel-2017.00</br>20150215-intel-2017a</br>20150215-GCC-4.9.3-2.25</br>20150215-GCC-6.3.0-2.27</br>20150215-GNU-4.9.3-2.25</br>20150215-GNU-5.1.0-2.25 | `USA`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`USA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`USA`</br>`USA` |
| Bash | 4.2-intel-2015b</br>4.3 | `US-`</br>`-SA` |
| BCFtools | 1.3-foss-2015g | `-S-` |
| beopest | 12.0.1</br>12.2</br>13.3 | `--A`</br>`--A`</br>`--A` |
| BerkeleyUPC | 2.16.2-gompi-2015b | `-S-` |
| binutils | 2.25-GCC-4.9.3</br>2.27-GCCcore-6.3.0 | `--A`</br>`-SA` |
| Bison | 3.0.4-intel-2015b</br>3.0.4-GCCcore-6.3.0</br>3.0.4-GCC-4.9.3 | `--A`</br>`-SA`</br>`--A` |
| blender | 2.71 | `--A` |
| boost | 1.56-gcc-openmpi</br>1.56-icc-impi | `--A`</br>`--A` |
| Boost | 1.58.0-foss-2015g-Python-2.7.9</br>1.58.0-gompi-2015e-Python-2.7.9</br>1.58.0-ictce-7.3.5-Python-2.7.9</br>1.58.0-intel-2015b-Python-2.7.9</br>1.58.0-intel-2016.01-Python-2.7.9</br>1.58.0-Python-2.7.9</br>1.59.0-intel-2015b</br>1.59.0-intel-2015b-Python-2.7.11</br>1.59.0-intel-2016.01</br>1.60.0-foss-2015g-Python-2.7.9</br>1.60.0-intel-2015b-Python-2.7.11</br>1.60.0-intel-2016a</br>1.61.0-foss-2016a</br>1.61.0-foss-2016a-serial</br>1.63.0-intel-2017a-Python-2.7.11 | `-SA`</br>`-S-`</br>`-S-`</br>`US-`</br>`-S-`</br>`-S-`</br>`--A`</br>`-S-`</br>`-S-`</br>`USA`</br>`-S-`</br>`--A`</br>`-S-`</br>`-SA`</br>`-S-` |
| bowtie2 | 2.2.3 | `--A` |
| bullxde | 2.0 | `-S-` |
| bullxmpi | bullxmpi-1.2.4.3</br>bullxmpi_1.2.4.1 | `--A`</br>`--A` |
| bupc | 2.16.2 | `--A` |
| BWA | 0.7.5a-foss-2015g | `-S-` |
| byacc | 20120526</br>20120526-foss-2015b</br>20120526-foss-2015g</br>20120526-foss-2016a</br>20120526-intel-2015b</br>20150711-intel-2015b | `-SA`</br>`-S-`</br>`-SA`</br>`-SA`</br>`-SA`</br>`--A` |
| bzip2 | 1.0.6-intel-2017a | `-S-` |
| cairo | 1.12.18</br>1.12.18-foss-2015b | `-SA`</br>`-S-` |
| chicken | 4.8.0.6 | `--A` |
| Clang | 3.7.0-GNU-5.1.0-2.25 | `-S-` |
| CMake | 3.0.0-foss-2015g</br>3.0.0-ictce-7.3.5</br>3.0.0-intel-2015b</br>3.0.0-intel-2016.01</br>3.3.1-foss-2015g</br>3.3.1-foss-2016a</br>3.3.1-intel-2016.01</br>3.3.1-GCC-4.9.3-2.25</br>3.3.1-GCC-5.3.0-2.25</br>3.3.1-GCC-5.3.1-snapshot-20160419-2.25</br>3.3.1-GNU-4.9.3-2.25</br>3.3.1-GNU-5.1.0-2.25</br>3.3.2-intel-2016.01</br>3.3.2-GNU-4.9.3-2.25</br>3.4.1-foss-2015b</br>3.4.1-foss-2016a</br>3.4.1-intel-2015b</br>3.4.1-GCCcore-4.9.3</br>3.4.3-intel-2016a</br>3.5.1-intel-2016a</br>3.5.2</br>3.5.2-foss-2016a</br>3.5.2-intel-2016a</br>3.5.2-intel-2017.00</br>3.5.2-GCC-4.9.3-2.25</br>3.6.2</br>3.6.2-intel-2017.00</br>3.7.2-intel-2017a | `-S-`</br>`-S-`</br>`US-`</br>`-S-`</br>`-SA`</br>`--A`</br>`-S-`</br>`--A`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`US-`</br>`--A`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`USA`</br>`--A`</br>`-S-`</br>`-S-`</br>`U-A`</br>`U--`</br>`-S-` |
| cmake | 2.8.11</br>2.8.11-mic | `--A`</br>`--A` |
| Code_Saturne | 3.0.5 | `--A` |
| COMSOL | 51-COM</br>51-EDU | `-S-`</br>`-S-` |
| comsol | 43b-COM</br>43b-EDU</br>44-COM</br>44-EDU</br>50-COM</br>50-EDU | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
| CP2K | 2.6.0-intel-2015b | `-S-` |
| cp2k-mpi | 2.5.1-gcc | `--A` |
| cube | 4.2.3-gcc</br>4.2.3-icc | `--A`</br>`--A` |
| Cube | 4.3.4-intel-2015b | `-S-` |
| CUDA | 7.5.18</br>8.0.44</br>8.0.44-intel-2017.00 | `--A`</br>`--A`</br>`--A` |
| cuda | 6.0.37</br>6.5.14</br>7.5 | `--A`</br>`--A`</br>`--A` |
| cuDNN | 5.1-CUDA-8.0.44 | `--A` |
| cURL | 7.37.1</br>7.37.1-foss-2015g</br>7.37.1-intel-2015b</br>7.37.1-intel-2016.01</br>7.45.0-foss-2015b</br>7.47.0-intel-2017.00</br>7.49.1-intel-2017a</br>7.51.0</br>7.51.0-intel-2017.00 | `USA`</br>`-SA`</br>`US-`</br>`-S-`</br>`US-`</br>`-S-`</br>`-S-`</br>`--A`</br>`US-` |
| dataspaces | 1.4.0 | `--A` |
| DCW | 1.1.2 | `-S-` |
| DDT | 4.2</br>5.0.1 | `-S-`</br>`-S-` |
| dhi-mike | 2014</br>2016</br>2016-SP2</br>default | `--A`</br>`--A`</br>`--A`</br>`--A` |
| digimat | 5.0.1 | `--A` |
| Digimat | 5.0.1-COM</br>5.0.1-EDU | `-S-`</br>`-S-` |
| Discovery_Studio | 4.0 | `--A` |
| Doxygen | 1.8.7-foss-2015g</br>1.8.7-intel-2015b</br>1.8.7-intel-2016.01</br>1.8.10-foss-2015b</br>1.8.10-intel-2017.00</br>1.8.11</br>1.8.11-intel-2017.00</br>1.8.11-intel-2017a | `-S-`</br>`US-`</br>`-S-`</br>`US-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-` |
| dytran | 2013.0.1 | `--A` |
| EasyBuild | 2.8.0</br>2.8.1</br>2.9.0</br>3.0.0</br>3.0.1</br>3.0.2</br>3.1.0 | `-SA`</br>`--A`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA` |
| elmer | 7.0-r6695-dbg</br>7.0-r6695-opt | `--A`</br>`--A` |
| eudev | 3.1.5-foss-2016a</br>3.1.5-intel-2016a | `-S-`</br>`-S-` |
| expat | 2.1.0</br>2.1.0-foss-2015b</br>2.1.0-foss-2015g</br>2.1.0-intel-2015b</br>2.1.0-intel-2017.00 | `USA`</br>`US-`</br>`-SA`</br>`-S-`</br>`US-` |
| FastQC | 0.11.3 | `US-` |
| fastqc | 0.11.2 | `--A` |
| fds | 5.5.3</br>5.5.3-omp</br>6.svn | `--A`</br>`--A`</br>`--A` |
| ffmpeg | 2.4</br>2.4-foss-2015g</br>2.4-intel-2015b | `-SA`</br>`-S-`</br>`-S-` |
| FFTW | 2.1.5-gompi-2015b</br>2.1.5-iimpi-7.3.5-GNU-5.1.0-2.25</br>3.3.4-gompi-2015b</br>3.3.4-gompi-2015e</br>3.3.4-gompi-2015g</br>3.3.4-gompi-2016.04</br>3.3.4-gompi-2016a</br>3.3.4-intel-2015b</br>3.3.4-intel-2016.01</br>3.3.5-foss-2016a</br>3.3.5-gompi-2016a</br>3.3.5-intel-2016.01</br>3.3.5-intel-2016a</br>3.3.5-intel-2017.00</br>3.3.6-gompi-2017a | `-S-`</br>`-S-`</br>`US-`</br>`US-`</br>`USA`</br>`-SA`</br>`USA`</br>`USA`</br>`-S-`</br>`US-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`USA`</br>`-SA` |
| fftw2 | 2.1.5-gcc</br>2.1.5-icc | `--A`</br>`--A` |
| fftw2-mpi | 2.1.5-gcc</br>2.1.5-icc | `--A`</br>`--A` |
| fftw3 | 3.3.3-gcc</br>3.3.3-icc | `--A`</br>`--A` |
| fftw3-mpi | 3.3.3-gcc</br>3.3.3-icc | `--A`</br>`--A` |
| FIAT | 1.6.0-intel-2015b-Python-2.7.9</br>1.6.0-intel-2015b-Python-2.7.11</br>1.6.0-intel-2016.01-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-` |
| fixesproto | 5.0</br>5.0-foss-2015g</br>5.0-foss-2016a</br>5.0-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
| flex | 2.5.39-foss-2016a</br>2.5.39-GCC-4.9.3</br>2.6.0-intel-2017a</br>2.6.3-GCCcore-6.3.0 | `-SA`</br>`--A`</br>`-S-`</br>`--A` |
| FLTK | 1.3.2</br>1.3.2-intel-2015b | `-SA`</br>`-S-` |
| fontconfig | 2.11.1</br>2.11.1-foss-2015b</br>2.11.1-intel-2015b</br>2.11.94-intel-2017.00 | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
| fontsproto | 2.1.3</br>2.1.3-foss-2015g</br>2.1.3-foss-2016a</br>2.1.3-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
| Forge | 5.1-43967</br>5.7</br>6.0.5</br>6.0.6</br>6.1.2</br>7.0 | `-SA`</br>`--A`</br>`-SA`</br>`-SA`</br>`-SA`</br>`-S-` |
| foss | 2015b</br>2015e</br>2015g</br>2016.04</br>2016a</br>2017a | `US-`</br>`US-`</br>`USA`</br>`-SA`</br>`USA`</br>`-SA` |
| FOX | 1.6.51-foss-2015g | `-S-` |
| FreeFem++ | 3.45-intel-2015b | `-S-` |
| freetype | 2.5.3</br>2.5.3-foss-2015b</br>2.5.3-foss-2015g</br>2.5.3-intel-2015b</br>2.5.5-intel-2015b</br>2.6.2-intel-2016a</br>2.6.2-intel-2017.00</br>2.6.3-foss-2016a</br>2.6.3-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
| gatk | 2.6-4 | `--A` |
| GATK | 2.6-5-Java-1.7.0_79</br>3.5-Java-1.7.0_79 | `US-`</br>`-S-` |
| gcc | 4.8.1</br>4.9.0</br>5.4.0 | `--A`</br>`--A`</br>`--A` |
| GCC | 4.4.7-system</br>4.7.4</br>4.8.3</br>4.9.2</br>4.9.2-binutils-2.25</br>4.9.3</br>4.9.3-2.25</br>4.9.3-binutils-2.25</br>5.1.0-binutils-2.25</br>5.2.0</br>5.3.0-2.25</br>5.3.0-2.26</br>5.3.0-binutils-2.25</br>5.3.1-snapshot-20160419-2.25</br>5.4.0-2.26</br>6.2.0-2.27</br>6.3.0-2.27 | `US-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`USA`</br>`USA`</br>`USA`</br>`USA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`US-`</br>`-SA` |
| GCCcore | 4.9.3</br>5.3.0</br>5.3.1-snapshot-20160419</br>5.4.0</br>6.2.0</br>6.3.0 | `USA`</br>`-SA`</br>`-S-`</br>`USA`</br>`US-`</br>`-SA` |
| GDAL | 1.9.2-foss-2015g</br>2.0.1-foss-2015b</br>2.0.2-intel-2017.00</br>2.1.0-foss-2015g</br>2.1.0-intel-2015b</br>2.1.0-GNU-5.1.0-2.25</br>2.1.0-GNU-5.1.0-2.25-intel-2015b | `-SA`</br>`US-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
| GEOS | 3.5.0-foss-2015g | `-S-` |
| gettext | 0.19.4</br>0.19.4-GCC-4.4.7-system</br>0.19.6-intel-2017.00</br>0.19.8 | `--A`</br>`U--`</br>`--A`</br>`-SA` |
| gimkl | 2.11.5 | `--A` |
| gimpi | 2.11.5 | `--A` |
| git | 2.8.0-intel-2017.00</br>2.8.0-GNU-4.9.3-2.25</br>2.9.0</br>2.9.2</br>2.11.0</br>2.11.0-intel-2017.00</br>2.11.0-GNU-4.9.3-2.25</br>2.11.1-GNU-4.9.3-2.25 | `US-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`U--`</br>`--A`</br>`--A` |
| GLib | 2.40.0</br>2.40.0-foss-2015b</br>2.40.0-foss-2015g</br>2.40.0-intel-2015b</br>2.40.0-intel-2016.01</br>2.40.0-GCC-4.4.7-system | `-SA`</br>`U--`</br>`-S-`</br>`US-`</br>`-S-`</br>`U--` |
| GLM | 0.9.7.2-intel-2017a | `-S-` |
| GLOBUS | globus | `--A` |
| globus | globus | `-S-` |
| glproto | 1.4.16-foss-2015g</br>1.4.17-foss-2016a</br>1.4.17-intel-2016a | `-S-`</br>`-S-`</br>`-S-` |
| GMP | 5.0.5</br>5.0.5-foss-2015b</br>5.0.5-foss-2015g</br>5.0.5-intel-2015b</br>6.0.0a</br>6.0.0a-foss-2015b</br>6.0.0a-intel-2015b</br>6.0.0a-GNU-4.9.3-2.25</br>6.0.0a-GNU-5.1.0-2.25</br>6.1.0-foss-2016a</br>6.1.0-intel-2015b</br>6.1.0-intel-2016.01</br>6.1.0-intel-2016a</br>6.1.0-intel-2017.00</br>6.1.0-GCC-4.9.3-2.25</br>6.1.1-intel-2017.00</br>6.1.1-intel-2017a | `-SA`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-SA`</br>`-S-`</br>`--A`</br>`-S-`</br>`-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`USA`</br>`-S-`</br>`-S-`</br>`-S-` |
| GMT | 5.2.1-foss-2015g | `-S-` |
| GNU | 4.9.3-2.25</br>5.1.0-2.25</br>5.1.0-2.25-intel-2015b | `USA`</br>`USA`</br>`-S-` |
| gnuplot | 4.6.5 | `--A` |
| gompi | 2015b</br>2015e</br>2015g</br>2016.04</br>2016a</br>2017a | `US-`</br>`US-`</br>`USA`</br>`-SA`</br>`USA`</br>`-SA` |
| gperf | 3.0.4-foss-2015g</br>3.0.4-foss-2016a</br>3.0.4-intel-2016a | `-S-`</br>`-S-`</br>`-S-` |
| GPI-2 | 1.1.1-gompi-2015e</br>1.1.1-gompi-2015e-MPI | `-S-`</br>`-S-` |
| gpi2 | 1.0.2</br>1.1.0</br>1.1.1 | `--A`</br>`--A`</br>`--A` |
| grace | 5.1.23</br>5.1.25-intel-2015b | `--A`</br>`-S-` |
| GROMACS | 4.6.7-foss-2015g-hybrid-single-PLUMED</br>5.0.4-foss-2015e-hybrid-single-PLUMED</br>5.0.4-foss-2015g-hybrid-single</br>5.0.4-foss-2015g-hybrid-single-PLUMED</br>5.0.4-ictce-7.3.5-hybrid-single</br>5.1.2-foss-2015g-hybrid-single-PLUMED</br>5.1.2-intel-2015b-hybrid-single-cuda</br>5.1.2-intel-2015b-hybrid-single-CUDA-7.5-PLUMED-2.2.1</br>5.1.2-intel-2015b-hybrid-single-CUDA-7.5-PLUMED-2.2.1-test</br>5.1.2-intel-2016a-hybrid</br>5.1.4-foss-2016a-hybrid-single-PLUMED | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`--A`</br>`--A`</br>`--A`</br>`--A`</br>`-S-` |
| gsl | 1.16-gcc</br>1.16-icc | `--A`</br>`--A` |
| GSL | 1.16-intel-2015b</br>1.16-intel-2016.01</br>2.1-intel-2015b | `-SA`</br>`--A`</br>`--A` |
| guile | 1.8.8</br>1.8.8-foss-2015b</br>1.8.8-foss-2015g</br>1.8.8-foss-2016a</br>1.8.8-intel-2015b | `-SA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-SA` |
| gupc | 4.8.0.3 | `--A` |
| gzip | 1.6</br>1.6-foss-2015g</br>1.6-foss-2016a</br>1.6-intel-2015b | `-SA`</br>`USA`</br>`-SA`</br>`-SA` |
| h5py | 2.4.0-ictce-7.3.5-Python-2.7.9-serial | `-S-` |
| Harminv | 1.4-intel-2015b | `-S-` |
| HDF5 | 1.8.13-foss-2015g</br>1.8.13-intel-2015b</br>1.8.13-intel-2015b-no-mpi</br>1.8.13-intel-2016.01</br>1.8.14-ictce-7.3.5-serial</br>1.8.15-patch1-foss-2015b</br>1.8.16-foss-2015g</br>1.8.16-foss-2016a</br>1.8.16-intel-2015b</br>1.8.16-intel-2015b-threadsafe</br>1.8.16-intel-2016.01</br>1.8.16-intel-2017.00</br>1.8.17-intel-2017a</br>1.10.0-patch1-intel-2016.01-mic | `-S-`</br>`US-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`US-`</br>`-S-`</br>`USA`</br>`USA`</br>`-S-`</br>`--A`</br>`-S-`</br>`-S-`</br>`-S-` |
| hdf5 | 1.8.11</br>1.8.13 | `--A`</br>`--A` |
| hdf5-parallel | 1.8.11</br>1.8.11-gcc</br>1.8.13</br>1.8.13-gcc</br>1.8.13-gcc49 | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
| help2man | 1.47.4-GCCcore-6.3.0 | `-SA` |
| hpg-aligner | 1.0.0 | `--A` |
| hpg-fastq | 1.0.0 | `--A` |
| hpg-variant | 1.0.0 | `--A` |
| HPL | 2.1-foss-2015b</br>2.1-intel-2015b | `-S-`</br>`-S-` |
| HTSlib | 1.3-foss-2015g | `-S-` |
| hwloc | 1.5-GCC-4.4.7-system</br>1.11.0</br>1.11.0-GNU-4.9.3-2.25</br>1.11.0-GNU-5.1.0-2.25</br>1.11.1-iccifort-2015.3.187-GNU-4.9.3-2.25</br>1.11.2-GCC-4.9.3-2.25</br>1.11.3-GCC-5.3.0-2.26</br>1.11.4-iccifort-2017.1.132-GCC-5.4.0-2.26</br>1.11.4-GCC-6.2.0-2.27</br>1.11.5-GCC-6.3.0-2.27 | `-S-`</br>`-SA`</br>`USA`</br>`USA`</br>`-SA`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-SA` |
| hypermesh | 12.0.110 | `--A` |
| HyperWorks | 13.0 | `-S-` |
| hyperworks | 13.0 | `--A` |
| Hypre | 2.10.0b-intel-2015b</br>2.10.0b-intel-2016.01</br>2.10.1-intel-2015b</br>2.11.1-intel-2017a | `-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
| icc | 2013.5.192</br>2013.5.192-GCC-4.8.3</br>2015.3.187</br>2015.3.187-GNU-4.9.3-2.25</br>2015.3.187-GNU-5.1.0-2.25</br>2016.0.109-GCC-4.9.3</br>2016.1.150</br>2016.1.150-GCC-4.9.3</br>2016.1.150-GCC-4.9.3-2.25</br>2016.3.210-GCC-5.3.0-2.26</br>2017.0.098-GCC-5.4.0-2.26</br>2017.1.132-GCC-5.4.0-2.26</br>2017.1.132-GCC-6.3.0-2.27 | `-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`USA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-SA` |
| iccifort | 2013.5.192</br>2013.5.192-GCC-4.8.3</br>2015.3.187</br>2015.3.187-GNU-4.9.3-2.25</br>2015.3.187-GNU-5.1.0-2.25</br>2016.0.109-GCC-4.9.3</br>2016.1.150</br>2016.1.150-GCC-4.9.3</br>2016.1.150-GCC-4.9.3-2.25</br>2016.3.210-GCC-5.3.0-2.26</br>2017.0.098-GCC-5.4.0-2.26</br>2017.1.132-GCC-5.4.0-2.26</br>2017.1.132-GCC-6.3.0-2.27 | `-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`USA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-SA` |
| ictce | 5.5.0</br>7.3.5</br>8.3.5 | `-S-`</br>`-S-`</br>`-S-` |
| ifort | 2013.5.192</br>2013.5.192-GCC-4.8.3</br>2015.3.187</br>2015.3.187-GNU-4.9.3-2.25</br>2015.3.187-GNU-5.1.0-2.25</br>2016.0.109-GCC-4.9.3</br>2016.1.150</br>2016.1.150-GCC-4.9.3</br>2016.1.150-GCC-4.9.3-2.25</br>2016.3.210-GCC-5.3.0-2.26</br>2017.0.098-GCC-5.4.0-2.26</br>2017.1.132-GCC-5.4.0-2.26</br>2017.1.132-GCC-6.3.0-2.27 | `-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`USA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-SA` |
| iimpi | 5.5.0</br>5.5.0-GCC-4.8.3</br>7.3.5</br>7.3.5-GNU-5.1.0-2.25</br>8.1.5-GCC-4.9.3-2.25</br>8.3.5</br>2016.00-GCC-4.9.3</br>2016.01-GCC-4.9.3</br>2016.01-GCC-4.9.3-2.25</br>2016.03-GCC-5.3.0-2.26</br>2017.00-GCC-5.4.0-2.26</br>2017.01-GCC-5.4.0-2.26</br>2017a | `-S-`</br>`-S-`</br>`-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-SA` |
| imkl | 11.0.5.192-iimpi-5.5.0</br>11.0.5.192-iimpi-5.5.0-GCC-4.8.3</br>11.2.3.187</br>11.2.3.187-gimpi-2.11.5</br>11.2.3.187-iimpi-7.3.5</br>11.2.3.187-iimpi-7.3.5-GNU-5.1.0-2.25</br>11.2.3.187-iompi-2015.03</br>11.3.0.109-iimpi-2016.00-GCC-4.9.3</br>11.3.1.150-iimpi-8.1.5-GCC-4.9.3-2.25</br>11.3.1.150-iimpi-8.3.5</br>11.3.1.150-iimpi-2016.00-GCC-4.9.3</br>11.3.1.150-iimpi-2016.01-GCC-4.9.3-2.25</br>11.3.3.210-iimpi-2016.03-GCC-5.3.0-2.26</br>2017.0.098-iimpi-2017.00-GCC-5.4.0-2.26</br>2017.1.132-iimpi-2017.01-GCC-5.4.0-2.26</br>2017.1.132-iimpi-2017a | `-S-`</br>`-S-`</br>`-S-`</br>`--A`</br>`-S-`</br>`USA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-SA` |
| impi | 4.1.1.036</br>4.1.1.036-iccifort-2013.5.192</br>4.1.1.036-iccifort-2013.5.192-GCC-4.8.3</br>5.0.3.048</br>5.0.3.048-iccifort-2015.3.187</br>5.0.3.048-iccifort-2015.3.187-GNU-5.1.0-2.25</br>5.0.3.048-GCC-4.9.3</br>5.1.1.109-iccifort-2016.0.109-GCC-4.9.3</br>5.1.2.150-iccifort-2016.1.150</br>5.1.2.150-iccifort-2016.1.150-GCC-4.9.3</br>5.1.2.150-iccifort-2016.1.150-GCC-4.9.3-2.25</br>5.1.3.181-iccifort-2016.3.210-GCC-5.3.0-2.26</br>2017.0.098-iccifort-2017.0.098-GCC-5.4.0-2.26</br>2017.1.132-iccifort-2017.1.132-GCC-5.4.0-2.26</br>2017.1.132-iccifort-2017.1.132-GCC-6.3.0-2.27</br>2017-BETA.ENG | `--A`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`--A`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-SA`</br>`-S-` |
| inputproto | 2.3</br>2.3.1-foss-2016a</br>2.3.1-intel-2016a</br>2.3-foss-2015g</br>2.3-intel-2015b | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
| Inspector | 2016_update1 | `-S-` |
| inspector_xe | 2013.5</br>2015.1.2.379161 | `--A`</br>`--A` |
| intel | 13.5.192</br>14.0.1</br>15.2.164</br>15.3.187</br>2014.06</br>2015b</br>2015b-intel-2015b</br>2016.00</br>2016.01</br>2016.03-GCC-5.3</br>2016a</br>2017.00</br>2017.01</br>2017a | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`-S-`</br>`USA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-SA`</br>`USA`</br>`-SA`</br>`-SA` |
| intelpcm | 2.6 | `--A` |
| iompi | 2017.01 | `-S-` |
| ipm | 0.983-icc-impi | `--A` |
| ipp | 9.0.1.150</br>13.5.192</br>14.0.1</br>15.2.164</br>15.3.187 | `-S-`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
| ISL | 0.14-GNU-5.1.0-2.25</br>0.15</br>0.15-GCC-4.9.3-2.25</br>0.15-GNU-4.9.3-2.25 | `-S-`</br>`-SA`</br>`-S-`</br>`-S-` |
| itac | 8.1.4.045</br>9.0.3.051</br>9.1.2.024 | `--A`</br>`--A`</br>`-S-` |
| JasPer | 1.900.1-intel-2015b | `-S-` |
| java | 1.7 | `--A` |
| Java | 1.7.0_79</br>1.8.0_51</br>1.8.0_72</br>1.8.0_112</br>1.8.0_121 | `USA`</br>`USA`</br>`US-`</br>`-S-`</br>`-S-` |
| JOE | 4.2 | `-SA` |
| JUnit | 4.11-Java-1.7.0_79 | `-S-` |
| kbproto | 1.0.6</br>1.0.6-foss-2015g</br>1.0.6-intel-2015b</br>1.0.7</br>1.0.7-foss-2016a</br>1.0.7-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
| lam | 7.1.4-icc | `--A` |
| LAMMPS | 28Jun14-intel-2015b | `-S-` |
| lammps | 28Jun14 | `--A` |
| LAPACKE | 3.5.0-LAPACK-3.5.0 | `-S-` |
| libctl | 3.2.2-intel-2015b | `-S-` |
| libdrm | 2.4.27</br>2.4.27-foss-2015g</br>2.4.67-intel-2016a</br>2.4.68-foss-2016a</br>2.4.68-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
| libevent | 2.1.8 | `-SA` |
| libffi | 3.0.13</br>3.0.13-foss-2015b</br>3.0.13-foss-2015g</br>3.0.13-intel-2015b</br>3.1-foss-2015b</br>3.1-intel-2015b</br>3.1-intel-2016.01</br>3.1-GNU-5.1.0-2.25</br>3.2.1-foss-2016a</br>3.2.1-intel-2017.00</br>3.2.1-intel-2017a</br>3.2.1-GCC-4.4.7-system | `-SA`</br>`US-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`U--` |
| libfontenc | 1.1.3</br>1.1.3-foss-2015g</br>1.1.3-foss-2016a</br>1.1.3-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
| libgdiplus | 3.12</br>3.12-intel-2016.01</br>3.12-GCC-4.4.7-system</br>3.12-GNU-5.1.0-2.25 | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
| libGLU | 9.0.0-foss-2015g | `-S-` |
| libICE | 1.0.9</br>1.0.9-foss-2015g</br>1.0.9-intel-2015b | `-SA`</br>`-S-`</br>`-S-` |
| Libint | 1.1.4-foss-2015b</br>1.1.4-gompi-2015b</br>1.1.4-intel-2015b | `-S-`</br>`-S-`</br>`US-` |
| libjpeg-turbo | 1.3.1-foss-2015b</br>1.3.1-intel-2015b</br>1.4.0</br>1.4.0-foss-2015g</br>1.4.0-intel-2015b</br>1.4.1-foss-2015b</br>1.4.2-intel-2017.00 | `-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`US-`</br>`-S-` |
| libmatheval | 1.1.8</br>1.1.8-foss-2015b</br>1.1.8-foss-2015g</br>1.1.8-foss-2016a</br>1.1.8-intel-2015b</br>1.1.11-intel-2015b | `-SA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-SA`</br>`--A` |
| libMesh | 0.9.5-intel-2016.01 | `-S-` |
| libmesh | 0.9.3-petsc-3.4.4-icc-impi-mkl-dbg</br>0.9.3-petsc-3.4.4-icc-impi-mkl-dbg-2d</br>0.9.3-petsc-3.4.4-icc-impi-mkl-opt | `--A`</br>`--A`</br>`--A` |
| libpciaccess | 0.13.1</br>0.13.1-foss-2015g</br>0.13.4-foss-2016a</br>0.13.4-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
| libpng | 1.6.16-intel-2015b | `-S-` |
| libpthread-stubs | 0.3</br>0.3-foss-2015g</br>0.3-foss-2016a</br>0.3-intel-2015b</br>0.3-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
| libreadline | 6.3-gimkl-2.11.5</br>6.3-intel-2017a | `--A`</br>`-S-` |
| libSM | 1.2.2</br>1.2.2-foss-2015g</br>1.2.2-intel-2015b | `-SA`</br>`-S-`</br>`-S-` |
| LibTIFF | 4.0.3</br>4.0.3-intel-2015b | `-SA`</br>`-S-` |
| libtool | 2.4.6-intel-2017a</br>2.4.6-GCC-6.3.0-2.27 | `-S-`</br>`-SA` |
| libunistring | 0.9.3</br>0.9.3-foss-2015b</br>0.9.3-foss-2015g</br>0.9.3-foss-2016a</br>0.9.3-intel-2015b | `-SA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-SA` |
| libunwind | 1.1-GCC-5.4.0-2.26 | `-S-` |
| libX11 | 1.6.2-foss-2015g-Python-2.7.9</br>1.6.2-intel-2015b-Python-2.7.9</br>1.6.2-Python-2.7.8</br>1.6.2-Python-2.7.9</br>1.6.3-foss-2016a</br>1.6.3-intel-2016a | `-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-` |
| libXau | 1.0.8</br>1.0.8-foss-2015g</br>1.0.8-foss-2016a</br>1.0.8-intel-2015b</br>1.0.8-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
| libxc | 2.2.0-foss-2015b</br>2.2.0-gompi-2015b</br>2.2.1-intel-2015b | `-S-`</br>`-S-`</br>`US-` |
| libxcb | 1.10-Python-2.7.8</br>1.11.1-foss-2016a</br>1.11.1-intel-2016a</br>1.11-foss-2015g-Python-2.7.9</br>1.11-intel-2015b-Python-2.7.9</br>1.11-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA` |
| libXdamage | 1.1.4-foss-2015g-Python-2.7.9</br>1.1.4-foss-2016a</br>1.1.4-intel-2016a | `-S-`</br>`-S-`</br>`-S-` |
| libXdmcp | 1.1.2</br>1.1.2-foss-2015g</br>1.1.2-foss-2016a</br>1.1.2-intel-2015b</br>1.1.2-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
| libXext | 1.3.2-Python-2.7.8</br>1.3.3</br>1.3.3-foss-2015g</br>1.3.3-foss-2015g-Python-2.7.9</br>1.3.3-foss-2016a</br>1.3.3-intel-2015b</br>1.3.3-intel-2016a | `-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
| libXfixes | 5.0.1</br>5.0.1-foss-2015g</br>5.0.1-foss-2016a</br>5.0.1-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
| libXfont | 1.5.1-foss-2015g-Python-2.7.9</br>1.5.1-foss-2016a-freetype-2.6.3</br>1.5.1-intel-2016a</br>1.5.1-intel-2016a-freetype-2.6.3</br>1.5.1-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA` |
| libXft | 2.3.2-intel-2015b | `-S-` |
| libXinerama | 1.1.3-intel-2015b | `-S-` |
| libxml2 | 2.9.2</br>2.9.2-foss-2015b</br>2.9.2-foss-2015g</br>2.9.2-foss-2015g-Python-2.7.9</br>2.9.2-gompi-2015e</br>2.9.2-ictce-7.3.5</br>2.9.2-intel-2015b</br>2.9.2-GCC-4.4.7-system</br>2.9.2-GCC-4.9.3-2.25</br>2.9.2-GNU-4.9.3-2.25</br>2.9.2-GNU-5.1.0-2.25</br>2.9.3-foss-2016a</br>2.9.3-intel-2016a</br>2.9.3-intel-2017.00 | `-S-`</br>`US-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`U--`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-SA`</br>`US-` |
| libXrender | 0.9.8</br>0.9.8-intel-2015b | `-S-`</br>`-S-` |
| libxslt | 1.1.28-intel-2015b | `-S-` |
| libXt | 1.1.4-foss-2015g-libX11-1.6.2</br>1.1.4-intel-2015b-libX11-1.6.2</br>1.1.4-libX11-1.6.2</br>1.1.5-foss-2015g | `-S-`</br>`-S-`</br>`-SA`</br>`-S-` |
| libyaml | 0.1.6-intel-2015b | `-S-` |
| likwid | 3.1.1-icc</br>3.1.1-mic</br>3.1.2-gcc</br>3.1.2-icc</br>3.1.2-mic</br>4.1.2-gcc</br>4.1.2-intel | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A`</br>`-S-`</br>`-S-` |
| LLVM | 3.7.1-foss-2015g</br>3.7.1-intel-2016a</br>3.8.0-foss-2016a</br>3.8.0-intel-2016a</br>3.9.0-intel-2017.00 | `-S-`</br>`-S-`</br>`US-`</br>`-S-`</br>`US-` |
| llvm | 3.6.0 | `--A` |
| lmod | 7.2.2 | `USA` |
| Lmod | 7.0.6</br>7.2.2 | `-S-`</br>`-SA` |
| lsdyna | 7.x.x | `--A` |
| lsprepost | 4.2 | `--A` |
| Lua | 5.1.4-8 | `USA` |
| lux | 1.3.1 | `--A` |
| lxml | 3.4.4-intel-2015b-Python-2.7.9 | `-S-` |
| M4 | 1.4.16-foss-2015g</br>1.4.16-intel-2015b</br>1.4.17</br>1.4.17-foss-2015b</br>1.4.17-foss-2015g</br>1.4.17-foss-2016a</br>1.4.17-intel-2015b</br>1.4.17-intel-2016.01</br>1.4.17-intel-2016a</br>1.4.17-intel-2017.00</br>1.4.17-GCCcore-4.9.3</br>1.4.17-GCCcore-5.3.0</br>1.4.17-GCCcore-5.3.1-snapshot-20160419</br>1.4.17-GCCcore-5.4.0</br>1.4.17-GCCcore-6.2.0</br>1.4.17-GCC-4.9.3</br>1.4.17-GCC-4.9.3-2.25</br>1.4.17-GCC-4.9.3-binutils-2.25</br>1.4.17-GCC-5.1.0-binutils-2.25</br>1.4.17-GNU-4.9.3-2.25</br>1.4.17-GNU-5.1.0-2.25</br>1.4.18-GCCcore-6.3.0 | `-S-`</br>`-SA`</br>`USA`</br>`US-`</br>`-S-`</br>`USA`</br>`USA`</br>`-S-`</br>`-S-`</br>`USA`</br>`USA`</br>`-SA`</br>`-S-`</br>`USA`</br>`US-`</br>`--A`</br>`-S-`</br>`USA`</br>`-SA`</br>`USA`</br>`USA`</br>`-SA` |
| magma | 1.1.0-mic</br>1.3.0-mic | `--A`</br>`--A` |
| make | 3.82</br>3.82-intel-2015b | `-SA`</br>`US-` |
| makedepend | 1.0.4</br>1.0.4-foss-2015g</br>1.0.5-foss-2016a</br>1.0.5-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
| MAP | 4.2</br>5.0.1 | `-S-`</br>`-S-` |
| marc | 2011</br>2013.1 | `--A`</br>`--A` |
| Marc | 2013.1.0 | `-S-` |
| MATIO | 1.5.2-intel-2017a | `-S-` |
| matlab | R2013a-COM</br>R2013a-EDU</br>R2014a-COM</br>R2014a-EDU | `--A`</br>`--A`</br>`--A`</br>`--A` |
| MATLAB | 2015a-COM</br>2015a-EDU</br>2015b-COM</br>2015b-EDU | `US-`</br>`US-`</br>`-SA`</br>`-SA` |
| matplotlib | 1.4.3-intel-2015b-Python-2.7.9 | `-S-` |
| Maven | 3.3.9 | `USA` |
| maxwell | 3.0 | `--A` |
| Meep | 1.3-intel-2015b | `-S-` |
| memoryscape | 3.4 | `--A` |
| mercurial | 2.9.1 | `--A` |
| Mercurial | 3.5-Python-2.7.9</br>3.7.3-foss-2015g-Python-2.7.9 | `-S-`</br>`USA` |
| Mesa | 11.0.8-foss-2015g-Python-2.7.9</br>11.2.1-foss-2016a | `-S-`</br>`-S-` |
| METIS | 5.1.0-intel-2015b</br>5.1.0-intel-2016.01</br>5.1.0-intel-2016.01-32bitIDX</br>5.1.0-intel-2017.00 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
| MIKE | 2014</br>2016</br>2016-SP2</br>default | `-SA`</br>`-SA`</br>`-SA`</br>`--A` |
| mkl | 13.5.192</br>14.0.1</br>15.2.164</br>15.3.187 | `--A`</br>`--A`</br>`--A`</br>`--A` |
| MLD2P4 | 2.0-rc4-GCC-4.9.3-2.25 | `--A` |
| modflow-2005 | 1.11.00 | `--A` |
| modflow-nwt | 1.0.9</br>1.0.9-aquaveo | `--A`</br>`--A` |
| Molpro | 2010.1-patch-57-intel2015b | `-S-` |
| molpro | 2010.1-p45-intel | `--A` |
| mono | 3.2.3</br>3.12.1 | `--A`</br>`--A` |
| Mono | 3.12.1</br>3.12.1-GCC-4.4.7-system</br>4.0.3.20-GNU-5.1.0-2.25</br>4.2.2.10-intel-2016.01</br>4.6.2.16 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA` |
| motif | 2.3.4-foss-2015g-libX11-1.6.2</br>2.3.4-intel-2015b-libX11-1.6.2 | `-S-`</br>`-S-` |
| MPC | 1.0.2-intel-2017.00 | `-S-` |
| MPFR | 3.1.5-intel-2017.00 | `-S-` |
| mpi.net | 1.0.0</br>1.0.0-impi</br>1.0.0-mono-3.12.1 | `--A`</br>`--A`</br>`--A` |
| MPI_NET | 1.2.0-gompi-2015e</br>1.2.0-intel-2016.01 | `-S-`</br>`-S-` |
| MPICH | 3.2-GCC-4.9.3-2.25</br>3.2-GCC-5.3.0-2.25</br>3.2-GCC-5.3.1-snapshot-20160419-2.25 | `--A`</br>`-S-`</br>`-S-` |
| mpt | 2.12 | `-S-` |
| MUMPS | 5.0.2-intel-2017a-parmetis | `-S-` |
| mvapich2 | 1.9-gcc</br>1.9-gcc46</br>1.9-icc | `--A`</br>`--A`</br>`--A` |
| MVAPICH2 | 2.1-iccifort-2015.3.187-GNU-5.1.0-2.25</br>2.1-GCC-4.4.7-system</br>2.1-GNU-5.1.0-2.25 | `-S-`</br>`-S-`</br>`-S-` |
| mxml | 2.9 | `--A` |
| namd | 2.8 | `--A` |
| NAMD | 2.9-mpi | `-S-` |
| NASM | 2.11.05</br>2.11.05-foss-2015b</br>2.11.05-foss-2015g</br>2.11.05-intel-2015b</br>2.11.06-intel-2015b</br>2.11.08-foss-2015b</br>2.11.08-intel-2017.00 | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`US-`</br>`-S-` |
| nastran | 2013.1.1 | `--A` |
| ncurses | 5.9-gimkl-2.11.5</br>5.9-GCC-4.9.3-2.25</br>6.0-intel-2017a | `--A`</br>`--A`</br>`-S-` |
| netcdf | 4.2.1.1</br>4.3.0 | `--A`</br>`--A` |
| netCDF | 4.3.2-foss-2015g</br>4.3.2-intel-2015b</br>4.3.2-intel-2016.01</br>4.3.3.1-foss-2015b</br>4.3.3.1-intel-2017.00</br>4.4.0-intel-2017.00</br>4.4.1-intel-2017a | `-S-`</br>`US-`</br>`-S-`</br>`US-`</br>`-S-`</br>`-S-`</br>`-S-` |
| netcdf-cxx | 4.2 | `--A` |
| netcdf-fortran | 4.2 | `--A` |
| netCDF-Fortran | 4.4.0-intel-2016.01 | `-S-` |
| netcdf-parallel | 4.3.0 | `--A` |
| ngsPipeline | 1.0.0 | `--A` |
| numactl | 2.0.9</br>2.0.9-GCC-4.4.7-system</br>2.0.10</br>2.0.10-iccifort-2015.3.187-GNU-4.9.3-2.25</br>2.0.10-GNU-4.9.3-2.25</br>2.0.10-GNU-5.1.0-2.25</br>2.0.11</br>2.0.11-GCCcore-5.4.0</br>2.0.11-GCC-4.9.3-2.25</br>2.0.11-GCC-5.3.0-2.26</br>2.0.11-GCC-6.2.0-2.27</br>2.0.11-GCC-6.3.0-2.27 | `--A`</br>`-S-`</br>`-SA`</br>`-SA`</br>`USA`</br>`USA`</br>`-SA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-SA` |
| numpy | 1.8.2-intel-2015b-Python-2.7.9</br>1.8.2-intel-2015b-Python-2.7.11</br>1.8.2-intel-2016.01-Python-2.7.9</br>1.9.1-intel-2015b-Python-2.7.9 | `-SA`</br>`-SA`</br>`-SA`</br>`-S-` |
| NWChem | 6.3.revision2-2013-10-17-Python-2.7.8</br>6.5.revision26243-intel-2015b-2014-09-10-Python-2.7.8 | `-S-`</br>`-S-` |
| nwchem | 6.1.1</br>6.3-rev2-patch1</br>6.3-rev2-patch1-openmpi</br>6.3-rev2-patch1-venus | `--A`</br>`--A`</br>`--A`</br>`--A` |
| Octave | 3.8.2-foss-2015g</br>3.8.2-gimkl-2.11.5</br>3.8.2-intel-2015b</br>4.0.0-foss-2015g</br>4.0.1-gimkl-2.11.5 | `-S-`</br>`--A`</br>`-S-`</br>`-SA`</br>`--A` |
| opari2 | 1.1.2-gcc</br>1.1.2-icc | `--A`</br>`--A` |
| OPARI2 | 1.1.4-intel-2015b</br>2.0 | `-S-`</br>`-SA` |
| OpenBLAS | 0.2.14-gompi-2015e-LAPACK-3.5.0</br>0.2.14-GNU-4.9.3-2.25-LAPACK-3.5.0</br>0.2.14-GNU-5.1.0-2.25-LAPACK-3.5.0</br>0.2.14-LAPACK-3.5.0</br>0.2.15-GCC-4.9.3-2.25-LAPACK-3.6.0</br>0.2.15-GCC-5.1.0-binutils-2.25-LAPACK-3.6.0</br>0.2.15-GCC-5.1.0-binutils-2.25-LAPACK-3.6.0-gompi-2016a</br>0.2.18-GCC-5.3.0-2.26-LAPACK-3.6.0</br>0.2.19-GCC-5.4.0-2.26-LAPACK-3.6.0</br>0.2.19-GCC-6.3.0-2.27-LAPACK-3.7.0 | `US-`</br>`USA`</br>`USA`</br>`-S-`</br>`USA`</br>`--A`</br>`--A`</br>`-SA`</br>`-S-`</br>`-SA` |
| OpenCL-builder | 2015 | `-S-` |
| opencl-rt | 4.5.0.8 | `--A` |
| OpenCL-runtime | 15.1 | `-S-` |
| opencl-sdk | 4.6.0.92 | `--A` |
| OpenCoarrays | 1.0.0-GNU-5.1.0-2.25</br>1.0.1-GNU-5.1.0-2.25</br>1.4.0-GCC-5.3.0-2.25</br>1.4.0-GCC-5.3.1-snapshot-20160419-2.25 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
| OpenCV | 2.4.9-intel-2015b</br>3.0.0-intel-2015b | `-S-`</br>`-S-` |
| OpenDX | 4.4.4-foss-2015g | `-S-` |
| openfoam | 2.2.1-gcc481-openmpi1.6.5-DP</br>2.2.1-gcc481-openmpi1.6.5-SP</br>2.2.1-icc-impi4.1.1.036-DP</br>2.2.1-icc-openmpi1.6.5-DP</br>2.2.2-icc-openmpi1.8.1-DP | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
| OpenFOAM | 2.2.2-intel-2015b</br>2.3.0-intel-2015b</br>3.0.0-intel-2016.01 | `US-`</br>`US-`</br>`-S-` |
| openmpi | 1.6.5-gcc</br>1.6.5-gcc46</br>1.6.5-icc</br>1.8.1-gcc</br>1.8.1-gcc46</br>1.8.1-gcc49</br>1.8.1-icc | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
| OpenMPI | 1.8.6-iccifort-2015.3.187-GNU-5.1.0-2.25</br>1.8.6-GCC-4.4.7-system</br>1.8.6-GNU-5.1.0-2.25</br>1.8.8-iccifort-2015.3.187-GNU-4.9.3-2.25</br>1.8.8-GNU-4.9.3-2.25</br>1.8.8-GNU-5.1.0-2.25</br>1.10.1-GCC-4.9.3-2.25</br>1.10.1-GNU-4.9.3-2.25</br>1.10.2-GCC-4.9.3-2.25</br>1.10.2-GCC-5.3.0-2.26</br>2.0.1-iccifort-2017.1.132-GCC-5.4.0-2.26</br>2.0.2-GCC-6.3.0-2.27 | `-S-`</br>`US-`</br>`US-`</br>`-SA`</br>`USA`</br>`-S-`</br>`-S-`</br>`US-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-SA` |
| openssh-x509 | 6.2p2 | `--A` |
| ORCA | 3_0_3-linux_x86-64 | `-SA` |
| oscar-modules | 1.0.3 | `-S-` |
| OSPRay | 0.9.1 | `-S-` |
| OTF2 | 1.4-intel-2015b</br>2.0</br>2.0-intel-2015b-mic | `-S-`</br>`-SA`</br>`-S-` |
| otf2 | 1.2.1-gcc</br>1.2.1-icc</br>1.4-gcc</br>1.4-icc | `--A`</br>`--A`</br>`--A`</br>`--A` |
| p4vasp | 0.3.29-GNU-4.9.3-2.25 | `-S-` |
| PAPI | 5.4.0-intel-2015b</br>5.4.0-mic</br>5.4.0-p-mic</br>5.4.3</br>5.4.3-intel-2015b-mic</br>5.4.3-pic | `-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-` |
| papi | 5.3.0</br>5.3.2</br>5.3.2-mic</br>5.4.0</br>5.4.0-mic | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
| parallel | 20141122</br>20150322</br>20150322-GNU-5.1.0-2.25 | `--A`</br>`-S-`</br>`-S-` |
| ParaView | 4.3-OSPRay</br>5.0.0-binary | `-S-`</br>`-S-` |
| paraview | 4.0.1-gcc481-bullxmpi1.2.4.1-osmesa10.0 | `--A` |
| ParMETIS | 4.0.3-intel-2015b</br>4.0.3-intel-2016.01</br>4.0.3-intel-2017a | `-S-`</br>`-S-`</br>`-S-` |
| PCRE | 8.36-foss-2015g</br>8.36-intel-2015b</br>8.36-intel-2016.01</br>8.37</br>8.37-foss-2015g</br>8.37-gimkl-2.11.5</br>8.37-intel-2016.01</br>8.39-intel-2017.00 | `-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-SA`</br>`--A`</br>`--A`</br>`-S-` |
| PCRE2 | 10.22-intel-2017.00 | `-S-` |
| perfboost | 1.0 | `-S-` |
| perfcatcher | 1.0 | `-S-` |
| PerformanceReports | 5.1-43967</br>6.0.6 | `-S-`</br>`-SA` |
| PerfReports | 5.0.1 | `-S-` |
| perfsuite | 1a5.3 | `-S-` |
| Perl | 5.16.3-intel-2015b</br>5.20.2-bare</br>5.20.2-GNU-4.9.3-2.25-bare</br>5.22.2-intel-2017.00</br>5.24.0-GCC-4.9.3-2.25-bare | `US-`</br>`-S-`</br>`-SA`</br>`US-`</br>`--A` |
| pest | 13.0 | `--A` |
| petsc | 3.5.3-icc15-impi-mkl-dbg</br>3.5.3-icc15-impi-mkl-opt</br>3.5.3-icc15-impi-mkl-threads-dbg</br>3.5.3-icc15-impi-mkl-threads-opt</br>3.7.3-icc16-impi5-mkl-dbg</br>3.7.3-icc16-impi5-mkl-opt | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
| PETSc | 3.6.1-intel-2015b-Python-2.7.9</br>3.6.1-intel-2016.01-Python-2.7.9</br>3.6.3-intel-2015b-Python-2.7.11 | `-S-`</br>`-S-`</br>`-S-` |
| PGI | 15.7</br>16.10-GNU-4.9.3-2.25 | `-S-`</br>`-S-` |
| phono3py | 0.9.14-ictce-7.3.5-Python-2.7.9</br>1.11.7.8-intel-2015b-Python-2.7.9</br>1.11.7.8-intel-2015b-Python-2.7.11 | `-S-`</br>`-S-`</br>`--A` |
| phonopy | 1.11.6.7-intel-2015b-Python-2.7.9</br>1.11.6.7-intel-2015b-Python-2.7.11 | `-S-`</br>`--A` |
| picard | 1.117</br>1.119</br>2.1.0 | `--A`</br>`-S-`</br>`-S-` |
| pigz | 2.3.3-GCC-6.2.0-2.27 | `US-` |
| pixman | 0.32.6</br>0.32.6-foss-2015b</br>0.32.6-intel-2015b | `-SA`</br>`-S-`</br>`-S-` |
| pkg-config | 0.27.1</br>0.27.1-foss-2015b</br>0.27.1-foss-2015g</br>0.27.1-intel-2015b</br>0.29</br>0.29.1-foss-2016a</br>0.29.1-intel-2016a</br>0.29-foss-2016a</br>0.29-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-S-` |
| plasma | 2.6.0 | `--A` |
| PLUMED | 2.1.3-foss-2015g</br>2.2.1-intel-2015b</br>2.3b-foss-2015g</br>2.3b-foss-2016a | `-S-`</br>`--A`</br>`-S-`</br>`-SA` |
| prace | 20160107-intel-2016.01 | `-S-` |
| PRACE | 20150630-intel-2015b</br>prace | `US-`</br>`--A` |
| PrgEnv-gnu | 4.4.6</br>4.4.6-test</br>4.8.1 | `--A`</br>`--A`</br>`--A` |
| PrgEnv-intel | 13.5.192</br>14.0.1</br>15.0.3 | `--A`</br>`--A`</br>`--A` |
| PROJ | 4.8.0-foss-2015b</br>4.9.2-intel-2017.00 | `US-`</br>`-S-` |
| PROJ_4 | 4.9.2-foss-2015g | `-S-` |
| PSBLAS | 3.3.4-3-GCC-4.9.3-2.25 | `--A` |
| PSBLAS-ext | 1.0-4-GCC-4.9.3-2.25 | `--A` |
| PyQt | 4.11.3-foss-2015g-Python-2.7.9</br>4.11.4-foss-2015g-Python-2.7.9 | `-S-`</br>`-S-` |
| python | 2.7.5</br>2.7.6</br>3.3.2</br>3.3.5</br>3.4.2 | `--A`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
| Python | 2.7.8-intel-2015b</br>2.7.8-intel-2016.01</br>2.7.9</br>2.7.9-foss-2015b</br>2.7.9-foss-2015g</br>2.7.9-gompi-2015e</br>2.7.9-ictce-7.3.5</br>2.7.9-intel-2015b</br>2.7.9-intel-2016.01</br>2.7.9-GNU-5.1.0-2.25</br>2.7.10-GCC-4.9.3-2.25-bare</br>2.7.10-GNU-4.9.3-2.25-bare</br>2.7.11-foss-2016a</br>2.7.11-intel-2015b</br>2.7.11-intel-2016a</br>2.7.11-intel-2017.00</br>2.7.11-intel-2017a</br>2.7.11-GCC-4.9.3-2.25-bare</br>3.4.3-intel-2015b</br>3.5.1</br>3.5.1-intel-2016.01</br>3.5.1-intel-2017.00</br>3.5.2</br>3.5.2-foss-2016a</br>3.5.2-intel-2017.00 | `-S-`</br>`-S-`</br>`-SA`</br>`US-`</br>`USA`</br>`-S-`</br>`-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`US-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`USA`</br>`USA` |
| python-meep | 1.4.2-intel-2015b-Python-2.7.9-Meep-1.3 | `-S-` |
| PyYAML | 3.11-intel-2015b-Python-2.7.9 | `-S-` |
| QCA | 2.1.0-foss-2015g | `-S-` |
| QEMU | 2.1.2-foss-2015b</br>2.1.2-GCC-4.4.7-system</br>2.1.2-GCC-4.4.7-system-VDE2 | `U--`</br>`US-`</br>`US-` |
| qemu | 2.1.0</br>2.1.0-vde2</br>2.1.2</br>2.1.2-vde2 | `--A`</br>`--A`</br>`--A`</br>`--A` |
| QGIS | 2.12.3-foss-2015g | `-S-` |
| Qt | 4.8.6</br>4.8.6-foss-2015g | `-SA`</br>`-S-` |
| QuantumESPRESSO | 5.4.0-intel-2017.00</br>6.0-intel-2017a | `-SA`</br>`-S-` |
| Qwt | 6.1.2-foss-2015g | `-S-` |
| R | 3.0.1</br>3.1.1</br>3.1.1-intel-2015b</br>3.2.3-foss-2015b</br>3.2.3-intel-2016.01 | `--A`</br>`--A`</br>`US-`</br>`US-`</br>`-S-` |
| Racket | 6.1.1-GNU-5.1.0-2.25 | `-S-` |
| racket | 6.0.1 | `--A` |
| relion | 1.2</br>1.3 | `--A`</br>`--A` |
| RELION | 1.3-intel-2015b | `-S-` |
| renderproto | 0.11</br>0.11-intel-2015b | `-SA`</br>`-S-` |
| Rstudio | 0.97 | `--A` |
| RStudio | 0.98.1103 | `-S-` |
| ruby | 2.0.0-p247 | `--A` |
| Ruby | 2.1.5-intel-2015b</br>2.3.1 | `-S-`</br>`-SA` |
| S4MPLE | 1.0.0 | `-S-` |
| samtools | 0.1.19 | `--A` |
| SAMtools | 1.3-foss-2015g | `-S-` |
| ScaLAPACK | 2.0.2-gompi-2015b-OpenBLAS-0.2.14-LAPACK-3.5.0</br>2.0.2-gompi-2015e-OpenBLAS-0.2.14-LAPACK-3.5.0</br>2.0.2-gompi-2015g-OpenBLAS-0.2.14-LAPACK-3.5.0</br>2.0.2-gompi-2016.04-OpenBLAS-0.2.18-LAPACK-3.6.0</br>2.0.2-gompi-2016a-OpenBLAS-0.2.15-LAPACK-3.6.0</br>2.0.2-gompi-2017a-OpenBLAS-0.2.19-LAPACK-3.7.0</br>2.0.2-OpenBLAS-0.2.14-LAPACK-3.5.0 | `US-`</br>`US-`</br>`USA`</br>`-SA`</br>`USA`</br>`-SA`</br>`-S-` |
| Scalasca | 2.3.1-intel-2015b | `-S-` |
| scalasca2 | 2.0-gcc-openmpi</br>2.0-icc-impi | `--A`</br>`--A` |
| ScientificPython | 2.9.4-intel-2015b-Python-2.7.9</br>2.9.4-intel-2015b-Python-2.7.11</br>2.9.4-intel-2016.01-Python-2.7.9 | `-SA`</br>`-SA`</br>`-SA` |
| Scipion | 1.0.1-Java-1.8.0_112-OpenMPI-1.10.2-GCC-5.3.0-2.26 | `-S-` |
| scite | 3.4.3 | `--A` |
| SCons | 2.3.6-foss-2015g-Python-2.7.9</br>2.3.6-Python-2.7.9 | `-SA`</br>`-S-` |
| Score-P | 3.0-intel-2015b | `-S-` |
| scorep | 1.2.3-gcc-openmpi</br>1.2.3-icc-impi | `--A`</br>`--A` |
| SCOTCH | 5.1.12b_esmumps-foss-2015b</br>6.0.0_esmumps-intel-2015b</br>6.0.3-intel-2015b</br>6.0.3-intel-2016.01</br>6.0.4-intel-2015b</br>6.0.4-intel-2016.01</br>6.0.4-intel-2017a | `-S-`</br>`US-`</br>`US-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
| SDE | 7.41.0 | `-S-` |
| Serf | 1.3.8-foss-2015g | `-SA` |
| settarg | 7.2.2 | `USA` |
| Siesta | 4.1-b2-intel-2017.00 | `-S-` |
| Singularity | 2.2.1-GCC-6.3.0-2.27</br>2.2-GCC-6.3.0-2.27 | `-SA`</br>`--A` |
| SIONlib | 1.6.1-intel-2015b-tools</br>1.6.1-tools | `-S-`</br>`-SA` |
| SIP | 4.16.4-foss-2015g-Python-2.7.9</br>4.17-foss-2015g-Python-2.7.9</br>4.17-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-` |
| slepc | 3.4.4-icc15-impi-mkl-dbg</br>3.4.4-icc15-impi-mkl-opt</br>3.7.2-icc16-impi5-mkl-dbg</br>3.7.2-icc16-impi5-mkl-opt | `--A`</br>`--A`</br>`--A`</br>`--A` |
| snpEff | 3.6 | `--A` |
| SnpEff | 4.1_G | `US-` |
| SnuCL | 1.3.3-gompi-2015e | `-S-` |
| Spark | 1.5.2 | `-S-` |
| spatialindex | 1.8.5-foss-2015g | `-S-` |
| SpatiaLite | 4.3.0a-foss-2015g | `-S-` |
| spGPU | master-GCC-4.9.3-2.25 | `--A` |
| SQLite | 3.8.8.1-foss-2016a</br>3.13.0-intel-2017a | `-SA`</br>`-S-` |
| Subversion | 1.8.16-foss-2015g | `-SA` |
| SuiteSparse | 4.4.3-intel-2015b-ParMETIS-4.0.3</br>4.4.3-intel-2016.01-ParMETIS-4.0.3</br>4.4.5-intel-2015b-METIS-5.1.0</br>4.4.6-intel-2015b-ParMETIS-4.0.3</br>4.5.3-intel-2017.00-METIS-5.1.0</br>4.5.3-intel-2017a-ParMETIS-4.0.3 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
| SUMO | 0.25.0-foss-2015g</br>0.26.0-foss-2015g</br>0.27.1-foss-2015g | `-S-`</br>`-S-`</br>`-S-` |
| SWIG | 2.0.12-intel-2015b-Python-2.7.9</br>2.0.12-Python-2.7.9</br>3.0.7-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-` |
| sympy | 0.7.6.1-intel-2015b-Python-2.7.11</br>0.7.6-intel-2015b-Python-2.7.9</br>0.7.6-intel-2016.01-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-` |
| Szip | 2.1</br>2.1-foss-2015b</br>2.1-foss-2015g</br>2.1-foss-2016a</br>2.1-ictce-7.3.5</br>2.1-intel-2015b</br>2.1-intel-2016.01</br>2.1-intel-2017.00</br>2.1-intel-2017a | `-SA`</br>`US-`</br>`-S-`</br>`USA`</br>`-S-`</br>`USA`</br>`-SA`</br>`-S-`</br>`-S-` |
| szip | 2.1 | `--A` |
| tbb | 4.3.5.187</br>4.4.2.152</br>13.5.192</br>14.0.1</br>15.2.164</br>15.3.187 | `-S-`</br>`USA`</br>`--A`</br>`--A`</br>`--A`</br>`--A` |
| Tcl | 8.6.3-foss-2016a</br>8.6.5-intel-2017a | `-SA`</br>`-S-` |
| tcl | 8.5.15 | `--A` |
| tcsh | 6.18.01-intel-2015b</br>6.19.00 | `US-`</br>`-SA` |
| tensorflow | 0.12.0 | `-S-` |
| tk | 8.5.15 | `--A` |
| Tk | 8.6.3-foss-2015b-no-X11</br>8.6.5-intel-2017a | `U--`</br>`-S-` |
| tmux | 2.3 | `-SA` |
| totalview | 8.12</br>8.13 | `--A`</br>`--A` |
| TotalView | 8.15.4-6-linux-x86-64 | `-S-` |
| trilinos | 11.2.3-gcc-openmpi-mkl-dbg</br>11.2.3-gcc-openmpi-mkl-opt</br>11.2.3-icc | `--A`</br>`--A`</br>`--A` |
| Trimmomatic | 0.35-Java-1.7.0_79 | `-S-` |
| turbovnc | 1.2.2</br>1.2.3 | `--A`</br>`-S-` |
| util-linux | 2.26.1</br>2.26.1-foss-2015g</br>2.28-intel-2016a | `-SA`</br>`-S-`</br>`--A` |
| Valgrind | 3.11.0-foss-2015b</br>3.11.0-intel-2015b | `-S-`</br>`-S-` |
| valgrind | 3.9.0-impi | `--A` |
| Vampir | 8.5.0</br>9.0.0 | `-SA`</br>`-S-` |
| vampir | 8.2 | `--A` |
| VampirServer | 8.5.0-intel-2015b</br>9.0.0-intel-2015b | `-S-`</br>`-S-` |
| VASP | 5.4.1-intel-2015b-24Jun15</br>5.4.1-intel-2015b-24Jun15-UV</br>5.4.1-intel-2017.00-24Jun15 | `-SA`</br>`U--`</br>`-SA` |
| vde2 | 2.3.2 | `--A` |
| VDE2 | 2.3.2-GCC-4.4.7-system | `US-` |
| VirtualGL | 2.4.1 | `-S-` |
| virtualgl | 2.4 | `--A` |
| VisIt | 2.10.0 | `US-` |
| VTune | 2016_update1</br>2017_update2 | `USA`</br>`-SA` |
| vtune_xe | 2013.15</br>2015.3.0.403110 | `--A`</br>`--A` |
| wien2k | 13.1</br>14.2 | `--A`</br>`--A` |
| wine | 1.7.29 | `--A` |
| Wine | 1.7.29-GCC-4.4.7-system</br>1.7.29-GNU-5.1.0-2.25 | `-S-`</br>`-S-` |
| xbitmaps | 1.1.1</br>1.1.1-foss-2015g</br>1.1.1-intel-2015b | `-SA`</br>`-S-`</br>`-S-` |
| xcb-proto | 1.10-Python-2.7.8</br>1.11</br>1.11-foss-2015g-Python-2.7.9</br>1.11-intel-2015b-Python-2.7.9</br>1.11-Python-2.7.9 | `-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA` |
| xdrfile | 1.1.4-foss-2015g</br>1.1.4-foss-2016a</br>1.1.4-intel-2015b | `-SA`</br>`-SA`</br>`--A` |
| xextproto | 7.3.0</br>7.3.0-foss-2015g</br>7.3.0-foss-2016a</br>7.3.0-intel-2015b</br>7.3.0-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
| xineramaproto | 1.2.1</br>1.2.1-intel-2015b | `-SA`</br>`-S-` |
| xorg-macros | 1.17</br>1.17-foss-2015g</br>1.19.0-foss-2016a</br>1.19.0-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-` |
| xproto | 7.0.26</br>7.0.26-foss-2015g</br>7.0.26-intel-2015b</br>7.0.28</br>7.0.28-foss-2016a</br>7.0.28-intel-2016a | `-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
| xtrans | 1.3.4</br>1.3.4-intel-2015b</br>1.3.5</br>1.3.5-foss-2015g</br>1.3.5-foss-2016a</br>1.3.5-intel-2015b</br>1.3.5-intel-2016a | `--A`</br>`-S-`</br>`-SA`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-S-` |
| XZ | 5.2.2</br>5.2.2-foss-2016a</br>5.2.2-intel-2016.01</br>5.2.2-intel-2017.00 | `-SA`</br>`USA`</br>`-S-`</br>`USA` |
| zlib | 1.2.5</br>1.2.8-GCC-4.4.7-system</br>1.2.8-GCC-4.9.3</br>1.2.11-GCCcore-6.3.0 | `--A`</br>`U--`</br>`--A`</br>`-SA` |
---8<--- "modules_matrix_search.md"
# Available Modules
## Compiler
| Module | Description |
| ------ | ----------- |
| [icc](http://software.intel.com/en-us/intel-compilers/) | Intel C and C++ compilers |
## Devel
| Module | Description |
| ------ | ----------- |
| devel_environment | &nbsp; |
| M4 | &nbsp; |
| ncurses | &nbsp; |
## Lang
| Module | Description |
| ------ | ----------- |
| [Bison](http://www.gnu.org/software/bison) | Bison is a general-purpose parser generator that converts an annotated context-free grammar into a deterministic LR or generalized LR (GLR) parser employing LALR(1) parser tables. |
| [flex](http://flex.sourceforge.net/) | Flex (Fast Lexical Analyzer) is a tool for generating scanners. A scanner, sometimes called a tokenizer, is a program which recognizes lexical patterns in text. |
| [Tcl](http://www.tcl.tk/) | Tcl (Tool Command Language) is a very powerful but easy to learn dynamic programming language, suitable for a very wide range of uses, including web and desktop applications, networking, administration, testing and many more. |
## Lib
| Module | Description |
| ------ | ----------- |
| [libreadline](http://cnswww.cns.cwru.edu/php/chet/readline/rltop.html) | The GNU Readline library provides a set of functions for use by applications that allow users to edit command lines as they are typed in. Both Emacs and vi editing modes are available. The Readline library includes additional functions to maintain a list of previously-entered command lines, to recall and perhaps reedit those lines, and perform csh-like history expansion on previous commands. |
| [zlib](http://www.zlib.net/) | zlib is designed to be a free, general-purpose, legally unencumbered -- that is, not covered by any patents -- lossless data-compression library for use on virtually any computer hardware and operating system. |
## Math
| Module | Description |
| ------ | ----------- |
| [Octave](http://www.gnu.org/software/octave/) | GNU Octave is a high-level interpreted language, primarily intended for numerical computations. |
## Mpi
| Module | Description |
| ------ | ----------- |
| [impi](http://software.intel.com/en-us/intel-mpi-library/) | Intel MPI Library, compatible with MPICH ABI |
## Toolchain
| Module | Description |
| ------ | ----------- |
| [iccifort](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/) | Intel C, C++ & Fortran compilers |
| [ifort](http://software.intel.com/en-us/intel-compilers/) | Intel Fortran compiler |
## Tools
| Module | Description |
| ------ | ----------- |
| bzip2 | &nbsp; |
| cURL | &nbsp; |
| [expat](http://expat.sourceforge.net/) | Expat is an XML parser library written in C. It is a stream-oriented parser in which an application registers handlers for things the parser might find in the XML document (like start tags) |
## Vis
| Module | Description |
| ------ | ----------- |
| gettext | &nbsp; |
# Available Modules
## Bio
| Module | Description |
| ------ | ----------- |
| [FastQC](http://www.bioinformatics.babraham.ac.uk/projects/download.html) | A quality control application for high throughput sequence data |
| [GATK](http://www.broadinstitute.org/gatk/) | The Genome Analysis Toolkit or GATK is a software package developed at the Broad Institute to analyse next-generation resequencing data. The toolkit offers a wide variety of tools, with a primary focus on variant discovery and genotyping as well as strong emphasis on data quality assurance. Its robust architecture, powerful processing engine and high-performance computing features make it capable of taking on projects of any size. |
| [SnpEff](http://snpeff.sourceforge.net/) | Genetic variant annotation and effect prediction toolbox. |
## Cae
| Module | Description |
| ------ | ----------- |
| COMSOL | &nbsp; |
| [OpenFOAM](http://www.openfoam.com/) | OpenFOAM is a free, open source CFD software package. OpenFOAM has an extensive range of features to solve anything from complex fluid flows involving chemical reactions, turbulence and heat transfer, to solid dynamics and electromagnetics. |
## Chem
| Module | Description |
| ------ | ----------- |
| [ABINIT](http://www.abinit.org/) | Abinit is a plane wave pseudopotential code for doing condensed phase electronic structure calculations using DFT. |
| [Libint](https://sourceforge.net/p/libint/) | Libint library is used to evaluate the traditional (electron repulsion) and certain novel two-body matrix elements (integrals) over Cartesian Gaussian functions used in modern atomic and molecular theory. |
| [libxc](http://www.tddft.org/programs/octopus/wiki/index.php/Libxc) | Libxc is a library of exchange-correlation functionals for density-functional theory. The aim is to provide a portable, well tested and reliable set of exchange and correlation functionals. |
## Compiler
| Module | Description |
| ------ | ----------- |
| [GCC](http://gcc.gnu.org/) | The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Java, and Ada, as well as libraries for these languages (libstdc++, libgcj,...). |
| GCCcore | &nbsp; |
| icc | &nbsp; |
| [ifort](http://software.intel.com/en-us/intel-compilers/) | Fortran compiler from Intel |
| [LLVM](http://llvm.org/) | The LLVM Core libraries provide a modern source- and target-independent optimizer, along with code generation support for many popular CPUs (as well as some less common ones!) These libraries are built around a well specified code representation known as the LLVM intermediate representation ("LLVM IR"). The LLVM Core libraries are well documented, and it is particularly easy to invent your own language (or port an existing compiler) to use LLVM as an optimizer and code generator. |
## Data
| Module | Description |
| ------ | ----------- |
| [GDAL](http://www.gdal.org/) | GDAL is a translator library for raster geospatial data formats that is released under an X/MIT style Open Source license by the Open Source Geospatial Foundation. As a library, it presents a single abstract data model to the calling application for all supported formats. It also comes with a variety of useful commandline utilities for data translation and processing. |
| [HDF5](http://www.hdfgroup.org/HDF5/) | HDF5 is a unique technology suite that makes possible the management of extremely large and complex data collections. |
| [netCDF](http://www.unidata.ucar.edu/software/netcdf/) | NetCDF (network Common Data Form) is a set of software libraries and machine-independent data formats that support the creation, access, and sharing of array-oriented scientific data. |
| [netCDF-Fortran](http://www.unidata.ucar.edu/software/netcdf/) | NetCDF (network Common Data Form) is a set of software libraries and machine-independent data formats that support the creation, access, and sharing of array-oriented scientific data. |
## Devel
| Module | Description |
| ------ | ----------- |
| [Autoconf](http://www.gnu.org/software/autoconf/) | Autoconf is an extensible package of M4 macros that produce shell scripts to automatically configure software source code packages. These scripts can adapt the packages to many kinds of UNIX-like systems without manual user intervention. Autoconf creates a configuration script for a package from a template file that lists the operating system features that the package can use, in the form of M4 macro calls. |
| [Automake](http://www.gnu.org/software/automake/automake.html) | Automake: GNU Standards-compliant Makefile generator |
| [Autotools](http://autotools.io) | This bundle collects the standard GNU build tools: Autoconf, Automake and libtool |
| [Boost](http://www.boost.org/) | Boost provides free peer-reviewed portable C++ source libraries. |
| [CMake](http://www.cmake.org) | CMake, the cross-platform, open-source build system. CMake is a family of tools designed to build, test and package software. |
| [Doxygen](http://www.doxygen.org) | Doxygen is a documentation system for C++, C, Java, Objective-C, Python, IDL (Corba and Microsoft flavors), Fortran, VHDL, PHP, C#, and to some extent D. |
| [M4](http://www.gnu.org/software/m4/m4.html) | GNU M4 is an implementation of the traditional Unix macro processor. It is mostly SVR4 compatible although it has some extensions (for example, handling more than 9 positional parameters to macros). GNU M4 also has built-in functions for including files, running shell commands, doing arithmetic, etc. |
| [make](http://www.gnu.org/software/make/make.html) | make-3.82: GNU version of make utility |
| [Mako](http://www.makotemplates.org) | A super-fast templating language that borrows the best ideas from the existing templating languages |
| [Maven](http://maven.apache.org/index.html) | Binary maven install, Apache Maven is a software project management and comprehension tool. Based on the concept of a project object model (POM), Maven can manage a project's build, reporting and documentation from a central piece of information. |
| [ncurses](http://www.gnu.org/software/ncurses/) | The Ncurses (new curses) library is a free software emulation of curses in System V Release 4.0, and more. It uses Terminfo format, supports pads and color and multiple highlights and forms characters and function-key mapping, and has all the other SYSV-curses enhancements over BSD Curses. |
| [PCRE](http://www.pcre.org/) | The PCRE library is a set of functions that implement regular expression pattern matching using the same syntax and semantics as Perl 5. |
| [pkg-config](http://www.freedesktop.org/wiki/Software/pkg-config/) | pkg-config is a helper tool used when compiling applications and libraries. It helps you insert the correct compiler options on the command line so an application can use gcc -o test test.c `pkg-config --libs --cflags glib-2.0` for instance, rather than hard-coding values on where to find glib (or other libraries). |
| [Qt](http://qt-project.org/) | Qt is a comprehensive cross-platform C++ application framework. |
| [Qt5](http://qt.io/) | Qt is a comprehensive cross-platform C++ application framework. |
| [SQLite](http://www.sqlite.org/) | SQLite: SQL Database Engine in a C Library |
| [SWIG](http://www.swig.org/) | SWIG is a software development tool that connects programs written in C and C++ with a variety of high-level programming languages. |
| [xorg-macros](http://cgit.freedesktop.org/xorg/util/macros) | X.org macros utilities. |
## Lang
| Module | Description |
| ------ | ----------- |
| [Bison](http://www.gnu.org/software/bison) | Bison is a general-purpose parser generator that converts an annotated context-free grammar into a deterministic LR or generalized LR (GLR) parser employing LALR(1) parser tables. |
| flex | &nbsp; |
| [Java](http://java.com/) | Java Platform, Standard Edition (Java SE) lets you develop and deploy Java applications on desktops and servers. |
| [Lua](http://www.lua.org/) | Lua is a powerful, fast, lightweight, embeddable scripting language. Lua combines simple procedural syntax with powerful data description constructs based on associative arrays and extensible semantics. Lua is dynamically typed, runs by interpreting bytecode for a register-based virtual machine, and has automatic memory management with incremental garbage collection, making it ideal for configuration, scripting, and rapid prototyping. |
| [Mono](http://mono-framework.com) | An open source, cross-platform, implementation of C# and the CLR that is binary compatible with Microsoft.NET. |
| [NASM](http://www.nasm.us/) | NASM: General-purpose x86 assembler |
| [OpenCL-runtime](https://software.intel.com/en-us/intel-opencl) | OpenCL™ is the first open, royalty-free standard for cross-platform, parallel programming of modern processors found in personal computers, servers and handheld/embedded devices. OpenCL (Open Computing Language) greatly improves speed and responsiveness for a wide spectrum of applications in numerous market categories from gaming and entertainment to scientific and medical software. |
| [Perl](http://www.perl.org/) | Larry Wall's Practical Extraction and Report Language |
| [Python](http://python.org/) | Python is a programming language that lets you work more quickly and integrate your systems more effectively. |
| [R](http://www.r-project.org/) | R is a free software environment for statistical computing and graphics. |
| [Tcl](http://www.tcl.tk/) | Tcl (Tool Command Language) is a very powerful but easy to learn dynamic programming language, suitable for a very wide range of uses, including web and desktop applications, networking, administration, testing and many more. |
## Lib
| Module | Description |
| ------ | ----------- |
| [libdrm](http://dri.freedesktop.org) | Direct Rendering Manager runtime library. |
| [libffi](http://sourceware.org/libffi/) | The libffi library provides a portable, high level programming interface to various calling conventions. This allows a programmer to call any function specified by a call interface description at run-time. |
| [libjpeg-turbo](http://sourceforge.net/libjpeg-turbo/) | libjpeg-turbo is a fork of the original IJG libjpeg which uses SIMD to accelerate baseline JPEG compression and decompression. libjpeg is a library that implements JPEG image encoding, decoding and transcoding. |
| [libpng](http://www.libpng.org/pub/png/libpng.html) | libpng is the official PNG reference library |
| [libpthread-stubs](http://xcb.freedesktop.org/) | The X protocol C-language Binding (XCB) is a replacement for Xlib featuring a small footprint, latency hiding, direct access to the protocol, improved threading support, and extensibility. |
| [libreadline](http://cnswww.cns.cwru.edu/php/chet/readline/rltop.html) | The GNU Readline library provides a set of functions for use by applications that allow users to edit command lines as they are typed in. Both Emacs and vi editing modes are available. The Readline library includes additional functions to maintain a list of previously-entered command lines, to recall and perhaps reedit those lines, and perform csh-like history expansion on previous commands. |
| [libsndfile](http://www.mega-nerd.com/libsndfile) | Libsndfile is a C library for reading and writing files containing sampled sound (such as MS Windows WAV and the Apple/SGI AIFF format) through one standard library interface. |
| [LibTIFF](http://www.remotesensing.org/libtiff/) | tiff: Library and tools for reading and writing TIFF data files |
| libtool | &nbsp; |
| [libxml2](http://xmlsoft.org/) | Libxml2 is the XML C parser and toolchain developed for the Gnome project (but usable outside of the Gnome platform). |
| [nettle](http://www.lysator.liu.se/~nisse/nettle/) | Nettle is a cryptographic library that is designed to fit easily in more or less any context: In crypto toolkits for object-oriented languages (C++, Python, Pike, ...), in applications like LSH or GNUPG, or even in kernel space. |
| [PROJ](http://trac.osgeo.org/proj/) | Program proj is a standard Unix filter function which converts geographic longitude and latitude coordinates into cartesian coordinates |
| [tbb](http://software.intel.com/en-us/articles/intel-tbb/) | Intel Threading Building Blocks 4.0 (Intel TBB) is a widely used, award-winning C++ template library for creating reliable, portable, and scalable parallel applications. Use Intel TBB for a simple and rapid way of developing robust task-based parallel applications that scale to available processor cores, are compatible with multiple environments, and are easier to maintain. Intel TBB is the most proficient way to implement future-proof parallel applications that tap into the power and performance of multicore and manycore hardware platforms. |
| [zlib](http://www.zlib.net/) | zlib is designed to be a free, general-purpose, legally unencumbered -- that is, not covered by any patents -- lossless data-compression library for use on virtually any computer hardware and operating system. |
## Math
| Module | Description |
| ------ | ----------- |
| [Eigen](http://eigen.tuxfamily.org/index.php?title=Main_Page) | Eigen is a C++ template library for linear algebra: matrices, vectors, numerical solvers, and related algorithms. |
| [GEOS](http://trac.osgeo.org/geos) | GEOS (Geometry Engine - Open Source) is a C++ port of the Java Topology Suite (JTS) |
| GMP | &nbsp; |
| [METIS](http://glaros.dtc.umn.edu/gkhome/metis/metis/overview) | METIS is a set of serial programs for partitioning graphs, partitioning finite element meshes, and producing fill reducing orderings for sparse matrices. The algorithms implemented in METIS are based on the multilevel recursive-bisection, multilevel k-way, and multi-constraint partitioning schemes. |
| [MPFR](http://www.mpfr.org) | The MPFR library is a C library for multiple-precision floating-point computations with correct rounding. |
| [SCOTCH](http://gforge.inria.fr/projects/scotch/) | Software package and libraries for sequential and parallel graph partitioning, static mapping, and sparse matrix block ordering, and sequential mesh and hypergraph partitioning. |
## Mpi
| Module | Description |
| ------ | ----------- |
| [impi](http://software.intel.com/en-us/intel-mpi-library/) | The Intel(R) MPI Library for Linux* OS is a multi-fabric message passing library based on ANL MPICH2 and OSU MVAPICH2. The Intel MPI Library for Linux OS implements the Message Passing Interface, version 2 (MPI-2) specification. |
| [OpenMPI](http://www.open-mpi.org/) | The Open MPI Project is an open source MPI-2 implementation. |
## Numlib
| Module | Description |
| ------ | ----------- |
| [CGAL](http://www.cgal.org/) | The goal of the CGAL Open Source Project is to provide easy access to efficient and reliable geometric algorithms in the form of a C++ library. |
| [cuDNN](https://developer.nvidia.com/cudnn) | The NVIDIA CUDA Deep Neural Network library (cuDNN) is a GPU-accelerated library of primitives for deep neural networks. |
| [FFTW](http://www.fftw.org) | FFTW is a C subroutine library for computing the discrete Fourier transform (DFT) in one or more dimensions, of arbitrary input size, and of both real and complex data. |
| [imkl](http://software.intel.com/en-us/intel-mkl/) | Intel Math Kernel Library is a library of highly optimized, extensively threaded math routines for science, engineering, and financial applications that require maximum performance. Core math functions include BLAS, LAPACK, ScaLAPACK, Sparse Solvers, Fast Fourier Transforms, Vector Math, and more. |
| [NLopt](http://ab-initio.mit.edu/wiki/index.php/NLopt) | NLopt is a free/open-source library for nonlinear optimization, providing a common interface for a number of different free optimization routines available online as well as original implementations of various other algorithms. |
| [OpenBLAS](http://xianyi.github.com/OpenBLAS/) | OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version. |
| [ScaLAPACK](http://www.netlib.org/scalapack/) | The ScaLAPACK (or Scalable LAPACK) library includes a subset of LAPACK routines redesigned for distributed memory MIMD parallel computers. |
## Phys
| Module | Description |
| ------ | ----------- |
| [VASP](http://www.vasp.at) | The Vienna Ab initio Simulation Package (VASP) is a computer program for atomic scale materials modelling, e.g. electronic structure calculations and quantum-mechanical molecular dynamics, from first principles. |
## System
| Module | Description |
| ------ | ----------- |
| CUDA | &nbsp; |
| [hwloc](http://www.open-mpi.org/projects/hwloc/) | The Portable Hardware Locality (hwloc) software package provides a portable abstraction (across OS, versions, architectures, ...) of the hierarchical topology of modern architectures, including NUMA memory nodes, sockets, shared caches, cores and simultaneous multithreading. It also gathers various system attributes such as cache and memory information as well as the locality of I/O devices such as network interfaces, InfiniBand HCAs or GPUs. It primarily aims at helping applications with gathering information about modern computing hardware so as to exploit it accordingly and efficiently. |
| [libpciaccess](http://cgit.freedesktop.org/xorg/lib/libpciaccess/) | Generic PCI access library. |
## Toolchain
| Module | Description |
| ------ | ----------- |
| foss | GNU Compiler Collection (GCC) based compiler toolchain, including OpenMPI for MPI support, OpenBLAS (BLAS and LAPACK support), FFTW and ScaLAPACK. |
| [GNU](http://www.gnu.org/software/) | Compiler-only toolchain with GCC and binutils. |
| gompi | GNU Compiler Collection (GCC) based compiler toolchain, including OpenMPI for MPI support. |
| [iccifort](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/) | Intel C, C++ and Fortran compilers |
| [iimpi](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/) | Intel C/C++ and Fortran compilers, alongside Intel MPI. |
| [intel](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/) | Intel Cluster Toolkit Compiler Edition provides Intel C/C++ and Fortran compilers, Intel MPI & Intel MKL. |
| [PRACE](http://www.prace-ri.eu/PRACE-Common-Production) | The PRACE Common Production Environment (PCPE) is a set of software tools and libraries that are planned to be available on all PRACE execution sites. The PCPE also defines a set of environment variables that try to make compilation on all sites as homogeneous and simple as possible. |
## Tools
| Module | Description |
| ------ | ----------- |
| [Bash](http://www.gnu.org/software/bash) | Bash is an sh-compatible command language interpreter that executes commands read from the standard input or from a file. Bash also incorporates useful features from the Korn and C shells (ksh and csh). |
| [binutils](http://directory.fsf.org/project/binutils/) | binutils: GNU binary utilities |
| [bzip2](http://www.bzip.org/) | bzip2 is a freely available, patent free, high-quality data compressor. It typically compresses files to within 10% to 15% of the best available techniques (the PPM family of statistical compressors), whilst being around twice as fast at compression and six times faster at decompression. |
| [cURL](http://curl.haxx.se) | libcurl is a free and easy-to-use client-side URL transfer library, supporting DICT, FILE, FTP, FTPS, Gopher, HTTP, HTTPS, IMAP, IMAPS, LDAP, LDAPS, POP3, POP3S, RTMP, RTSP, SCP, SFTP, SMTP, SMTPS, Telnet and TFTP. libcurl supports SSL certificates, HTTP POST, HTTP PUT, FTP uploading, HTTP form based upload, proxies, cookies, user+password authentication (Basic, Digest, NTLM, Negotiate, Kerberos), file transfer resume, http proxy tunneling and more. |
| [DMTCP](http://dmtcp.sourceforge.net/index.html) | DMTCP (Distributed MultiThreaded Checkpointing) transparently checkpoints a single-host or distributed computation in user-space -- with no modifications to user code or to the O/S. |
| EasyBuild | &nbsp; |
| [expat](http://expat.sourceforge.net/) | Expat is an XML parser library written in C. It is a stream-oriented parser in which an application registers handlers for things the parser might find in the XML document (like start tags) |
| [git](http://git-scm.com/) | Git is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency. |
| [gzip](http://www.gnu.org/software/gzip/) | gzip (GNU zip) is a popular data compression program as a replacement for compress |
| MATLAB | &nbsp; |
| [Mercurial](http://mercurial.selenic.com/) | Mercurial is a free, distributed source control management tool. It efficiently handles projects of any size and offers an easy and intuitive interface. |
| [numactl](http://oss.sgi.com/projects/libnuma/) | The numactl program allows you to run your application program on specific cpu's and memory nodes. It does this by supplying a NUMA memory policy to the operating system before running your program. The libnuma library provides convenient ways for you to add NUMA memory policies into your own program. |
| pigz | &nbsp; |
| [QEMU](http://wiki.qemu.org/Main_Page) | QEMU is a generic and open source machine emulator and virtualizer. |
| [RStudio](https://www.rstudio.com) | RStudio is a set of integrated tools designed to help you be more productive with R. It includes a console, syntax-highlighting editor that supports direct code execution, as well as tools for plotting, history, debugging and workspace management. |
| Singularity | &nbsp; |
| [Szip](http://www.hdfgroup.org/doc_resource/SZIP/) | Szip compression software, providing lossless compression of scientific data |
| [tcsh](http://www.tcsh.org) | Tcsh is an enhanced, but completely compatible version of the Berkeley UNIX C shell (csh). It is a command language interpreter usable both as an interactive login shell and a shell script command processor. It includes a command-line editor, programmable word completion, spelling correction, a history mechanism, job control and a C-like syntax. |
| [util-linux](http://www.kernel.org/pub/linux/utils/util-linux) | Set of Linux utilities |
| [VDE2](http://vde.sourceforge.net) | VDE is an ethernet compliant virtual network that can be spawned over a set of physical computers over the Internet. VDE is part of the virtualsquare project. |
| [VTune](http://software.intel.com/en-us/intel-vtune-amplifier-xe) | Intel VTune Amplifier XE 2016 is the premier performance profiler for C, C++, C#, Fortran, Assembly and Java. |
| [XZ](http://tukaani.org/xz/) | xz: XZ utilities |
## Vis
| Module | Description |
| ------ | ----------- |
| [cairo](http://cairographics.org) | Cairo is a 2D graphics library with support for multiple output devices. Currently supported output targets include the X Window System (via both Xlib and XCB), Quartz, Win32, image buffers, PostScript, PDF, and SVG file output. Experimental backends include OpenGL, BeOS, OS/2, and DirectFB |
| [fontconfig](http://www.freedesktop.org/software/fontconfig) | Fontconfig is a library designed to provide system-wide font configuration, customization and application access. |
| [freetype](http://freetype.org) | FreeType 2 is a software font engine that is designed to be small, efficient, highly customizable, and portable while capable of producing high-quality output (glyph images). It can be used in graphics libraries, display servers, font conversion tools, text image generation tools, and many other products as well. |
| [gettext](http://www.gnu.org/software/gettext/) | GNU `gettext' is an important step for the GNU Translation Project, as it is an asset on which we may build many other steps. This package offers to programmers, translators, and even users, a well integrated set of tools and documentation |
| [GLib](http://www.gtk.org/) | GLib is one of the base libraries of the GTK+ project |
| [JasPer](http://www.ece.uvic.ca/~frodo/jasper/) | The JasPer Project is an open-source initiative to provide a free software-based reference implementation of the codec specified in the JPEG-2000 Part-1 standard. |
| [libGLU](ftp://ftp.freedesktop.org/pub/mesa/glu/) | The OpenGL Utility Library (GLU) is a computer graphics library for OpenGL. |
| [Mesa](http://www.mesa3d.org/) | Mesa is an open-source implementation of the OpenGL specification - a system for rendering interactive 3D graphics. |
| [ParaView](http://www.paraview.org) | ParaView is a scientific parallel visualizer. |
| [pixman](http://www.pixman.org/) | Pixman is a low-level software library for pixel manipulation, providing features such as image compositing and trapezoid rasterization. Important users of pixman are the cairo graphics library and the X server. |
| [Tk](http://www.tcl.tk/) | Tk is an open source, cross-platform widget toolchain that provides a library of basic elements for building a graphical user interface (GUI) in many different programming languages. |
| [VisIt](https://wci.llnl.gov/simulation/computer-codes/visit) | VisIt is an Open Source, interactive, scalable, visualization, animation and analysis tool |
| [X11](https://www.x.org) | The X Window System (X11) is a windowing system for bitmap displays |
* ![pdf](img/pdf.png)[PBS Pro Programmer's Guide](http://www.pbsworks.com/pdfs/PBSProgramGuide13.0.pdf)
* ![pdf](img/pdf.png)[PBS Pro Quick Start Guide](http://www.pbsworks.com/pdfs/PBSQuickStartGuide13.0.pdf)
* ![pdf](img/pdf.png)[PBS Pro Reference Guide](http://www.pbsworks.com/pdfs/PBSReferenceGuide13.0.pdf)
* ![pdf](img/pdf.png)[PBS Pro User's Guide](http://www.pbsworks.com/pdfs/PBSUserGuide13.0.pdf)
# PRACE User Support
## Introduction
PRACE users coming to the TIER-1 systems offered through the DECI calls are in general treated as standard users, so most of the general documentation applies to them as well. This section highlights the main differences for quicker orientation, but often refers to the original documentation. PRACE users who do not undergo the full procedure (including signing the IT4I AuP on top of the PRACE AuP) will not have a password and thus no access to some services intended for regular users. This can lower their comfort, but otherwise they should be able to use the TIER-1 system as intended. See the [Obtaining Login Credentials section](/general/obtaining-login-credentials/obtaining-login-credentials/) if the same level of access is required.
All general [PRACE User Documentation](http://www.prace-ri.eu/user-documentation/) should be read before continuing reading the local documentation here.
## Help and Support
If you run into trouble, need information, want to request support, or want to install additional software, use the [PRACE Helpdesk](http://www.prace-ri.eu/helpdesk-guide264/).
Information about the local services is provided in the [introduction of the general user documentation for Salomon](/salomon/introduction/) and the [introduction of the general user documentation for Anselm](/anselm/introduction/). Keep in mind that standard PRACE accounts do not have a password for the web interface of the local (IT4Innovations) request tracker, so a new ticket should be created by sending an e-mail to support[at]it4i.cz.
## Obtaining Login Credentials
In general, PRACE users already have a PRACE account set up through their HOMESITE (the institution from their country) as a result of a successful PRACE project proposal. This includes a signed PRACE AuP, generated and registered certificates, etc.
If there is a special need, a PRACE user can get a standard (local) account at IT4Innovations. To get an account on a cluster, the user needs to obtain the login credentials. The procedure is the same as for general users of the cluster, so see the corresponding [section of the general documentation here](/general/obtaining-login-credentials/obtaining-login-credentials/).
## Accessing the Cluster
### Access With GSI-SSH
All PRACE users can use interactive access (login) and data transfer based on grid services from the Globus Toolkit (GSI SSH and GridFTP).
A valid certificate and an entry in the PRACE LDAP are required (contact your HOME SITE or the primary investigator of your project for LDAP account creation).
Most of the information needed by PRACE users accessing the TIER-1 systems can be found here:
* [General user's FAQ](http://www.prace-ri.eu/Users-General-FAQs)
* [Certificates FAQ](http://www.prace-ri.eu/Certificates-FAQ)
* [Interactive access using GSISSH](http://www.prace-ri.eu/Interactive-Access-Using-gsissh)
* [Data transfer with GridFTP](http://www.prace-ri.eu/Data-Transfer-with-GridFTP-Details)
* [Data transfer with gtransfer](http://www.prace-ri.eu/Data-Transfer-with-gtransfer)
Before you start using any of the services, don't forget to create a proxy certificate from your certificate:
```console
$ grid-proxy-init
```
To check whether your proxy certificate is still valid (by default, it is valid for 12 hours), use:
```console
$ grid-proxy-info
```
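If the default lifetime is too short, e.g. for a long data transfer, a longer-lived proxy can be requested at creation time; a sketch assuming the standard Globus Toolkit -valid hours:minutes option:
```console
$ grid-proxy-init -valid 24:00
```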
To access the cluster, several login nodes running the GSI SSH service are available. The service is available from the public Internet as well as from the internal PRACE network (accessible only from other PRACE partners).
#### Access From PRACE Network:
It is recommended to use the single DNS name **name-cluster**-prace.it4i.cz, which is distributed among the login nodes. If needed, users can log in directly to one of the login nodes. The addresses are:
For Salomon cluster:
| Login address | Port | Protocol | Login node |
| ---------------------------- | ---- | -------- | -------------------------------- |
| salomon-prace.it4i.cz | 2222 | gsissh | login1, login2, login3 or login4 |
| login1-prace.salomon.it4i.cz | 2222 | gsissh | login1 |
| login2-prace.salomon.it4i.cz | 2222 | gsissh | login2 |
| login3-prace.salomon.it4i.cz | 2222 | gsissh | login3 |
| login4-prace.salomon.it4i.cz | 2222 | gsissh | login4 |
```console
$ gsissh -p 2222 salomon-prace.it4i.cz
```
For Anselm cluster:
| Login address | Port | Protocol | Login node |
| --------------------------- | ---- | -------- | ---------------- |
| anselm-prace.it4i.cz | 2222 | gsissh | login1 or login2 |
| login1-prace.anselm.it4i.cz | 2222 | gsissh | login1 |
| login2-prace.anselm.it4i.cz | 2222 | gsissh | login2 |
```console
$ gsissh -p 2222 anselm-prace.it4i.cz
```
When logging in from another PRACE system, the prace_service script can be used:
```console
$ gsissh `prace_service -i -s salomon`
```
```console
$ gsissh `prace_service -i -s anselm`
```
#### Access From Public Internet:
It is recommended to use the single DNS name **name-cluster**.it4i.cz, which is distributed among the login nodes. If needed, users can log in directly to one of the login nodes. The addresses are:
For Salomon cluster:
| Login address | Port | Protocol | Login node |
| ---------------------------- | ---- | -------- | -------------------------------- |
| salomon.it4i.cz | 2222 | gsissh | login1, login2, login3 or login4 |
| login1.salomon.it4i.cz | 2222 | gsissh | login1 |
| login2.salomon.it4i.cz       | 2222 | gsissh   | login2                           |
| login3.salomon.it4i.cz       | 2222 | gsissh   | login3                           |
| login4.salomon.it4i.cz       | 2222 | gsissh   | login4                           |
```console
$ gsissh -p 2222 salomon.it4i.cz
```
For Anselm cluster:
| Login address | Port | Protocol | Login node |
| --------------------- | ---- | -------- | ---------------- |
| anselm.it4i.cz | 2222 | gsissh | login1 or login2 |
| login1.anselm.it4i.cz | 2222 | gsissh | login1 |
| login2.anselm.it4i.cz | 2222 | gsissh | login2 |
```console
$ gsissh -p 2222 anselm.it4i.cz
```
When logging in from another PRACE system, the prace_service script can be used:
```console
$ gsissh `prace_service -e -s salomon`
```
```console
$ gsissh `prace_service -e -s anselm`
```
Although the preferred and recommended file transfer mechanism is [using GridFTP](prace/#file-transfers), the GSI SSH implementation also supports SCP, so gsiscp can be used for transferring small files:
```console
$ gsiscp -P 2222 _LOCAL_PATH_TO_YOUR_FILE_ salomon.it4i.cz:_SALOMON_PATH_TO_YOUR_FILE_
$ gsiscp -P 2222 salomon.it4i.cz:_SALOMON_PATH_TO_YOUR_FILE_ _LOCAL_PATH_TO_YOUR_FILE_
$ gsiscp -P 2222 _LOCAL_PATH_TO_YOUR_FILE_ salomon-prace.it4i.cz:_SALOMON_PATH_TO_YOUR_FILE_
$ gsiscp -P 2222 salomon-prace.it4i.cz:_SALOMON_PATH_TO_YOUR_FILE_ _LOCAL_PATH_TO_YOUR_FILE_
```
```console
$ gsiscp -P 2222 _LOCAL_PATH_TO_YOUR_FILE_ anselm.it4i.cz:_ANSELM_PATH_TO_YOUR_FILE_
$ gsiscp -P 2222 anselm.it4i.cz:_ANSELM_PATH_TO_YOUR_FILE_ _LOCAL_PATH_TO_YOUR_FILE_
$ gsiscp -P 2222 _LOCAL_PATH_TO_YOUR_FILE_ anselm-prace.it4i.cz:_ANSELM_PATH_TO_YOUR_FILE_
$ gsiscp -P 2222 anselm-prace.it4i.cz:_ANSELM_PATH_TO_YOUR_FILE_ _LOCAL_PATH_TO_YOUR_FILE_
```
### Access to X11 Applications (VNC)
If users need to run an X11-based graphical application and do not have an X11 server, the application can be run using the VNC service. If you are using regular SSH-based access, see the [section in the general documentation](/general/accessing-the-clusters/graphical-user-interface/x-window-system/).
If you are using GSI SSH-based access, the procedure is similar to the SSH-based access ([see here](/general/accessing-the-clusters/graphical-user-interface/x-window-system/)), except that the port forwarding must be done using GSI SSH:
```console
$ gsissh -p 2222 salomon.it4i.cz -L 5961:localhost:5961
```
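With the tunnel established, a VNC viewer on the local machine connects through the forwarded port. A minimal sketch, assuming a TigerVNC-compatible client and the same port 5961 as above (the double colon selects a TCP port rather than a display number):
```console
$ vncviewer localhost::5961
```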
### Access With SSH
After successfully obtaining login credentials for the local IT4Innovations account, PRACE users can access the cluster as regular users using SSH. For more information, see [the section in the general documentation for Salomon](/salomon/shell-and-data-access/) and [the section in the general documentation for Anselm](/anselm/shell-and-data-access/).
## File Transfers
PRACE users can use the same transfer mechanisms as regular users (if they have undergone the full registration procedure). For information about this, see [the section in the general documentation for Salomon](/salomon/shell-and-data-access/) and [the section in the general documentation for Anselm](/anselm/shell-and-data-access/).
Apart from the standard mechanisms, a GridFTP server running the Globus Toolkit GridFTP service is available to PRACE users for transferring data to/from the Salomon cluster. The service is available from the public Internet as well as from the internal PRACE network (accessible only from other PRACE partners).
There is one control server and three backend servers for striping and/or backup in case one of them fails.
### Access From PRACE Network
For Salomon cluster:
| Login address | Port | Node role |
| ----------------------------- | ---- | --------------------------- |
| gridftp-prace.salomon.it4i.cz | 2812 | Front end / control server  |
| lgw1-prace.salomon.it4i.cz | 2813 | Backend / data mover server |
| lgw2-prace.salomon.it4i.cz | 2813 | Backend / data mover server |
| lgw3-prace.salomon.it4i.cz | 2813 | Backend / data mover server |
Copy files **to** Salomon by running the following commands on your local machine:
```console
$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp-prace.salomon.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_
```
For Anselm cluster:
| Login address | Port | Node role |
| ---------------------------- | ---- | --------------------------- |
| gridftp-prace.anselm.it4i.cz | 2812 | Front end / control server  |
| login1-prace.anselm.it4i.cz | 2813 | Backend / data mover server |
| login2-prace.anselm.it4i.cz | 2813 | Backend / data mover server |
| dm1-prace.anselm.it4i.cz | 2813 | Backend / data mover server |
Copy files **to** Anselm by running the following commands on your local machine:
```console
$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp-prace.anselm.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_
```
Or by using the prace_service script:
```console
$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://`prace_service -i -f salomon`/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_
```
```console
$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://`prace_service -i -f anselm`/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_
```
Copy files **from** Salomon:
```console
$ globus-url-copy gsiftp://gridftp-prace.salomon.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
```
Copy files **from** Anselm:
```console
$ globus-url-copy gsiftp://gridftp-prace.anselm.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
```
Or by using the prace_service script:
```console
$ globus-url-copy gsiftp://`prace_service -i -f salomon`/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
```
```console
$ globus-url-copy gsiftp://`prace_service -i -f anselm`/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
```
### Access From Public Internet
For Salomon cluster:
| Login address | Port | Node role |
| ----------------------- | ---- | --------------------------- |
| gridftp.salomon.it4i.cz | 2812 | Front end / control server  |
| lgw1.salomon.it4i.cz | 2813 | Backend / data mover server |
| lgw2.salomon.it4i.cz | 2813 | Backend / data mover server |
| lgw3.salomon.it4i.cz | 2813 | Backend / data mover server |
Copy files **to** Salomon by running the following commands on your local machine:
```console
$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp.salomon.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_
```
For Anselm cluster:
| Login address | Port | Node role |
| ---------------------- | ---- | --------------------------- |
| gridftp.anselm.it4i.cz | 2812 | Front end / control server  |
| login1.anselm.it4i.cz | 2813 | Backend / data mover server |
| login2.anselm.it4i.cz | 2813 | Backend / data mover server |
| dm1.anselm.it4i.cz | 2813 | Backend / data mover server |
Copy files **to** Anselm by running the following commands on your local machine:
```console
$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp.anselm.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_
```
Or by using the prace_service script:
```console
$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://`prace_service -e -f salomon`/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_
```
```console
$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://`prace_service -e -f anselm`/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_
```
Copy files **from** Salomon:
```console
$ globus-url-copy gsiftp://gridftp.salomon.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
```
Copy files **from** Anselm:
```console
$ globus-url-copy gsiftp://gridftp.anselm.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
```
Or by using the prace_service script:
```console
$ globus-url-copy gsiftp://`prace_service -e -f salomon`/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
```
```console
$ globus-url-copy gsiftp://`prace_service -e -f anselm`/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
```
Generally, both shared file systems are available through GridFTP:
| File system mount point | Filesystem | Comment |
| ----------------------- | ---------- | -------------------------------------------------------------- |
| /home                   | Lustre     | Default HOME directories of users, in the format /home/prace/login/ |
| /scratch | Lustre | Shared SCRATCH mounted on the whole cluster |
More information about the shared file systems is available [for Salomon here](/salomon/storage/) and [for Anselm here](/anselm/storage).
!!! hint
    The `prace` directory is used for PRACE users on the SCRATCH file system.
Only the Salomon cluster /scratch:
| Data type | Default path |
| ---------------------------- | ------------------------------- |
| large project files | /scratch/work/user/prace/login/ |
| large scratch/temporary data | /scratch/temp/ |
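For example, a working directory for project data might be created as follows (an illustrative sketch; my_project is a placeholder, and we assume your PRACE login equals $USER):
```console
$ mkdir -p /scratch/work/user/prace/$USER/my_project
```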
## Usage of the Cluster
There are some limitations for PRACE users when using the cluster. By default, PRACE users are not allowed to access special queues in PBS Pro that provide high priority or exclusive access to special equipment such as accelerated nodes and high-memory (fat) nodes. There may also be restrictions on obtaining a working license for the commercial software installed on the cluster, mostly because of the license agreement or an insufficient number of licenses.
For production runs, always use the scratch file systems. The available file systems are described [for Salomon here](/salomon/storage/) and [for Anselm here](/anselm/storage).
### Software, Modules and PRACE Common Production Environment
All software installed system-wide on the cluster is made available to users via modules. Information about the environment and module usage is in this [section of the general documentation](environment-and-modules/).
PRACE users can load the "prace" module to use the [PRACE Common Production Environment](http://www.prace-ri.eu/prace-common-production-environment/).
```console
$ ml prace
```
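To verify what the PCPE has loaded into your environment, list the currently loaded modules; under Lmod, the `ml` shorthand with no arguments does exactly that:
```console
$ ml
```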
### Resource Allocation and Job Execution
General information about resource allocation, job queuing, and job execution is in this [section of the general documentation for Salomon](/salomon/resources-allocation-policy/) and [section of the general documentation for Anselm](/anselm/resources-allocation-policy/).
For PRACE users, the default production run queue is "qprace". PRACE users can also use two other queues, "qexp" and "qfree".
For Salomon:
| queue | Active project | Project resources | Nodes | priority | authorization | walltime |
| ----------------------------- | -------------- | ----------------- | -------------------------- | -------- | ------------- | --------- |
| **qexp** Express queue | no | none required | 32 nodes, max 8 per user | 150 | no | 1 / 1 h |
| **qprace** Production queue | yes | >0 | 1006 nodes, max 86 per job | 0 | no | 24 / 48 h |
| **qfree** Free resource queue | yes | none required | 752 nodes, max 86 per job | -1024 | no | 12 / 12 h |
For Anselm:
| queue | Active project | Project resources | Nodes | priority | authorization | walltime |
| ----------------------------- | -------------- | ----------------- | ------------------- | -------- | ------------- | --------- |
| **qexp** Express queue | no | none required | 2 reserved, 8 total | high | no | 1 / 1h |
| **qprace** Production queue | yes | > 0 | 178 w/o accelerator | medium | no | 24 / 48 h |
| **qfree** Free resource queue | yes | none required | 178 w/o accelerator | very low | no | 12 / 12 h |
**qprace**, the PRACE production queue, is intended for normal production runs. An active project with nonzero remaining resources must be specified to enter qprace. The queue runs with medium priority, and no special authorization is required to use it. The maximum runtime in qprace is 48 hours. If a job needs a longer time, it must use checkpoint/restart functionality.
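A typical production submission into qprace might then look as follows (an illustrative sketch: PROJECT_ID and myjob.sh are placeholders, and ncpus=24 corresponds to Salomon nodes, so adjust for the target cluster):
```console
$ qsub -A PROJECT_ID -q qprace -l select=4:ncpus=24,walltime=48:00:00 ./myjob.sh
```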
### Accounting & Quota
The resources currently subject to accounting are core-hours. Core-hours are accounted on a wall clock basis: accounting runs whenever the computational cores are allocated or blocked via the PBS Pro workload manager (the qsub command), regardless of whether the cores are actually used for any calculation. See the [example in the general documentation for Salomon](/salomon/resources-allocation-policy/) and the [example in the general documentation for Anselm](/anselm/resources-allocation-policy/).
PRACE users should check their project accounting using the [PRACE Accounting Tool (DART)](http://www.prace-ri.eu/accounting-report-tool/).
Users who have undergone the full local registration procedure (including signing the IT4Innovations Acceptable Use Policy) and who have received a local password may check at any time how many core-hours they and their projects have consumed, using the "it4ifree" command. Note that you need your user password to use the command and that the displayed core-hours are "system core-hours", which differ from PRACE "standardized core-hours".
!!! note
    The **it4ifree** command is part of the it4i.portal.clients package, [located here](https://pypi.python.org/pypi/it4i.portal.clients).
```console
$ it4ifree
Password:
PID Total Used ...by me Free
-------- ------- ------ -------- -------
OPEN-0-0 1500000 400644 225265 1099356
DD-13-1 10000 2606 2606 7394
```
By default, a file system quota is applied. To check the current status of the quota (separately for HOME and SCRATCH), use:
```console
$ quota
$ lfs quota -u USER_LOGIN /scratch
```
If the quota is insufficient, contact [support](prace/#help-and-support) and request an increase.
# 7D Enhanced Hypercube
![](../img/7D_Enhanced_hypercube.png)
| Node type | Count | Short name | Long name | Rack |
| ------------------------------------ | ----- | ---------------- | ------------------------ | ----- |
| M-Cell compute nodes w/o accelerator | 576   | cns1 - cns576    | r1i0n0 - r4i7n17         | 1-4   |
| MIC-accelerated compute nodes        | 432   | cns577 - cns1008 | r21u01n577 - r37u31n1008 | 21-38 |
## IB Topology
![](../img/Salomon_IB_topology.png)
# Capacity Computing
## Introduction
In many cases, it is useful to submit a huge number (100+) of computational jobs into the PBS queue system. A huge number of (small) jobs is one of the most effective ways to execute embarrassingly parallel calculations, achieving the best runtime, throughput, and computer utilization.
However, executing a huge number of jobs via the PBS queue may strain the system. This strain may result in slow responses to commands, inefficient scheduling, and overall degradation of performance and user experience for all users. For this reason, the number of jobs is **limited to 100 per user, 1500 per job array**.
!!! note
    Please follow one of the procedures below in case you wish to schedule more than 100 jobs at a time.
* Use [Job arrays](#job-arrays) when running a huge number of [multithread](#shared-jobscript-on-one-node) (bound to one node only) or multinode (multithread across several nodes) jobs
* Use [GNU parallel](#gnu-parallel) when running single core jobs
* Combine [GNU parallel with Job arrays](#job-arrays-and-gnu-parallel) when running a huge number of single core jobs
## Policy
1. A user is allowed to submit at most 100 jobs. Each job may be [a job array](#job-arrays).
1. The array size is at most 1500 subjobs.
## Job Arrays
!!! note
    A huge number of jobs may be easily submitted and managed as a job array.
A job array is a compact representation of many jobs, called subjobs. The subjobs share the same job script and have the same values for all attributes and resources, with the following exceptions:
* each subjob has a unique index, $PBS_ARRAY_INDEX
* job identifiers of subjobs only differ by their indices
* the state of subjobs can differ (R, Q, etc.)
All subjobs within a job array have the same scheduling priority and schedule as independent jobs. The entire job array is submitted through a single qsub command and may be managed by the qdel, qalter, qhold, qrls, and qsig commands as a single job.
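For example, the whole array can be held and later released as a single unit (using a hypothetical job array ID of the same form as in the examples below):
```console
$ qhold 12345[].isrv5
$ qrls 12345[].isrv5
```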
### Shared jobscript
All subjobs in a job array use the very same, single jobscript. Each subjob runs its own instance of the jobscript. The instances execute different work, controlled by the $PBS_ARRAY_INDEX variable.
Example:
Assume we have 900 input files with names beginning with "file" (e.g. file001, ..., file900). Assume we would like to use each of these input files with the program executable myprog.x, each as a separate job.
First, we create a tasklist file (or subjobs list), listing all tasks (subjobs) - all input files in our example:
```console
$ find . -name 'file*' > tasklist
```
Then we create jobscript:
```bash
#!/bin/bash
#PBS -A PROJECT_ID
#PBS -q qprod
#PBS -l select=1:ncpus=24,walltime=02:00:00
# change to scratch directory
SCR=/scratch/work/user/$USER/$PBS_JOBID
mkdir -p $SCR ; cd $SCR || exit
# get individual tasks from tasklist with index from PBS JOB ARRAY
TASK=$(sed -n "${PBS_ARRAY_INDEX}p" $PBS_O_WORKDIR/tasklist)
# copy input file and executable to scratch
cp $PBS_O_WORKDIR/$TASK input ; cp $PBS_O_WORKDIR/myprog.x .
# execute the calculation
./myprog.x < input > output
# copy output file to submit directory
cp output $PBS_O_WORKDIR/$TASK.out
```
In this example, the submit directory holds the 900 input files, the executable myprog.x, and the jobscript file. As input for each run, we take the filename of an input file from the created tasklist file. We copy the input file to scratch (/scratch/work/user/$USER/$PBS_JOBID), execute myprog.x, and copy the output file back to the submit directory, under the $TASK.out name. The myprog.x runs on one node only and must use threads to run in parallel. Be aware that if myprog.x **is not multithreaded**, then all the **jobs run as single-thread programs in a sequential** manner. Due to the allocation of the whole node, the **accounted time is equal to the usage of the whole node**, while only 1/24 of the node is used!
If a huge number of parallel multicore jobs (in the sense of multinode, multithreaded jobs, e.g. MPI-enabled) needs to be run, then a job array approach should also be used. The main difference compared to the previous single-node example is that the local scratch must not be used (as it is not shared between nodes) and MPI or another technique for parallel multinode runs has to be used properly, as sketched below.
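A minimal sketch of such a multinode subjob script, assuming an MPI-enabled myprog.x, 24-core nodes, and an available OpenMPI module (all names and paths are illustrative):
```bash
#!/bin/bash
#PBS -A PROJECT_ID
#PBS -q qprod
#PBS -l select=2:ncpus=24:mpiprocs=24,walltime=02:00:00
# work in the shared scratch, visible from all allocated nodes
SCR=/scratch/work/user/$USER/$PBS_JOBID
mkdir -p $SCR ; cd $SCR || exit
# get the individual task from tasklist with index from the PBS job array
TASK=$(sed -n "${PBS_ARRAY_INDEX}p" $PBS_O_WORKDIR/tasklist)
# copy input file and executable to the shared scratch
cp $PBS_O_WORKDIR/$TASK input ; cp $PBS_O_WORKDIR/myprog.x .
# execute the MPI calculation across both nodes
ml OpenMPI
mpirun ./myprog.x < input > output
# copy output file back to the submit directory
cp output $PBS_O_WORKDIR/$TASK.out
```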
### Submit the Job Array
To submit the job array, use the qsub -J command. The 900 jobs of the [example above](#array_example) may be submitted like this:
```console
$ qsub -N JOBNAME -J 1-900 jobscript
12345[].isrv5
```
In this example, we submit a job array of 900 subjobs. Each subjob will run on a full node and is assumed to take less than 2 hours (note the #PBS directives at the beginning of the jobscript file; don't forget to set your valid PROJECT_ID and desired queue).
Sometimes, for testing purposes, you may need to submit a one-element array. This is not allowed by PBS Pro, but there is a workaround:
```console
$ qsub -N JOBNAME -J 9-10:2 jobscript
```
This will only choose the lower index (9 in this example) for submitting/running your job.
### Manage the Job Array
Check the status of the job array using the qstat command.
```console
$ qstat -a 12345[].isrv5
isrv5:
Req'd Req'd Elap
Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time
--------------- -------- -- |---|---| ------ --- --- ------ ----- - -----
12345[].isrv5 user2 qprod xx 13516 1 24 -- 00:50 B 00:02
```
The status B means that some subjobs are already running.
Check the status of the first 100 subjobs using the qstat command.
```console
$ qstat -a 12345[1-100].isrv5
isrv5:
Req'd Req'd Elap
Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time
--------------- -------- -- |---|---| ------ --- --- ------ ----- - -----
12345[1].isrv5 user2 qprod xx 13516 1 24 -- 00:50 R 00:02
12345[2].isrv5 user2 qprod xx 13516 1 24 -- 00:50 R 00:02
12345[3].isrv5 user2 qprod xx 13516 1 24 -- 00:50 R 00:01
12345[4].isrv5 user2 qprod xx 13516 1 24 -- 00:50 Q --
. . . . . . . . . . .
. . . . . . . . . . .
12345[100].isrv5 user2 qprod xx 13516 1 24 -- 00:50 Q --
```
Delete the entire job array. Running subjobs will be killed, queued subjobs will be deleted.
```console
$ qdel 12345[].isrv5
```
Deleting large job arrays may take a while.
Display status information for all user's jobs, job arrays, and subjobs.
```console
$ qstat -u $USER -t
```
Display status information for all user's subjobs.
```console
$ qstat -u $USER -tJ
```
Read more on job arrays in the [PBSPro Users guide](/software/pbspro/).
## GNU Parallel
!!! note
Use GNU parallel to run many single core tasks on one node.
GNU parallel is a shell tool for executing jobs in parallel using one or more computers. A job can be a single command or a small script that has to be run for each of the lines in the input. GNU parallel is most useful in running single core jobs via the queue system on the cluster.
For more information and examples see the parallel man page:
```console
$ ml parallel
$ man parallel
```
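As a quick illustration of how GNU parallel distributes work (the echo command here is just a stand-in for a real task), each input line is substituted for the {} placeholder and the commands run concurrently, by default one per available core:
```console
$ ml parallel
$ seq 1 3 | parallel echo "processing task {}"
```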
### GNU Parallel jobscript
The GNU parallel shell executes multiple instances of the jobscript using all cores on the node. The instances execute different work, controlled by the $PARALLEL_SEQ variable.
Example:
Assume we have 101 input files with names beginning with "file" (e.g. file001, ..., file101). Assume we would like to use each of these input files with the program executable myprog.x, each as a separate single-core job. We call these single-core jobs tasks.
First, we create a tasklist file, listing all tasks - all input files in our example:
```console
$ find . -name 'file*' > tasklist
```
Then we create jobscript:
```bash
#!/bin/bash
#PBS -A PROJECT_ID
#PBS -q qprod
#PBS -l select=1:ncpus=24,walltime=02:00:00
[ -z "$PARALLEL_SEQ" ] &&
{ ml parallel ; exec parallel -a $PBS_O_WORKDIR/tasklist $0 ; }
# change to scratch directory
SCR=/scratch/work/user/$USER/$PBS_JOBID/$PARALLEL_SEQ
mkdir -p $SCR ; cd $SCR || exit
# get individual task from tasklist
TASK=$1
# copy input file and executable to scratch
cp $PBS_O_WORKDIR/$TASK input
# execute the calculation; `cat` here stands in for the real myprog.x
cat input > output
# copy output file to submit directory
cp output $PBS_O_WORKDIR/$TASK.out
```
In this example, tasks from the tasklist are executed via GNU parallel. The jobscript executes multiple instances of itself in parallel, on all cores of the node. Once an instance of the jobscript finishes, a new instance starts, until all entries in the tasklist are processed. The currently processed entry of the tasklist may be retrieved via the $1 variable. The variable $TASK expands to one of the input filenames from the tasklist. We copy the input file to the scratch, execute the calculation (the `cat` command in the example stands in for myprog.x) and copy the output file back to the submit directory, under the $TASK.out name.
### Submit the Job
To submit the job, use the qsub command. The 101 tasks' job of the [example above](#gp_example) may be submitted like this:
```console
$ qsub -N JOBNAME jobscript
12345.dm2
```
In this example, we submit a job of 101 tasks. 24 input files will be processed in parallel. The 101 tasks on 24 cores are assumed to complete in less than 2 hours.
!!! note
Use #PBS directives at the beginning of the jobscript file; don't forget to set your valid PROJECT_ID and the desired queue.
## Job Arrays and GNU Parallel
!!! note
Combine the Job arrays and GNU parallel for best throughput of single core jobs
While job arrays are able to utilize all available computational nodes, the GNU parallel can be used to efficiently run multiple single-core jobs on single node. The two approaches may be combined to utilize all available (current and future) resources to execute single core jobs.
!!! note
Every subjob in an array runs GNU parallel to utilize all cores on the node
### GNU Parallel, Shared jobscript
A combined approach, very similar to job arrays, can be taken. A job array is submitted to the queuing system. The subjobs run GNU parallel. The GNU parallel shell executes multiple instances of the jobscript using all cores on the node. The instances execute different work, controlled by the $PBS_ARRAY_INDEX and $PARALLEL_SEQ variables.
Example:
Assume we have 960 input files with names beginning with "file" (e.g. file001, ..., file960). Assume we would like to use each of these input files with the program executable myprog.x, each as a separate single-core job. We call these single-core jobs tasks.
First, we create a tasklist file, listing all tasks - all input files in our example:
```console
$ find . -name 'file*' > tasklist
```
Next we create a file controlling how many tasks will be executed in one subjob:
```console
$ seq 48 > numtasks
```
Then we create jobscript:
```bash
#!/bin/bash
#PBS -A PROJECT_ID
#PBS -q qprod
#PBS -l select=1:ncpus=24,walltime=02:00:00
[ -z "$PARALLEL_SEQ" ] &&
{ ml parallel ; exec parallel -a $PBS_O_WORKDIR/numtasks $0 ; }
# change to scratch directory
SCR=/scratch/work/user/$USER/$PBS_JOBID/$PARALLEL_SEQ
mkdir -p $SCR ; cd $SCR || exit
# get individual task from tasklist with index from PBS JOB ARRAY and index from Parallel
IDX=$(($PBS_ARRAY_INDEX + $PARALLEL_SEQ - 1))
TASK=$(sed -n "${IDX}p" $PBS_O_WORKDIR/tasklist)
[ -z "$TASK" ] && exit
# copy input file and executable to scratch
cp $PBS_O_WORKDIR/$TASK input
# execute the calculation; `cat` here stands in for the real myprog.x
cat input > output
# copy output file to submit directory
cp output $PBS_O_WORKDIR/$TASK.out
```
In this example, the jobscript executes in multiple instances in parallel, on all cores of a computing node. The variable $TASK expands to one of the input filenames from the tasklist. We copy the input file to the scratch, execute the calculation (the `cat` command in the example stands in for myprog.x) and copy the output file back to the submit directory, under the $TASK.out name. The numtasks file controls how many tasks will be run per subjob. Once a task is finished, a new task starts, until the number of tasks in the numtasks file is reached.
!!! note
Select subjob walltime and number of tasks per subjob carefully
When deciding these values, consider the following guiding rules (a worked example follows the list):
1. Let n = N / 24. The inequality (n + 1) x T < W should hold, where N is the number of tasks per subjob, T is the expected single-task walltime, and W is the subjob walltime. A short subjob walltime improves scheduling and job throughput.
1. The number of tasks should be a multiple of 24.
1. These rules are valid only when all tasks have similar task walltimes T.
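For illustration, with the hypothetical values N = 48 tasks per subjob and an expected single-task walltime T = 20 minutes, we get n = 48 / 24 = 2 and (n + 1) x T = 60 minutes, so the subjob walltime W = 2 hours used in the jobscript above leaves a comfortable margin.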
### Submit the Job Array (-J)
To submit the job array, use the qsub -J command. The 960 tasks' job of the [example above](#combined_example) may be submitted like this:
```console
$ qsub -N JOBNAME -J 1-960:48 jobscript
12345[].dm2
```
In this example, we submit a job array of 20 subjobs. Note the -J 1-960:48; the step (48) must be the same as the number in the numtasks file. Each subjob will run on a full node and process 24 input files in parallel, 48 in total per subjob. Every subjob is assumed to complete in less than 2 hours.
!!! note
Use #PBS directives at the beginning of the jobscript file; don't forget to set your valid PROJECT_ID and the desired queue.
## Examples
Download the examples in [capacity.zip](capacity.zip), illustrating the above listed ways to run a huge number of jobs. We recommend trying out the examples before using this approach for production jobs.
Unzip the archive in an empty directory on the cluster and follow the instructions in the README file:
```console
$ unzip capacity.zip
$ cd capacity
$ cat README
```
# Compute Nodes
## Nodes Configuration
Salomon is a cluster of x86-64 Intel-based nodes. The cluster contains two types of compute nodes with the same processor type and memory size.
Compute nodes with MIC accelerators **contain two Intel Xeon Phi 7120P accelerators.**
[More about schematic representation of the Salomon cluster compute nodes IB topology](/salomon/ib-single-plane-topology/).
### Compute Nodes Without Accelerator
* codename "grafton"
* 576 nodes
* 13 824 cores in total
* two Intel Xeon E5-2680v3, 12-core, 2.5 GHz processors per node
* 128 GB of physical memory per node
![cn_m_cell](../img/cn_m_cell.jpg)
### Compute Nodes With MIC Accelerator
* codename "perrin"
* 432 nodes
* 10 368 cores in total
* two Intel Xeon E5-2680v3, 12-core, 2.5 GHz processors per node
* 128 GB of physical memory per node
* MIC accelerator 2 x Intel Xeon Phi 7120P per node, 61-cores, 16 GB per accelerator
![cn_mic](../img/cn_mic-1.jpg)
![(source Silicon Graphics International Corp.)](../img/sgi-c1104-gp1.jpeg)
![cn_mic](../img/cn_mic.jpg)
### UV 2000
* codename "UV2000"
* 1 node
* 112 cores in total
* 14 x Intel Xeon E5-4627v2, 8-core, 3.3 GHz processors, in 14 NUMA nodes
* 3328 GB of physical memory per node
* 1 x NVIDIA GM200 (GeForce GTX TITAN X), 12 GB RAM
![](../img/uv-2000.jpeg)
### Compute Nodes Summary
| Node type | Count | Memory | Cores |
| -------------------------- | ----- | ----------------- | ----------------------------------- |
| Nodes without accelerator  | 576   | 128 GB            | 24 @ 2.5 GHz                        |
| Nodes with MIC accelerator | 432   | 128 GB, MIC 32 GB | 24 @ 2.5 GHz, MIC 61 @ 1.238 GHz    |
| UV2000 SMP node            | 1     | 3328 GB           | 112 @ 3.3 GHz                       |
## Processor Architecture
Salomon is equipped with Intel Xeon processors Intel Xeon E5-2680v3. Processors support Advanced Vector Extensions 2.0 (AVX2) 256-bit instruction set.
### Intel Xeon E5-2680v3 Processor
* 12-core
* speed: 2.5 GHz, up to 3.3 GHz using Turbo Boost Technology
* peak performance: 40 GFLOP/s per core @ 2.5 GHz (see the derivation below)
* caches:
* Intel® Smart Cache: 30 MB
* memory bandwidth at the level of the processor: 68 GB/s
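The per-core peak figure can be derived from the AVX2 capability of the core: assuming two fused multiply-add (FMA) units, each operating on 4 double-precision values and counting 2 floating-point operations per FMA, the core performs 2 x 4 x 2 = 16 FLOP per cycle, i.e. 16 FLOP x 2.5 GHz = 40 GFLOP/s.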
### MIC Accelerator Intel Xeon Phi 7120P Processor
* 61-core
* speed: 1.238 GHz, up to 1.333 GHz using Turbo Boost Technology
* peak performance: 18.4 GFLOP/s per core
* caches:
* L2: 30.5 MB
* memory bandwidth at the level of the processor: 352 GB/s
## Memory Architecture
Memory is equally distributed across all CPUs and cores for optimal performance. Memory is composed of memory modules of the same size and evenly distributed across all memory controllers and memory channels.
### Compute Node Without Accelerator
* 2 sockets
* Memory Controllers are integrated into processors.
* 8 DDR4 DIMMs per node
* 4 DDR4 DIMMs per CPU
* 1 DDR4 DIMM per channel
* Populated memory: 8 x 16 GB DDR4 DIMMs @ 2133 MHz
### Compute Node With MIC Accelerator
* 2 sockets
* Memory Controllers are integrated into processors.
* 8 DDR4 DIMMs per node
* 4 DDR4 DIMMs per CPU
* 1 DDR4 DIMM per channel
* Populated memory: 8 x 16 GB DDR4 DIMMs @ 2133 MHz
**MIC Accelerator Intel Xeon Phi 7120P Processor**
* 2 sockets
* Memory Controllers are connected via an Interprocessor Network (IPN) ring.
* 16 GDDR5 DIMMs per node
* 8 GDDR5 DIMMs per CPU
* 2 GDDR5 DIMMs per channel
# Hardware Overview
## Introduction
The Salomon cluster consists of 1008 computational nodes, of which 576 are regular compute nodes and 432 are accelerated nodes. Each node is a powerful x86-64 computer, equipped with 24 cores (two twelve-core Intel Xeon processors) and 128 GB RAM. The nodes are interlinked by high-speed InfiniBand and Ethernet networks. All nodes share a 0.5 PB /home NFS disk storage to store user files. Users may use a DDN Lustre shared storage with a capacity of 1.69 PB, which is available for scratch project data. User access to the Salomon cluster is provided by four login nodes.
[More about schematic representation of the Salomon cluster compute nodes IB topology](/salomon/ib-single-plane-topology/).
![Salomon](../img/salomon-2.jpg)
The parameters are summarized in the following tables:
## General Information
| **In general** | |
| ------------------------------------------- | ------------------------------------------- |
| Primary purpose | High Performance Computing |
| Architecture of compute nodes | x86-64 |
| Operating system | CentOS 6.x Linux |
| [**Compute nodes**](/salomon/compute-nodes/) | |
| Total | 1008 |
| Processor | 2 x Intel Xeon E5-2680v3, 2.5 GHz, 12 cores |
| RAM | 128GB, 5.3 GB per core, DDR4@2133 MHz |
| Local disk drive | no |
| Compute network / Topology | InfiniBand FDR56 / 7D Enhanced hypercube |
| w/o accelerator | 576 |
| MIC accelerated | 432 |
| **In total** | |
| Total theoretical peak performance (Rpeak) | 2011 TFLOP/s |
| Total amount of RAM | 129.024 TB |
## Compute Nodes
| Node | Count | Processor | Cores | Memory | Accelerator |
| --------------- | ----- | --------------------------------- | ----- | ------ | --------------------------------------------- |
| w/o accelerator | 576 | 2 x Intel Xeon E5-2680v3, 2.5 GHz | 24 | 128 GB | - |
| MIC accelerated | 432 | 2 x Intel Xeon E5-2680v3, 2.5 GHz | 24 | 128 GB | 2 x Intel Xeon Phi 7120P, 61 cores, 16 GB RAM |
For more details refer to the [Compute nodes](/salomon/compute-nodes/).
## Remote Visualization Nodes
For remote visualization, two nodes with NICE DCV software are available, each configured as follows:
| Node | Count | Processor | Cores | Memory | GPU Accelerator |
| ------------- | ----- | --------------------------------- | ----- | ------ | ----------------------------- |
| visualization | 2 | 2 x Intel Xeon E5-2695v3, 2.3 GHz | 28 | 512 GB | NVIDIA QUADRO K5000, 4 GB RAM |
## SGI UV 2000
For large memory computations a special SMP/NUMA SGI UV 2000 server is available:
| Node | Count | Processor | Cores | Memory | Extra HW |
| ------ | ----- | ------------------------------------------- | ----- | --------------------- | ------------------------------------------------------------------------ |
| UV2000 | 1 | 14 x Intel Xeon E5-4627v2, 3.3 GHz, 8 cores | 112 | 3328 GB DDR3@1866 MHz | 2 x 400GB local SSD, 1x NVIDIA GM200 (GeForce GTX TITAN X), 12 GB RAM |
![](../img/uv-2000.jpeg)
# IB Single-Plane Topology
A complete M-Cell assembly consists of four compute racks. Each rack contains 4 physical IRUs (Independent Rack Units). Using one dual-socket node per blade slot leads to 8 logical IRUs. Each rack contains 4 x 2 SGI ICE X IB Premium Blades.
The SGI ICE X IB Premium Blade provides the first level of interconnection via dual 36-port Mellanox FDR InfiniBand ASIC switch with connections as follows:
* 9 ports from each switch chip connect to the unified backplane, to connect the 18 compute node slots
* 3 ports on each chip provide connectivity between the chips
* 24 ports from each switch chip connect to the external bulkhead, for a total of 48
## IB Single-Plane Topology - ICEX M-Cell
Each color in each physical IRU represents one dual-switch ASIC switch.
[IB single-plane topology - ICEX Mcell.pdf](../src/IB single-plane topology - ICEX Mcell.pdf)
![IB single-plane topology - ICEX Mcell.pdf](../img/IBsingleplanetopologyICEXMcellsmall.png)
## IB Single-Plane Topology - Accelerated Nodes
Each of the 3 inter-connected D racks is equivalent to one half of an M-Cell rack. The 18 D racks with MIC accelerated nodes [r21-r38] are equivalent to 3 M-Cell racks, as shown in the diagram [7D Enhanced Hypercube](/salomon/7d-enhanced-hypercube/).
As shown in the diagram [IB Topology](/salomon/7d-enhanced-hypercube/#ib-topology):
* Racks 21, 22, 23, 24, 25, 26 are equivalent to one M-Cell rack.
* Racks 27, 28, 29, 30, 31, 32 are equivalent to one M-Cell rack.
* Racks 33, 34, 35, 36, 37, 38 are equivalent to one M-Cell rack.
[IB single-plane topology - Accelerated nodes.pdf](../src/IB single-plane topology - Accelerated nodes.pdf)
![IB single-plane topology - Accelerated nodes.pdf](../img/IBsingleplanetopologyAcceleratednodessmall.png)
# Introduction
Welcome to the Salomon supercomputer cluster. The Salomon cluster consists of 1008 compute nodes, totalling 24192 compute cores with 129 TB RAM, giving over 2 Pflop/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 24 cores and at least 128 GB RAM. Nodes are interconnected through a 7D Enhanced hypercube InfiniBand network and are equipped with Intel Xeon E5-2680v3 processors. The Salomon cluster consists of 576 nodes without accelerators and 432 nodes equipped with Intel Xeon Phi MIC accelerators. Read more in the [Hardware Overview](/salomon/hardware-overview/).
The cluster runs with a [CentOS Linux](http://www.bull.com/bullx-logiciels/systeme-exploitation.html) operating system, which is compatible with the RedHat [Linux family.](http://upload.wikimedia.org/wikipedia/commons/1/1b/Linux_Distribution_Timeline.svg)
## Water-Cooled Compute Nodes With MIC Accelerators
![](../img/salomon.jpg)
![](../img/salomon-1.jpeg)
## Tape Library T950B
![](../img/salomon-3.jpeg)
![](../img/salomon-4.jpeg)
# Job Scheduling
## Job Execution Priority
The scheduler gives each job an execution priority and then uses this priority to select which job(s) to run.
Job execution priority is determined by these job properties (in order of importance):
1. queue priority
1. fair-share priority
1. eligible time
### Queue Priority
Queue priority is the priority of the queue in which the job is waiting prior to execution.
Queue priority has the biggest impact on job execution priority. The execution priority of jobs in higher-priority queues is always greater than the execution priority of jobs in lower-priority queues. Other job properties used for determining job execution priority (fair-share priority, eligible time) cannot compete with queue priority.
Queue priorities can be seen at [https://extranet.it4i.cz/rsweb/salomon/queues](https://extranet.it4i.cz/rsweb/salomon/queues)
### Fair-Share Priority
Fair-share priority is a priority calculated from recent usage of resources. Fair-share priority is calculated per project; all members of a project share the same fair-share priority. Projects with higher recent usage have lower fair-share priority than projects with lower or no recent usage.
Fair-share priority is used for ranking jobs with equal queue priority.
Fair-share priority is calculated as
---8<--- "fairshare_formula.md"
where MAX_FAIRSHARE has the value 1E6,
usage<sub>Project</sub> is the cumulated usage by all members of the selected project,
usage<sub>Total</sub> is the total usage by all users, across all projects.
Usage counts allocated core-hours (`ncpus x walltime`). Usage is decayed, i.e. cut in half, periodically at the interval of 168 hours (one week).
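To illustrate the decay: a project that has consumed 10000 core-hours and then stops computing is counted as roughly 5000 core-hours of usage one week later and 2500 core-hours after two weeks, so its fair-share priority gradually recovers.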
!!! note
Jobs queued in the queue qexp are not calculated into the project's usage.
!!! note
Calculated usage and fair-share priority can be seen at <https://extranet.it4i.cz/rsweb/salomon/projects>.
Calculated fair-share priority can be also seen as Resource_List.fairshare attribute of a job.
### Eligible Time
Eligible time is the amount (in seconds) of eligible time a job accrues while waiting to run. Jobs with higher eligible time gain higher priority.
Eligible time has the least impact on execution priority. Eligible time is used for sorting jobs with equal queue priority and fair-share priority. It is very, very difficult for eligible time to compete with fair-share priority.
Eligible time can be seen as the eligible_time attribute of a job.
### Formula
Job execution priority (job sort formula) is calculated as:
---8<--- "job_sort_formula.md"
### Job Backfilling
The scheduler uses job backfilling.
Backfilling means fitting smaller jobs around the higher-priority jobs that the scheduler is going to run next, in such a way that the higher-priority jobs are not delayed. Backfilling allows us to keep resources from becoming idle when the top job (job with the highest execution priority) cannot run.
The scheduler makes a list of jobs to run in order of execution priority. It looks for smaller jobs that can fit into the usage gaps around the highest-priority jobs in the list, and chooses the highest-priority smaller jobs that fit. Filler jobs are run only if they will not delay the start time of the top jobs.
This means that jobs with lower execution priority can run before jobs with higher execution priority.
!!! note
It is **very beneficial to specify the walltime** when submitting jobs.
Specifying a more accurate walltime enables better scheduling, better execution times, and better resource usage. Jobs with a suitably short walltime may be backfilled and overtake job(s) with higher priority.
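For example, a job that realistically finishes within 90 minutes is a good backfilling candidate when submitted with a correspondingly tight walltime (the values here are illustrative):
```console
$ qsub -A PROJECT_ID -q qprod -l select=2:ncpus=24,walltime=01:30:00 ./myjob
```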
### Job Placement
Job [placement can be controlled by flags during submission](/salomon/job-submission-and-execution/#job_placement).
---8<--- "mathjax.md"
# Job Submission and Execution
## Job Submission
When allocating computational resources for the job, specify:
1. suitable queue for your job (default is qprod)
1. number of computational nodes required
1. number of cores per node required
1. maximum wall time allocated to your calculation, note that jobs exceeding maximum wall time will be killed
1. Project ID
1. Jobscript or interactive switch
!!! note
Use the **qsub** command to submit your job to a queue for allocation of the computational resources.
Submit the job using the qsub command:
```console
$ qsub -A Project_ID -q queue -l select=x:ncpus=y,walltime=[[hh:]mm:]ss[.ms] jobscript
```
The qsub command submits the job into the queue; in other words, qsub creates a request to the PBS Job manager for an allocation of the specified resources. The resources will be allocated when available, subject to the above described policies and constraints. **After the resources are allocated, the jobscript or interactive shell is executed on the first of the allocated nodes.**
!!! note
The PBS nodes statement (qsub -l nodes=nodespec) is not supported on the Salomon cluster.
### Job Submission Examples
```console
$ qsub -A OPEN-0-0 -q qprod -l select=64:ncpus=24,walltime=03:00:00 ./myjob
```
In this example, we allocate 64 nodes, 24 cores per node, for 3 hours. We allocate these resources via the qprod queue, consumed resources will be accounted to the Project identified by Project ID OPEN-0-0. Jobscript myjob will be executed on the first node in the allocation.
```console
$ qsub -q qexp -l select=4:ncpus=24 -I
```
In this example, we allocate 4 nodes, 24 cores per node, for 1 hour. We allocate these resources via the qexp queue. The resources will be available interactively.
```console
$ qsub -A OPEN-0-0 -q qlong -l select=10:ncpus=24 ./myjob
```
In this example, we allocate 10 nodes, 24 cores per node, for 72 hours. We allocate these resources via the qlong queue. Jobscript myjob will be executed on the first node in the allocation.
```console
$ qsub -A OPEN-0-0 -q qfree -l select=10:ncpus=24 ./myjob
```
In this example, we allocate 10 nodes, 24 cores per node, for 12 hours. We allocate these resources via the qfree queue. It is not required that the project OPEN-0-0 has any available resources left. Consumed resources are still accounted for. Jobscript myjob will be executed on the first node in the allocation.
### Intel Xeon Phi Co-Processors
To allocate a node with a Xeon Phi co-processor, the user needs to specify it in the select statement. Currently only the allocation of whole nodes with both Phi cards as the smallest chunk is supported. The standard PBSPro approach through the attributes "accelerator", "naccelerators" and "accelerator_model" is used. The "accelerator_model" can be omitted, since only one accelerator model is available on Salomon.
The absence of a specialized queue for accessing the nodes with cards means that the Phi cards can be utilized in any queue, including qexp for testing/experiments, qlong for longer jobs, qfree after the project resources have been spent, etc. The Phi cards are thus also available to PRACE users. There's no need to ask for permission to utilize the Phi cards in project proposals.
```console
$ qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=24:accelerator=True:naccelerators=2:accelerator_model=phi7120 ./myjob
```
In this example, we allocate 1 node with 24 cores and 2 Xeon Phi 7120P cards, running the batch job ./myjob. The default walltime for qprod is used, i.e. 24 hours.
```console
$ qsub -A OPEN-0-0 -q qlong -l select=4:ncpus=24:accelerator=True:naccelerators=2 -l walltime=56:00:00 -I
```
In this example, we allocate 4 nodes, with 24 cores per node (totalling 96 cores), with 2 Xeon Phi 7120P cards per node (totalling 8 Phi cards), running an interactive job for 56 hours. The accelerator model name was omitted.
#### Intel Xeon Phi - Queue QMIC
Example executions:
```console
-l select=1
exec_vnode = (r21u05n581-mic0:naccelerators=1:ncpus=0)
-l select=4
(r21u05n581-mic0:naccelerators=1:ncpus=0)+(r21u05n581-mic1:naccelerators=1:ncpus=0)+(r21u06n582-mic0:naccelerators=1:ncpus=0)+(r21u06n582-mic1:naccelerators=1:ncpus=0)
-l select=4:naccelerators=1
(r21u05n581-mic0:naccelerators=1:ncpus=0)+(r21u05n581-mic1:naccelerators=1:ncpus=0)+(r21u06n582-mic0:naccelerators=1:ncpus=0)+(r21u06n582-mic1:naccelerators=1:ncpus=0)
-l select=1:naccelerators=2
(r21u05n581-mic0:naccelerators=1+r21u05n581-mic1:naccelerators=1)
-l select=2:naccelerators=2
(r21u05n581-mic0:naccelerators=1+r21u05n581-mic1:naccelerators=1)+(r21u06n582-mic0:naccelerators=1+r21u06n582-mic1:naccelerators=1)
-l select=1:ncpus=24:naccelerators=2
(r22u32n610:ncpus=24+r22u32n610-mic0:naccelerators=1+r22u32n610-mic1:naccelerators=1)
-l select=1:ncpus=24:naccelerators=0+4
(r33u17n878:ncpus=24:naccelerators=0)+(r33u13n874-mic0:naccelerators=1:ncpus=0)+(r33u13n874-mic1:naccelerators=1:ncpus=0)+(r33u16n877-mic0:naccelerators=1:ncpus=0)+(r33u16n877-mic1:naccelerators=1:ncpus=0)
```
### UV2000 SMP
!!! note
13 NUMA nodes available on UV2000
Per NUMA node allocation.
Jobs are isolated by cpusets.
The UV2000 (node uv1) offers 3 TB of RAM and 104 cores, distributed over 13 NUMA nodes. A NUMA node packs 8 cores and approx. 247 GB RAM (with one exception: node 11 has only 123 GB RAM). In PBS, the UV2000 provides 13 chunks, one chunk per NUMA node (see [Resource allocation policy](/salomon/resources-allocation-policy/)). The jobs on UV2000 are isolated from each other by cpusets, so that a job by one user may not utilize CPU or memory allocated to a job by another user. Full chunks are always allocated; a job may only use resources of the NUMA nodes allocated to itself.
```console
$ qsub -A OPEN-0-0 -q qfat -l select=13 ./myjob
```
In this example, we allocate all 13 NUMA nodes (corresponds to 13 chunks), 104 cores of the SGI UV2000 node for 24 hours. Jobscript myjob will be executed on the node uv1.
```console
$ qsub -A OPEN-0-0 -q qfat -l select=1:mem=2000GB ./myjob
```
In this example, we allocate 2000GB of memory on the UV2000 for 24 hours. By requesting 2000GB of memory, memory from 10 chunks and 8 cores are allocated. Jobscript myjob will be executed on the node uv1.
```console
$ qsub -A OPEN-0-0 -q qfat -l select=1:mem=3099GB,walltime=48:00:00 ./myjob
```
In this example, we allocate 3099GB of memory on the UV2000 for 48 hours. By requesting 3099GB of memory, memory from all 13 chunks and 8 cores are allocated. Jobscript myjob will be executed on the node uv1.
```console
$ qsub -A OPEN-0-0 -q qfat -l select=2:mem=1000GB,walltime=48:00:00 ./myjob
```
In this example, we allocate 2000GB of memory and 16 cores on the UV2000 for 48 hours. By requesting 1000GB of memory per chunk, 2000GB of memory and 16 cores are allocated. Jobscript myjob will be executed on the node uv1.
### Useful Tricks
All qsub options may be [saved directly into the jobscript](#example-jobscript-for-mpi-calculation-with-preloaded-inputs). In such a case, no options to qsub are needed.
```console
$ qsub ./myjob
```
By default, the PBS batch system sends an e-mail only when the job is aborted. Disabling mail events completely can be done like this:
```console
$ qsub -m n
```
## Advanced Job Placement
### Placement by Name
!!! note
Not useful for ordinary computing, suitable for node testing/benchmarking and management tasks.
Specific nodes may be selected using PBS resource attribute host (for hostnames):
```console
qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=24:host=r24u35n680+1:ncpus=24:host=r24u36n681 -I
```
Specific nodes may be selected using the PBS resource attribute cname (for short names in cns[0-9]+ format):
```console
qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=24:host=cns680+1:ncpus=24:host=cns681 -I
```
In this example, we allocate nodes r24u35n680 and r24u36n681, all 24 cores per node, for 24 hours. Consumed resources will be accounted to the Project identified by Project ID OPEN-0-0. The resources will be available interactively.
### Placement by Network Location
The network location of the allocated nodes in the [InfiniBand network](/salomon/network/) influences the efficiency of network communication between the nodes of a job. Nodes on the same InfiniBand switch communicate faster, with lower latency, than distant nodes. To improve the communication efficiency of jobs, the PBS scheduler on Salomon is configured to allocate nodes (from currently available resources) which are as close as possible in the network topology.
For communication intensive jobs it is possible to set a stricter requirement: to require nodes directly connected to the same InfiniBand switch, or to require nodes located in the same dimension group of the InfiniBand network.
### Placement by InfiniBand Switch
Nodes directly connected to the same InfiniBand switch can communicate most efficiently. Using the same switch prevents hops in the network and provides for unbiased, most efficient network communication. There are 9 nodes directly connected to every InfiniBand switch.
!!! note
We recommend allocating compute nodes of a single switch when the best possible computational network performance is required to run job efficiently.
Nodes directly connected to one InfiniBand switch can be allocated using node grouping on the PBS resource attribute switch.
In this example, we request all 9 nodes directly connected to the same switch using node grouping placement.
```console
$ qsub -A OPEN-0-0 -q qprod -l select=9:ncpus=24 -l place=group=switch ./myjob
```
### Placement by Specific InfiniBand Switch
!!! note
Not useful for ordinary computing, suitable for testing and management tasks.
Nodes directly connected to a specific InfiniBand switch can be selected using the PBS resource attribute _switch_.
In this example, we request all 9 nodes directly connected to the r4i1s0sw1 switch.
```console
$ qsub -A OPEN-0-0 -q qprod -l select=9:ncpus=24:switch=r4i1s0sw1 ./myjob
```
List of all InfiniBand switches:
```console
$ qmgr -c 'print node @a' | grep switch | awk '{print $6}' | sort -u
r1i0s0sw0
r1i0s0sw1
r1i1s0sw0
r1i1s0sw1
r1i2s0sw0
...
```
List of all nodes directly connected to a specific InfiniBand switch:
```console
$ qmgr -c 'p n @d' | grep 'switch = r36sw3' | awk '{print $3}' | sort
r36u31n964
r36u32n965
r36u33n966
r36u34n967
r36u35n968
r36u36n969
r37u32n970
r37u33n971
r37u34n972
```
### Placement by Hypercube Dimension
Nodes located in the same dimension group may be allocated using node grouping on the PBS resource attribute ehc\_[1-7]d.
| Hypercube dimension | node_group_key | #nodes per group |
| ------------------- | -------------- | ---------------- |
| 1D | ehc_1d | 18 |
| 2D | ehc_2d | 36 |
| 3D | ehc_3d | 72 |
| 4D | ehc_4d | 144 |
| 5D | ehc_5d | 144,288 |
| 6D | ehc_6d | 432,576 |
| 7D | ehc_7d | all |
In this example, we allocate 16 nodes in the same [hypercube dimension](/salomon/7d-enhanced-hypercube/) 1 group.
```console
$ qsub -A OPEN-0-0 -q qprod -l select=16:ncpus=24 -l place=group=ehc_1d -I
```
For better understanding:
List of all groups in dimension 1:
```console
$ qmgr -c 'p n @d' | grep ehc_1d | awk '{print $6}' | sort |uniq -c
18 r1i0
18 r1i1
18 r1i2
18 r1i3
...
```
List of all nodes in a specific dimension 1 group:
```console
$ qmgr -c 'p n @d' | grep 'ehc_1d = r1i0' | awk '{print $3}' | sort
r1i0n0
r1i0n1
r1i0n10
r1i0n11
...
```
## Job Management
!!! note
Check status of your jobs using the **qstat** and **check-pbs-jobs** commands
```console
$ qstat -a
$ qstat -a -u username
$ qstat -an -u username
$ qstat -f 12345.isrv5
```
Example:
```console
$ qstat -a
srv11:
Req'd Req'd Elap
Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time
--------------- -------- -- |---|---| ------ --- --- ------ ----- - -----
16287.isrv5 user1 qlong job1 6183 4 64 -- 144:0 R 38:25
16468.isrv5 user1 qlong job2 8060 4 64 -- 144:0 R 17:44
16547.isrv5 user2 qprod job3x 13516 2 32 -- 48:00 R 00:58
```
In this example, user1 and user2 are running jobs named job1, job2, and job3x. The jobs job1 and job2 use 4 nodes, 16 cores per node each. The job1 has already run for 38 hours and 25 minutes, job2 for 17 hours 44 minutes. The job1 has already consumed 64 x 38.41 = 2458.6 core-hours. The job3x has already consumed 0.96 x 32 = 30.93 core-hours. These consumed core-hours will be accounted on the respective project accounts, regardless of whether the allocated cores were actually used for computations.
Check the status of your jobs using the check-pbs-jobs command. It checks the presence of the user's PBS job processes on the execution hosts, displays load and processes, displays the job's standard and error output, and can continuously display (tail -f) the job's standard or error output.
```console
$ check-pbs-jobs --check-all
$ check-pbs-jobs --print-load --print-processes
$ check-pbs-jobs --print-job-out --print-job-err
$ check-pbs-jobs --jobid JOBID --check-all --print-all
$ check-pbs-jobs --jobid JOBID --tailf-job-out
```
Examples:
```console
$ check-pbs-jobs --check-all
JOB 35141.dm2, session_id 71995, user user2, nodes r3i6n2,r3i6n3
Check session id: OK
Check processes
r3i6n2: OK
r3i6n3: No process
```
In this example we see that job 35141.dm2 currently runs no process on the allocated node r3i6n3, which may indicate an execution error.
```console
$ check-pbs-jobs --print-load --print-processes
JOB 35141.dm2, session_id 71995, user user2, nodes r3i6n2,r3i6n3
Print load
r3i6n2: LOAD: 16.01, 16.01, 16.00
r3i6n3: LOAD: 0.01, 0.00, 0.01
Print processes
%CPU CMD
r3i6n2: 0.0 -bash
r3i6n2: 0.0 /bin/bash /var/spool/PBS/mom_priv/jobs/35141.dm2.SC
r3i6n2: 99.7 run-task
...
```
In this example we see that job 35141.dm2 currently runs the process run-task on node r3i6n2, using one thread only, while node r3i6n3 is empty, which may indicate an execution error.
```console
$ check-pbs-jobs --jobid 35141.dm2 --print-job-out
JOB 35141.dm2, session_id 71995, user user2, nodes r3i6n2,r3i6n3
Print job standard output:
======================== Job start ==========================
Started at : Fri Aug 30 02:47:53 CEST 2013
Script name : script
Run loop 1
Run loop 2
Run loop 3
```
In this example, we see the actual output (some iteration loops) of the job 35141.dm2.
!!! note
Manage your queued or running jobs, using the **qhold**, **qrls**, **qdel,** **qsig** or **qalter** commands
You may release your allocation at any time using the qdel command:
```console
$ qdel 12345.isrv5
```
You may kill a running job by force using the qsig command:
```console
$ qsig -s 9 12345.isrv5
```
Learn more by reading the PBS man page:
```console
$ man pbs_professional
```
## Job Execution
### Jobscript
!!! note
Prepare the jobscript to run batch jobs in the PBS queue system
A jobscript is a user-made script controlling the sequence of commands for executing the calculation. It is often written in bash, though other scripting languages may be used as well. The jobscript is supplied to the PBS **qsub** command as an argument and executed by the PBS Professional workload manager.
!!! note
The jobscript or interactive shell is executed on the first of the allocated nodes.
```console
$ qsub -q qexp -l select=4:ncpus=24 -N Name0 ./myjob
$ qstat -n -u username
isrv5:
Req'd Req'd Elap
Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time
--------------- -------- -- |---|---| ------ --- --- ------ ----- - -----
15209.isrv5 username qexp Name0 5530 4 96 -- 01:00 R 00:00
r21u01n577/0*24+r21u02n578/0*24+r21u03n579/0*24+r21u04n580/0*24
```
In this example, the nodes r21u01n577, r21u02n578, r21u03n579, r21u04n580 were allocated for 1 hour via the qexp queue. The jobscript myjob will be executed on the node r21u01n577, while the nodes r21u02n578, r21u03n579, r21u04n580 are available for use as well.
!!! note
The jobscript or interactive shell is by default executed in the home directory.
```console
$ qsub -q qexp -l select=4:ncpus=24 -I
qsub: waiting for job 15210.isrv5 to start
qsub: job 15210.isrv5 ready
$ pwd
/home/username
```
In this example, 4 nodes were allocated interactively for 1 hour via the qexp queue. The interactive shell is executed in the home directory.
!!! note
All nodes within the allocation may be accessed via ssh. Unallocated nodes are not accessible to the user.
The allocated nodes are accessible via ssh from the login nodes. The nodes may access each other via ssh as well.
Calculations on the allocated nodes may be executed remotely via MPI, ssh, pdsh or clush. You may find out which nodes belong to the allocation by reading the $PBS_NODEFILE file:
```console
qsub -q qexp -l select=2:ncpus=24 -I
qsub: waiting for job 15210.isrv5 to start
qsub: job 15210.isrv5 ready
$ pwd
/home/username
$ sort -u $PBS_NODEFILE
r2i5n6.ib0.smc.salomon.it4i.cz
r4i6n13.ib0.smc.salomon.it4i.cz
r4i7n0.ib0.smc.salomon.it4i.cz
r4i7n2.ib0.smc.salomon.it4i.cz
$ pdsh -w r2i5n6,r4i6n13,r4i7n[0,2] hostname
r4i6n13: r4i6n13
r2i5n6: r2i5n6
r4i7n2: r4i7n2
r4i7n0: r4i7n0
```
In this example, the hostname program is executed via pdsh from the interactive shell. The execution runs on all four allocated nodes. The same result would be achieved if the pdsh is called from any of the allocated nodes or from the login nodes.
### Example Jobscript for MPI Calculation
!!! note
Production jobs must use the /scratch directory for I/O
The recommended way to run production jobs is to change to the /scratch directory early in the jobscript, copy all inputs to /scratch, execute the calculations, and copy the outputs back to the home directory.
```bash
#!/bin/bash
# change to scratch directory, exit on failure
SCRDIR=/scratch/work/user/$USER/myjob
mkdir -p $SCRDIR
cd $SCRDIR || exit
# copy input file to scratch
cp $PBS_O_WORKDIR/input .
cp $PBS_O_WORKDIR/mympiprog.x .
# load the MPI module
ml OpenMPI
# execute the calculation
mpiexec -pernode ./mympiprog.x
# copy output file to home
cp output $PBS_O_WORKDIR/.
#exit
exit
```
In this example, a directory on /home holds the input file input and the executable mympiprog.x. We create the directory myjob on the /scratch filesystem, copy the input and executable files from the /home directory where the qsub was invoked ($PBS_O_WORKDIR) to /scratch, execute the MPI program mympiprog.x and copy the output file back to the /home directory. The mympiprog.x is executed as one process per node, on all allocated nodes.
!!! note
Consider preloading inputs and executables onto [shared scratch](storage/) before the calculation starts.
In some cases, it may be impractical to copy the inputs to scratch and the outputs to home. This is especially true when very large input and output files are expected, or when the files should be reused by a subsequent calculation. In such a case, it is the user's responsibility to preload the input files on the shared /scratch before the job submission, and retrieve the outputs manually after all calculations are finished.
!!! note
Store the qsub options within the jobscript. Use **mpiprocs** and **ompthreads** qsub options to control the MPI job execution.
### Example Jobscript for MPI Calculation With Preloaded Inputs
Example jobscript for an MPI job with preloaded inputs and executables; the options for qsub are stored within the script:
```bash
#!/bin/bash
#PBS -q qprod
#PBS -N MYJOB
#PBS -l select=100:ncpus=24:mpiprocs=1:ompthreads=24
#PBS -A OPEN-0-0
# change to scratch directory, exit on failure
SCRDIR=/scratch/work/user/$USER/myjob
cd $SCRDIR || exit
# load the MPI module
ml OpenMPI
# execute the calculation
mpiexec ./mympiprog.x
#exit
exit
```
In this example, the input and executable files are assumed to be preloaded manually in the /scratch/work/user/$USER/myjob directory. Note the **mpiprocs** and **ompthreads** qsub options, controlling the behavior of the MPI execution. The mympiprog.x is executed as one process per node, on all 100 allocated nodes. If mympiprog.x implements OpenMP threads, it will run 24 threads per node.
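For comparison, a pure-MPI variant of the same allocation (an illustrative sketch) would start one MPI process per core with a single thread each:
```console
$ qsub -A OPEN-0-0 -q qprod -l select=100:ncpus=24:mpiprocs=24:ompthreads=1 ./myjob
```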
### Example Jobscript for Single Node Calculation
!!! note
The local scratch directory is often useful for single-node jobs. The local scratch will be deleted immediately after the job ends. Be very careful: use of the RAM disk filesystem is at the expense of operational memory.
Example jobscript for single node calculation, using [local scratch](/salomon/storage/) on the node:
```bash
#!/bin/bash
# change to local scratch directory
cd /lscratch/$PBS_JOBID || exit
# copy input file to scratch
cp $PBS_O_WORKDIR/input .
cp $PBS_O_WORKDIR/myprog.x .
# execute the calculation
./myprog.x
# copy output file to home
cp output $PBS_O_WORKDIR/.
#exit
exit
```
In this example, a directory on /home holds the input file input and the executable myprog.x. We copy the input and executable files from the home directory where the qsub was invoked ($PBS_O_WORKDIR) to the local scratch /lscratch/$PBS_JOBID, execute the myprog.x and copy the output file back to the /home directory. The myprog.x runs on one node only and may use threads.
# Network
All compute and login nodes of Salomon are interconnected by 7D Enhanced hypercube [InfiniBand](http://en.wikipedia.org/wiki/InfiniBand) network and by Gigabit [Ethernet](http://en.wikipedia.org/wiki/Ethernet)
network. Only [InfiniBand](http://en.wikipedia.org/wiki/InfiniBand) network may be used to transfer user data.
## InfiniBand Network
All compute and login nodes of Salomon are interconnected by 7D Enhanced hypercube [Infiniband](http://en.wikipedia.org/wiki/InfiniBand) network (56 Gbps). The network topology is a [7D Enhanced hypercube](/salomon/7d-enhanced-hypercube/).
Read more about the schematic representation of the Salomon cluster [IB single-plane topology](/salomon/ib-single-plane-topology/)
([hypercube dimension](/salomon/7d-enhanced-hypercube/)).
The compute nodes may be accessed via the Infiniband network using ib0 network interface, in address range 10.17.0.0 (mask 255.255.224.0). The MPI may be used to establish native Infiniband connection among the nodes.
The network provides **2170MB/s** transfer rates via the TCP connection (single stream) and up to **3600MB/s** via native Infiniband protocol.
## Example
```console
$ qsub -q qexp -l select=4:ncpus=24 -N Name0 ./myjob
$ qstat -n -u username
Req'd Req'd Elap
Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time
--------------- -------- -- |---|---| ------ --- --- ------ ----- - -----
15209.isrv5 username qexp Name0 5530 4 96 -- 01:00 R 00:00
r4i1n0/0*24+r4i1n1/0*24+r4i1n2/0*24+r4i1n3/0*24
```
In this example, we access the node r4i1n0 by Infiniband network via the ib0 interface.
```console
$ ssh 10.17.35.19
```
In this example, we get information about the InfiniBand network.
```console
$ ifconfig
....
inet addr:10.17.35.19....
....
$ ip addr show ib0
....
inet 10.17.35.19....
....
```
# Resources Allocation Policy
## Job Queue Policies
The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and the resources available to the Project. The fair-share system on Salomon ensures that individual users may consume an approximately equal amount of resources per week. Detailed information can be found in the [Job scheduling](/salomon/job-priority/) section. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. The following table provides the queue partitioning overview:
!!! note
Check the queue status at <https://extranet.it4i.cz/rsweb/salomon/>
| queue | active project | project resources | nodes | min ncpus | priority | authorization | walltime |
| ------------------------------- | -------------- | -------------------- | ------------------------------------------------------------- | --------- | -------- | ------------- | --------- |
| **qexp** Express queue | no | none required | 32 nodes, max 8 per user | 24 | 150 | no | 1 / 1h |
| **qprod** Production queue | yes | > 0 | 1006 nodes, max 86 per job | 24 | 0 | no | 24 / 48h |
| **qlong** Long queue | yes | > 0 | 256 nodes, max 40 per job, only non-accelerated nodes allowed | 24 | 0 | no | 72 / 144h |
| **qmpp** Massive parallel queue | yes | > 0 | 1006 nodes | 24 | 0 | yes | 2 / 4h |
| **qfat** UV2000 queue | yes | > 0 | 1 (uv1) | 8 | 200 | yes | 24 / 48h |
| **qfree** Free resource queue | yes | < 120% of allocation | 987 nodes, max 86 per job | 24 | -1024 | no | 12 / 12h |
| **qviz** Visualization queue | yes | none required | 2 (with NVIDIA Quadro K5000) | 4 | 150 | no | 1 / 8h |
| **qmic** Intel Xeon Phi cards | yes | > 0 | 864 Intel Xeon Phi cards, max 8 mic per job | 0 | 0 | no | 24 / 48h |
!!! note
**The qfree queue is not free of charge**. [Normal accounting](#resource-accounting-policy) applies. However, it allows for utilization of free resources, once a Project has exhausted all its allocated computational resources. This does not apply to Director's Discretion projects (DD projects), but may be allowed upon request.
* **qexp**, the Express queue: This queue is dedicated to testing and running very small jobs. It is not required to specify a project to enter the qexp. There are always 2 nodes reserved for this queue (w/o accelerators); a maximum of 8 nodes is available via the qexp for a particular user. The nodes may be allocated on a per-core basis. No special authorization is required to use the queue. The maximum runtime in qexp is 1 hour.
* **qprod**, the Production queue: This queue is intended for normal production runs. It is required that an active project with nonzero remaining resources is specified to enter the qprod. All nodes may be accessed via the qprod queue, however only 86 per job. Full nodes, 24 cores per node, are allocated. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qprod is 48 hours.
* **qlong**, the Long queue: This queue is intended for long production runs. It is required that an active project with nonzero remaining resources is specified to enter the qlong. Only 336 nodes without acceleration may be accessed via the qlong queue. Full nodes, 24 cores per node, are allocated. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qlong is 144 hours (three times the standard qprod time, 3 x 48 h).
* **qmpp**, the massively parallel queue: This queue is intended for massively parallel runs. It is required that an active project with nonzero remaining resources is specified to enter the qmpp. All nodes may be accessed via the qmpp queue. Full nodes, 24 cores per node, are allocated. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qmpp is 4 hours. A PI needs to explicitly ask support for authorization to enter the queue for all users associated with their Project.
* **qfat**, the UV2000 queue: This queue is dedicated to accessing the fat SGI UV2000 SMP machine. The machine (uv1) has 112 Intel IvyBridge cores at 3.3 GHz and 3.25 TB RAM (8 cores and 128 GB RAM are dedicated to the system). A PI needs to explicitly ask support for authorization to enter the queue for all users associated with their Project.
* **qfree**, the Free resource queue: The queue qfree is intended for utilization of free resources, after a Project has exhausted all its allocated computational resources (this does not apply to DD projects by default; DD projects have to request permission to use qfree after exhaustion of computational resources). It is required that an active project is specified to enter the queue. Consumed resources will be accounted to the Project. Access to the qfree queue is automatically removed if consumed resources exceed 120% of the resources allocated to the Project. Only 987 nodes without accelerators may be accessed from this queue. Full nodes, 24 cores per node, are allocated. The queue runs with very low priority and no special authorization is required to use it. The maximum runtime in qfree is 12 hours.
* **qviz**, the Visualization queue: Intended for pre-/post-processing using OpenGL accelerated graphics. Currently when accessing the node, each user gets 4 cores of a CPU allocated, thus approximately 73 GB of RAM and 1/7 of the GPU capacity (the default "chunk"). If more GPU power or RAM is required, it is recommended to allocate more chunks (with 4 cores each), up to one whole node per user, so that all 28 cores, 512 GB RAM, and the whole GPU are exclusive. This is currently also the maximum allowed allocation per one user. One hour of work is allocated by default; the user may ask for 2 hours maximum.
* **qmic**, the queue to access MIC nodes: It is required that an active project with nonzero remaining resources is specified to enter the qmic. All 864 MICs are included.
!!! note
To access a node with a Xeon Phi co-processor, the user needs to specify it in the [job submission select statement](job-submission-and-execution/).
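A minimal qmic submission might look like this (a sketch; the select syntax mirrors the QMIC examples above and allocates four Phi cards):
```console
$ qsub -A OPEN-0-0 -q qmic -l select=4:naccelerators=1 ./myjob
```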
## Queue Notes
The job wall-clock time defaults to **half the maximum time**, see table above. Longer wall time limits can be [set manually, see examples](/salomon/job-submission-and-execution/).
Jobs that exceed the reserved wall-clock time (Req'd Time) get killed automatically. The wall-clock time limit can be changed for queuing jobs (state Q) using the qalter command; however, it cannot be changed for a running job (state R).
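For example, to shorten the walltime limit of a queued job (the job ID is illustrative):
```console
$ qalter -l walltime=12:00:00 12345.isrv5
```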
Salomon users may check current queue configuration at [https://extranet.it4i.cz/rsweb/salomon/queues](https://extranet.it4i.cz/rsweb/salomon/queues).
## Queue Status
!!! note
Check the status of jobs, queues and compute nodes at [https://extranet.it4i.cz/rsweb/salomon/](https://extranet.it4i.cz/rsweb/salomon)
![RSWEB Salomon](../img/rswebsalomon.png "RSWEB Salomon")
Display the queue status on Salomon:
```console
$ qstat -q
```
The PBS allocation overview may also be obtained using the rspbs command.
```console
$ rspbs
Usage: rspbs [options]
Options:
--version show program's version number and exit
-h, --help show this help message and exit
--get-server-details Print server
--get-queues Print queues
--get-queues-details Print queues details
--get-reservations Print reservations
--get-reservations-details
Print reservations details
--get-nodes Print nodes of PBS complex
--get-nodeset Print nodeset of PBS complex
--get-nodes-details Print nodes details
--get-jobs Print jobs
--get-jobs-details Print jobs details
--get-jobs-check-params
Print jobid, job state, session_id, user, nodes
--get-users Print users of jobs
--get-allocated-nodes
Print allocated nodes of jobs
--get-allocated-nodeset
Print allocated nodeset of jobs
--get-node-users Print node users
--get-node-jobs Print node jobs
--get-node-ncpus Print number of ncpus per node
--get-node-allocated-ncpus
Print number of allocated ncpus per node
--get-node-qlist Print node qlist
--get-node-ibswitch Print node ibswitch
--get-user-nodes Print user nodes
--get-user-nodeset Print user nodeset
--get-user-jobs Print user jobs
--get-user-jobc Print number of jobs per user
--get-user-nodec Print number of allocated nodes per user
--get-user-ncpus Print number of allocated ncpus per user
--get-qlist-nodes Print qlist nodes
--get-qlist-nodeset Print qlist nodeset
--get-ibswitch-nodes Print ibswitch nodes
--get-ibswitch-nodeset
Print ibswitch nodeset
--summary Print summary
--get-node-ncpu-chart
Obsolete. Print chart of allocated ncpus per node
--server=SERVER Use given PBS server
--state=STATE Only for given job state
--jobid=JOBID Only for given job ID
--user=USER Only for given user
--node=NODE Only for given node
--nodestate=NODESTATE
Only for given node state (affects only --get-node*
--get-qlist-* --get-ibswitch-* actions)
--incl-finished Include finished jobs
```
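For instance, combining the listing and filtering options shown in the help above:
```console
$ rspbs --get-user-jobs --user=$USER
$ rspbs --summary
```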
---8<--- "resource_accounting.md"
---8<--- "mathjax.md"
# Accessing the Cluster
## Shell Access
The Salomon cluster is accessed by SSH protocol via login nodes login1, login2, login3 and login4 at address salomon.it4i.cz. The login nodes may be addressed specifically, by prepending the login node name to the address.
!!! note
The alias salomon.it4i.cz is currently not available through VPN connection. Please use loginX.salomon.it4i.cz when connected to VPN.
| Login address | Port | Protocol | Login node |
| ---------------------- | ---- | -------- | ------------------------------------- |
| salomon.it4i.cz | 22 | ssh | round-robin DNS record for login[1-4] |
| login1.salomon.it4i.cz | 22 | ssh | login1 |
| login2.salomon.it4i.cz | 22 | ssh | login2 |
| login3.salomon.it4i.cz | 22 | ssh | login3 |
| login4.salomon.it4i.cz | 22 | ssh | login4 |
The authentication is by the [private key](../general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/).
!!! note
Please verify SSH fingerprints during the first logon. They are identical on all login nodes:
md5:
f6:28:98:e4:f9:b2:a6:8f:f2:f4:2d:0a:09:67:69:80 (DSA)
70:01:c9:9a:5d:88:91:c7:1b:c0:84:d1:fa:4e:83:5c (RSA)
sha256:
epkqEU2eFzXnMeMMkpX02CykyWjGyLwFj528Vumpzn4 (DSA)
WNIrR7oeQDYpBYy4N2d5A6cJ2p0837S7gzzTpaDBZrc (RSA)
Private key authentication:
On **Linux** or **Mac**, use
```console
local $ ssh -i /path/to/id_rsa username@salomon.it4i.cz
```
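To address a specific login node, prepend its name (e.g. login1):
```console
local $ ssh -i /path/to/id_rsa username@login1.salomon.it4i.cz
```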
If you see the warning message "UNPROTECTED PRIVATE KEY FILE!", use this command to set stricter permissions on the private key file:
```console
local $ chmod 600 /path/to/id_rsa
```
On **Windows**, use [PuTTY ssh client](../general/accessing-the-clusters/shell-access-and-data-transfer/putty.md).
After logging in, you will see the command prompt:
```console
_____ _
/ ____| | |
| (___ __ _| | ___ _ __ ___ ___ _ __
\___ \ / _` | |/ _ \| '_ ` _ \ / _ \| '_ \
____) | (_| | | (_) | | | | | | (_) | | | |
|_____/ \__,_|_|\___/|_| |_| |_|\___/|_| |_|
http://www.it4i.cz/?lang=en
Last login: Tue Jul 9 15:57:38 2018 from your-host.example.com
[username@login2.salomon ~]$
```
!!! note
The environment is **not** shared between login nodes, except for [shared filesystems](/salomon/storage/).
## Data Transfer
Data in and out of the system may be transferred by the [scp](http://en.wikipedia.org/wiki/Secure_copy) and sftp protocols.
| Address | Port | Protocol |
| ---------------------- | ---- | --------- |
| salomon.it4i.cz | 22 | scp, sftp |
| login1.salomon.it4i.cz | 22 | scp, sftp |
| login2.salomon.it4i.cz | 22 | scp, sftp |
| login3.salomon.it4i.cz | 22 | scp, sftp |
| login4.salomon.it4i.cz | 22 | scp, sftp |
The authentication is by the [private key](/general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/).
On Linux or Mac, use an scp or sftp client to transfer the data to Salomon:
```console
local $ scp -i /path/to/id_rsa my-local-file username@salomon.it4i.cz:directory/file
```
```console
local $ scp -i /path/to/id_rsa -r my-local-dir username@salomon.it4i.cz:directory
```
or
```console
local $ sftp -o IdentityFile=/path/to/id_rsa username@salomon.it4i.cz
```
A very convenient way to transfer files in and out of Salomon is via the FUSE filesystem [sshfs](http://linux.die.net/man/1/sshfs):
```console
local $ sshfs -o IdentityFile=/path/to/id_rsa username@salomon.it4i.cz:. mountpoint
```
Using sshfs, the user's Salomon home directory will be mounted on your local computer, just like an external disk.
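When the mount is no longer needed, it can be released with a standard FUSE unmount on the local machine:
```console
local $ fusermount -u mountpoint
```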
Learn more on ssh, scp and sshfs by reading the manpages:
```console
$ man ssh
$ man scp
$ man sshfs
```
On Windows, use the [WinSCP client](http://winscp.net/eng/download.php) to transfer the data. The [win-sshfs client](http://code.google.com/p/win-sshfs/) provides a way to mount the Salomon filesystems directly as an external disk.
More information about the shared file systems is available [here](/salomon/storage/).
## Connection Restrictions
Outgoing connections from Salomon Cluster login nodes to the outside world are restricted to the following ports:
| Port | Protocol |
| ---- | -------- |
| 22 | ssh |
| 80 | http |
| 443 | https |
| 9418 | git |
!!! note
Please use **ssh port forwarding** and proxy servers to connect from Salomon to all other remote ports.
Outgoing connections from Salomon Cluster compute nodes are restricted to the internal network. Direct connections from compute nodes to the outside world are cut.
## Port Forwarding
### Port Forwarding From Login Nodes
!!! note
Port forwarding allows an application running on Salomon to connect to arbitrary remote host and port.
It works by tunneling the connection from Salomon back to the user's workstation and forwarding from the workstation to the remote host.
Pick some unused port on a Salomon login node (for example 6000) and establish the port forwarding:
```console
local $ ssh -R 6000:remote.host.com:1234 salomon.it4i.cz
```
In this example, we establish port forwarding between port 6000 on Salomon and port 1234 on remote.host.com. By accessing localhost:6000 on Salomon, an application will see the response of remote.host.com:1234. The traffic will run via the user's local workstation.
Port forwarding may be done **using PuTTY** as well. On the PuTTY Configuration screen, load your Salomon configuration first. Then go to Connection->SSH->Tunnels to set up the port forwarding. Click the Remote radio button. Insert 6000 into the Source port textbox and remote.host.com:1234 into the Destination textbox. Click the Add button, then Open.
Port forwarding may be established directly to the remote host. However, this requires that the user has ssh access to remote.host.com:
```console
$ ssh -L 6000:localhost:1234 remote.host.com
```
Note: Port number 6000 is chosen as an example only. Pick any free port.
### Port Forwarding From Compute Nodes
Remote port forwarding from compute nodes allows applications running on the compute nodes to access hosts outside the Salomon Cluster.
First, establish the remote port forwarding from the login node, as [described above](#port-forwarding-from-login-nodes).
Second, invoke port forwarding from the compute node to the login node. Insert the following line into your jobscript or interactive shell:
```console
$ ssh -TN -f -L 6000:localhost:6000 login1
```
In this example, we assume that port forwarding from login1:6000 to remote.host.com:1234 has been established beforehand. By accessing localhost:6000, an application running on a compute node will see the response of remote.host.com:1234.
### Using Proxy Servers
Port forwarding is static; each single port is mapped to a particular port on the remote host. A connection to another remote host requires a new forward.
!!! note
Applications with built-in proxy support get unrestricted access to remote hosts via a single proxy server.
To establish a local proxy server on your workstation, install and run SOCKS proxy server software. On Linux, the sshd daemon provides this functionality. To establish a SOCKS proxy server listening on port 1080, run:
```console
local $ ssh -D 1080 localhost
```
On Windows, install and run the free, open source [Sock Puppet](http://sockspuppet.com/) server.
Once the proxy server is running, establish ssh port forwarding from Salomon to the proxy server, port 1080, exactly as [described above](#port-forwarding-from-login-nodes).
```console
local $ ssh -R 6000:localhost:1080 salomon.it4i.cz
```
Now, configure the application's proxy settings to **localhost:6000**. Use port forwarding to access the [proxy server from compute nodes](#port-forwarding-from-compute-nodes) as well.
## Graphical User Interface
* The [X Window system](/general/accessing-the-clusters/graphical-user-interface/x-window-system/) is a principal way to get GUI access to the clusters.
* The [Virtual Network Computing](../general/accessing-the-clusters/graphical-user-interface/vnc/) is a graphical [desktop sharing](http://en.wikipedia.org/wiki/Desktop_sharing) system that uses the [Remote Frame Buffer protocol](http://en.wikipedia.org/wiki/RFB_protocol) to remotely control another [computer](http://en.wikipedia.org/wiki/Computer).
## VPN Access
* Access to IT4Innovations internal resources via [VPN](/general/accessing-the-clusters/vpn-access/).