diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 42c57b4b6386c6aeb20ebc0bd5ae1a8793d60ef7..75f6648390d5a79c14b2ba505f6f61aeb18cba54 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -9,7 +9,7 @@ docs:
   image: davidhrbac/docker-mdcheck:latest
   allow_failure: true
   script:
-  - mdl -r ~MD013 *.md docs.it4i/
+  - mdl -r ~MD013,~MD033,~MD014,~MD026,~MD037 *.md docs.it4i/
 
 two spaces:
   stage: test
@@ -17,7 +17,14 @@ two spaces:
   allow_failure: true
   script:
   - echo "== Files having more than one space between two characters =="
-  - find docs.it4i/ -name '*.md' -exec grep "[[:alpha:]] [[:alpha:]]" -l {} +
+  - find docs.it4i/ -name '*.md' -exec grep "[[:alpha:]] [[:alpha:]]" -l {} + || true
+
+capitalize:
+  stage: test
+  image: davidhrbac/docker-mkdocscheck:latest
+  allow_failure: true
+  script:
+  - find docs.it4i/ -name '*.md' -print0 | xargs -0 -n1 scripts/titlemd_test.py
 
 spell check:
   stage: test
@@ -46,11 +53,18 @@ mkdocs:
   script:
   #- apt-get update
   #- apt-get -y install git
+  # add version to footer
   - bash scripts/add_version.sh
+  # get modules list from clusters
   - bash scripts/get_modules.sh
+  # build pages
   - mkdocs build
+  # compress search_index.json
   - bash scripts/clean_json.sh site/mkdocs/search_index.json
+  # replace broken links in 404.html
   - sed -i 's,href="" title=",href="/" title=",g' site/404.html
+  # compress sitemap
+  - gzip < site/sitemap.xml > site/sitemap.xml.gz
 artifacts:
   paths:
   - site
diff --git a/.spelling b/.spelling
index 8bb1fc223af4955de6d57fa26186b23630386154..1cb0368c8bf540de371e775f49e4848e7a78d425 100644
--- a/.spelling
+++ b/.spelling
@@ -3,17 +3,25 @@
 # global dictionary is at the start, file overrides afterwards
 # one word per line, to define a file override use ' - filename'
 # where filename is relative to this configuration file
+COM
+.ssh
 Anselm
 IT4I
 IT4Innovations
 PBS
 Salomon
 TurboVNC
+VNC
 DDR3
 DIMM
 InfiniBand
 CUDA
+ORCA
 COMSOL
+API
+GNU
+CUDA
+NVIDIA
 LiveLink
 MATLAB
 Allinea
@@ -63,6 +71,7 @@ MPICH
 MVAPICH2
 OpenBLAS
 ScaLAPACK
+PAPI
 SGI
 UV2000
 400GB
diff --git a/README.md b/README.md
index baa0ca6b2e9b2aaf82e4e9407d5a255172ecb4c6..e36d8f94c1b09d115e81842746c98e039b23f7b7 100644
--- a/README.md
+++ b/README.md
@@ -4,19 +4,19 @@ This project contains the IT4Innovations user documentation sources.
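A note on the `|| true` guard added to the "two spaces" job above: `grep` exits with status 1 when it finds no match, and GitLab CI fails a job on any non-zero exit status. A minimal illustration of the behaviour (a hedged sketch; `/dev/null` stands in for a file with no matches):

```bash
# grep returns exit code 1 when nothing matches, which would fail the CI job
$ grep "pattern" /dev/null; echo $?
1
# '|| true' forces the compound command's exit status to 0, so an empty
# result (no offending files) no longer fails the job
$ grep "pattern" /dev/null || true; echo $?
0
```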
## Environments
 
-* https://docs-new.it4i.cz - master branch
-* https://docs-new.it4i.cz/devel/$BRANCH_NAME - maps the branches
+* [https://docs-new.it4i.cz](https://docs-new.it4i.cz) - master branch
+* [https://docs-new.it4i.cz/devel/$BRANCH_NAME](https://docs-new.it4i.cz/devel/$BRANCH_NAME) - maps the branches
 
 ## URLs
 
-* http://facelessuser.github.io/pymdown-extensions/
-* http://squidfunk.github.io/mkdocs-material/
+* [http://facelessuser.github.io/pymdown-extensions/](http://facelessuser.github.io/pymdown-extensions/)
+* [http://squidfunk.github.io/mkdocs-material/](http://squidfunk.github.io/mkdocs-material/)
 
 ## Rules
 
-* spellcheck https://github.com/lukeapage/node-markdown-spellcheck
+* [spellcheck](https://github.com/lukeapage/node-markdown-spellcheck)
 
-* SI units http://physics.nist.gov/cuu/Units/checklist.html
+* [SI units](http://physics.nist.gov/cuu/Units/checklist.html)
 
 ```
 fair-share
diff --git a/docs.it4i/anselm-cluster-documentation/compute-nodes.md b/docs.it4i/anselm-cluster-documentation/compute-nodes.md
deleted file mode 100644
index 2d4f1707c960531d25e64c206d5895a7a4e10338..0000000000000000000000000000000000000000
--- a/docs.it4i/anselm-cluster-documentation/compute-nodes.md
+++ /dev/null
@@ -1,135 +0,0 @@
-Compute Nodes
-=============
-
-Nodes Configuration
--------------------
-Anselm is a cluster of x86-64 Intel-based nodes built on the Bull Extreme Computing bullx technology. The cluster contains four types of compute nodes.
-
-### Compute Nodes Without Accelerator
-
-- 180 nodes
-- 2880 cores in total
-- two Intel Sandy Bridge E5-2665, 8-core, 2.4 GHz processors per node
-- 64 GB of physical memory per node
-- one 500 GB SATA 2.5" 7200 rpm HDD per node
-- bullx B510 blade servers
-- cn[1-180]
-
-### Compute Nodes With GPU Accelerator
-
-- 23 nodes
-- 368 cores in total
-- two Intel Sandy Bridge E5-2470, 8-core, 2.3 GHz processors per node
-- 96 GB of physical memory per node
-- one 500 GB SATA 2.5" 7200 rpm HDD per node
-- GPU accelerator: 1x NVIDIA Tesla Kepler K20 per node
-- bullx B515 blade servers
-- cn[181-203]
-
-### Compute Nodes With MIC Accelerator
-
-- 4 nodes
-- 64 cores in total
-- two Intel Sandy Bridge E5-2470, 8-core, 2.3 GHz processors per node
-- 96 GB of physical memory per node
-- one 500 GB SATA 2.5" 7200 rpm HDD per node
-- MIC accelerator: 1x Intel Xeon Phi 5110P per node
-- bullx B515 blade servers
-- cn[204-207]
-
-### Fat Compute Nodes
-
-- 2 nodes
-- 32 cores in total
-- two Intel Sandy Bridge E5-2665, 8-core, 2.4 GHz processors per node
-- 512 GB of physical memory per node
-- two 300 GB SAS 3.5" 15000 rpm HDDs (RAID 1) per node
-- two 100 GB SLC SSDs per node
-- bullx R423-E3 servers
-- cn[208-209]
-
-
-
-**Figure Anselm bullx B510 servers**
-
-### Compute Nodes Summary
-
- |Node type|Count|Range|Memory|Cores|[Access](resources-allocation-policy/)|
- |---|---|---|---|---|---|
- |Nodes without accelerator|180|cn[1-180]|64 GB|16 @ 2.4 GHz|qexp, qprod, qlong, qfree|
- |Nodes with GPU accelerator|23|cn[181-203]|96 GB|16 @ 2.3 GHz|qgpu, qprod|
- |Nodes with MIC accelerator|4|cn[204-207]|96 GB|16 @ 2.3 GHz|qmic, qprod|
- |Fat compute nodes|2|cn[208-209]|512 GB|16 @ 2.4 GHz|qfat, qprod|
-
-Processor Architecture
-----------------------
-Anselm is equipped with Intel Sandy Bridge processors: Intel Xeon E5-2665 (nodes without accelerator and fat nodes) and Intel Xeon E5-2470 (nodes with accelerator).
Processors support the Advanced Vector Extensions (AVX) 256-bit instruction set.
-
-### Intel Sandy Bridge E5-2665 Processor
-
-- eight-core
-- speed: 2.4 GHz, up to 3.1 GHz using Turbo Boost Technology
-- peak performance: 19.2 GFLOP/s per core
-- caches:
-  - L2: 256 KB per core
-  - L3: 20 MB per processor
-- memory bandwidth at the level of the processor: 51.2 GB/s
-
-### Intel Sandy Bridge E5-2470 Processor
-
-- eight-core
-- speed: 2.3 GHz, up to 3.1 GHz using Turbo Boost Technology
-- peak performance: 18.4 GFLOP/s per core
-- caches:
-  - L2: 256 KB per core
-  - L3: 20 MB per processor
-- memory bandwidth at the level of the processor: 38.4 GB/s
-
-Nodes equipped with the Intel Xeon E5-2665 CPU have the PBS resource attribute cpu_freq = 24 set; nodes equipped with the Intel Xeon E5-2470 CPU have cpu_freq = 23.
-
-```bash
-$ qsub -A OPEN-0-0 -q qprod -l select=4:ncpus=16:cpu_freq=24 -I
-```
-
-In this example, we allocate 4 nodes, 16 cores at 2.4 GHz per node.
-
-Intel Turbo Boost Technology is used by default. You can disable it for all nodes of a job by using the resource attribute cpu_turbo_boost.
-
-```bash
- $ qsub -A OPEN-0-0 -q qprod -l select=4:ncpus=16 -l cpu_turbo_boost=0 -I
-```
-
-Memory Architecture
--------------------
-
-### Compute Node Without Accelerator
-
-- 2 sockets
-- Memory controllers are integrated into the processors.
-  - 8 DDR3 DIMMs per node
-  - 4 DDR3 DIMMs per CPU
-  - 1 DDR3 DIMM per channel
-  - Data rate support: up to 1600 MT/s
-- Populated memory: 8 x 8 GB DDR3 DIMM 1600 MHz
-
-### Compute Node With GPU or MIC Accelerator
-
-- 2 sockets
-- Memory controllers are integrated into the processors.
-  - 6 DDR3 DIMMs per node
-  - 3 DDR3 DIMMs per CPU
-  - 1 DDR3 DIMM per channel
-  - Data rate support: up to 1600 MT/s
-- Populated memory: 6 x 16 GB DDR3 DIMM 1600 MHz
-
-### Fat Compute Node
-
-- 2 sockets
-- Memory controllers are integrated into the processors.
-  - 16 DDR3 DIMMs per node
-  - 8 DDR3 DIMMs per CPU
-  - 2 DDR3 DIMMs per channel
-  - Data rate support: up to 1600 MT/s
-- Populated memory: 16 x 32 GB DDR3 DIMM 1600 MHz
diff --git a/docs.it4i/anselm-cluster-documentation/hardware-overview.md b/docs.it4i/anselm-cluster-documentation/hardware-overview.md
deleted file mode 100644
index a13b08f2d82759b5385515ef1ae053590876f96c..0000000000000000000000000000000000000000
--- a/docs.it4i/anselm-cluster-documentation/hardware-overview.md
+++ /dev/null
@@ -1,61 +0,0 @@
-Hardware Overview
-=================
-
-The Anselm cluster consists of 209 computational nodes named cn[1-209], of which 180 are regular compute nodes, 23 are GPU Kepler K20 accelerated nodes, 4 are MIC Xeon Phi 5110P accelerated nodes, and 2 are fat nodes. Each node is a powerful x86-64 computer, equipped with 16 cores (two eight-core Intel Sandy Bridge processors), at least 64 GB of RAM, and a local hard drive. User access to the Anselm cluster is provided by two login nodes, login[1,2]. The nodes are interlinked by high-speed InfiniBand and Ethernet networks. All nodes share the 320 TB /home disk storage to store user files. The 146 TB shared /scratch storage is available for scratch data.
-
-The fat nodes are equipped with a large amount (512 GB) of memory. Virtualization infrastructure provides resources to run long-term servers and services in virtual mode. Fat nodes and virtual servers may access 45 TB of dedicated block storage. Accelerated nodes, fat nodes, and the virtualization infrastructure are available [upon request](https://support.it4i.cz/rt) made by a PI.
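Access to these special node types goes through the scheduler. As a sketch only (the queue name follows the compute-node summary table above; `OPEN-0-0` is a placeholder project ID), an interactive request for one fat node might look like:

```bash
# request a single fat node interactively via the qfat queue listed above;
# replace OPEN-0-0 with your own project ID
$ qsub -A OPEN-0-0 -q qfat -l select=1:ncpus=16 -I
```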
- -Schematic representation of the Anselm cluster. Each box represents a node (computer) or storage capacity: - - - -The cluster compute nodes cn[1-207] are organized within 13 chassis. - -There are four types of compute nodes: - -- 180 compute nodes without the accelerator -- 23 compute nodes with GPU accelerator - equipped with NVIDIA Tesla Kepler K20 -- 4 compute nodes with MIC accelerator - equipped with Intel Xeon Phi 5110P -- 2 fat nodes - equipped with 512 GB RAM and two 100 GB SSD drives - -[More about Compute nodes](compute-nodes/). - -GPU and accelerated nodes are available upon request, see the [Resources Allocation Policy](resources-allocation-policy/). - -All these nodes are interconnected by fast InfiniBand network and Ethernet network. [More about the Network](network/). -Every chassis provides InfiniBand switch, marked **isw**, connecting all nodes in the chassis, as well as connecting the chassis to the upper level switches. - -All nodes share 360 TB /home disk storage to store user files. The 146 TB shared /scratch storage is available for the scratch data. These file systems are provided by Lustre parallel file system. There is also local disk storage available on all compute nodes in /lscratch. [More about Storage](storage/). - -The user access to the Anselm cluster is provided by two login nodes login1, login2, and data mover node dm1. [More about accessing cluster.](shell-and-data-access/) - -The parameters are summarized in the following tables: - -|**In general**|| -|---|---| -|Primary purpose|High Performance Computing| -|Architecture of compute nodes|x86-64| -|Operating system|Linux| -|[**Compute nodes**](compute-nodes/)|| -|Totally|209| -|Processor cores|16 (2 x 8 cores)| -|RAM|min. 64 GB, min. 4 GB per core| -|Local disk drive|yes - usually 500 GB| -|Compute network|InfiniBand QDR, fully non-blocking, fat-tree| -|w/o accelerator|180, cn[1-180]| -|GPU accelerated|23, cn[181-203]| -|MIC accelerated|4, cn[204-207]| -|Fat compute nodes|2, cn[208-209]| -|**In total**|| -|Total theoretical peak performance (Rpeak)|94 TFLOP/s| -|Total max. LINPACK performance (Rmax)|73 TFLOP/s| -|Total amount of RAM|15.136 TB| - - |Node|Processor|Memory|Accelerator| - |---|---|---|---| - |w/o accelerator|2 x Intel Sandy Bridge E5-2665, 2.4 GHz|64 GB|-| - |GPU accelerated|2 x Intel Sandy Bridge E5-2470, 2.3 GHz|96 GB|NVIDIA Kepler K20| - |MIC accelerated|2 x Intel Sandy Bridge E5-2470, 2.3 GHz|96 GB|Intel Xeon Phi 5110P| - |Fat compute node|2 x Intel Sandy Bridge E5-2665, 2.4 GHz|512 GB|-| - -For more details please refer to the [Compute nodes](compute-nodes/), [Storage](storage/), and [Network](network/). diff --git a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-ls-dyna.md b/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-ls-dyna.md deleted file mode 100644 index d712659f5794a75e75c96f64c0db3d107cfb2822..0000000000000000000000000000000000000000 --- a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-ls-dyna.md +++ /dev/null @@ -1,56 +0,0 @@ -ANSYS LS-DYNA -============= - -**[ANSYSLS-DYNA](http://www.ansys.com/Products/Simulation+Technology/Structural+Mechanics/Explicit+Dynamics/ANSYS+LS-DYNA)** software provides convenient and easy-to-use access to the technology-rich, time-tested explicit solver without the need to contend with the complex input requirements of this sophisticated program. Introduced in 1996, ANSYS LS-DYNA capabilities have helped customers in numerous industries to resolve highly intricate design issues. 
ANSYS Mechanical users have been able take advantage of complex explicit solutions for a long time utilizing the traditional ANSYS Parametric Design Language (APDL) environment. These explicit capabilities are available to ANSYS Workbench users as well. The Workbench platform is a powerful, comprehensive, easy-to-use environment for engineering simulation. CAD import from all sources, geometry cleanup, automatic meshing, solution, parametric optimization, result visualization and comprehensive report generation are all available within a single fully interactive modern graphical user environment. - -To run ANSYS LS-DYNA in batch mode you can utilize/modify the default ansysdyna.pbs script and execute it via the qsub command. - -```bash -#!/bin/bash -#PBS -l nodes=2:ppn=16 -#PBS -q qprod -#PBS -N $USER-DYNA-Project -#PBS -A XX-YY-ZZ - -#! Mail to user when job terminate or abort -#PBS -m ae - -#!change the working directory (default is home directory) -#cd <working directory> -WORK_DIR="/scratch/$USER/work" -cd $WORK_DIR - -echo Running on host `hostname` -echo Time is `date` -echo Directory is `pwd` -echo This jobs runs on the following processors: -echo `cat $PBS_NODEFILE` - -#! Counts the number of processors -NPROCS=`wc -l < $PBS_NODEFILE` - -echo This job has allocated $NPROCS nodes - -module load ansys - -#### Set number of processors per host listing -#### (set to 1 as $PBS_NODEFILE lists each node twice if :ppn=2) -procs_per_host=1 -#### Create host list -hl="" -for host in `cat $PBS_NODEFILE` -do - if [ "$hl" = "" ] - then hl="$host:$procs_per_host" - else hl="${hl}:$host:$procs_per_host" - fi -done - -echo Machines: $hl - -/ansys_inc/v145/ansys/bin/ansys145 -dis -lsdynampp i=input.k -machines $hl -``` - -Header of the PBS file (above) is common and description can be find on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. - -Working directory has to be created before sending PBS job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common LS-DYNA .**k** file which is attached to the ANSYS solver via parameter i= diff --git a/docs.it4i/anselm-cluster-documentation/software/chemistry/nwchem.md b/docs.it4i/anselm-cluster-documentation/software/chemistry/nwchem.md deleted file mode 100644 index 0318c4a119730a4cabb6163287bb3dc5d2ef236d..0000000000000000000000000000000000000000 --- a/docs.it4i/anselm-cluster-documentation/software/chemistry/nwchem.md +++ /dev/null @@ -1,45 +0,0 @@ -NWChem -====== - -**High-Performance Computational Chemistry** - -Introduction -------------------------- -NWChem aims to provide its users with computational chemistry tools that are scalable both in their ability to treat large scientific computational chemistry problems efficiently, and in their use of available parallel computing resources from high-performance parallel supercomputers to conventional workstation clusters. - -[Homepage](http://www.nwchem-sw.org/index.php/Main_Page) - -Installed versions ------------------- -The following versions are currently installed: - -- 6.1.1, not recommended, problems have been observed with this version -- 6.3-rev2-patch1, current release with QMD patch applied. 
Compiled with Intel compilers, MKL and Intel MPI
-- 6.3-rev2-patch1-openmpi, same as above, but compiled with OpenMPI and the NWChem-provided BLAS instead of MKL. This version is expected to be slower
-- 6.3-rev2-patch1-venus, this version contains only libraries for VENUS interface linking. It does not provide a standalone NWChem executable
-
-For a current list of installed versions, execute:
-
-```bash
- module avail nwchem
-```
-
-Running
--------
-NWChem is compiled for parallel MPI execution. The normal procedure for MPI jobs applies. Sample jobscript:
-
-```bash
- #PBS -A IT4I-0-0
- #PBS -q qprod
- #PBS -l select=1:ncpus=16
-
- module add nwchem/6.3-rev2-patch1
- mpirun -np 16 nwchem h2o.nw
-```
-
-Options
---------------------
-Please refer to [the documentation](http://www.nwchem-sw.org/index.php/Release62:Top-level) and set the following directives in the input file:
-
-- MEMORY : controls the amount of memory NWChem will use
-- SCRATCH_DIR : set this to a directory in the [SCRATCH file system](../../storage/storage/#scratch) (or run the calculation completely in a scratch directory). For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, e.g. "scf direct"
diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/intel-vtune-amplifier.md b/docs.it4i/anselm-cluster-documentation/software/debuggers/intel-vtune-amplifier.md
deleted file mode 100644
index fac34c5e8fa3b822b27ac40f5a70a15b839198e0..0000000000000000000000000000000000000000
--- a/docs.it4i/anselm-cluster-documentation/software/debuggers/intel-vtune-amplifier.md
+++ /dev/null
@@ -1,74 +0,0 @@
-Intel VTune Amplifier
-=====================
-
-Introduction
-------------
-Intel® VTune™ Amplifier, part of Intel Parallel Studio, is a GUI profiling tool designed for Intel processors. It offers a graphical performance analysis of single-core and multithreaded applications. A highlight of the features:
-
-- Hotspot analysis
-- Locks and waits analysis
-- Low-level specific counters, such as branch analysis and memory bandwidth
-- Power usage analysis - frequency and sleep states.
-
-
-
-Usage
------
-To launch the GUI, first load the module:
-
-```bash
- $ module add VTune/2016_update1
-```
-
-and launch the GUI:
-
-```bash
- $ amplxe-gui
-```
-
-!!! Note "Note"
-    To profile an application with VTune Amplifier, special kernel modules need to be loaded. The modules are not loaded on Anselm login nodes, thus direct profiling on login nodes is not possible. Use VTune on compute nodes and refer to the documentation on using GUI applications.
-
-The GUI will open in a new window. Click on "*New Project...*" to create a new project. After clicking *OK*, a new window with project properties will appear. At "*Application:*", select the path to the binary you want to profile (the binary should be compiled with the -g flag). Some additional options, such as command line arguments, can be selected. At "*Managed code profiling mode:*" select "*Native*" (unless you want to profile managed-mode .NET/Mono applications). After clicking *OK*, your project is created.
-
-To run a new analysis, click "*New analysis...*". You will see a list of possible analyses. Some of them will not be possible on the current CPU (e.g. Intel Atom analysis is not possible on a Sandy Bridge CPU); the GUI will show an error box if you select the wrong analysis. For example, select "*Advanced Hotspots*". Clicking on *Start* will start profiling of the application.
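Since VTune resolves hotspots to source lines only when debug symbols are present, the binary should be built with `-g`. A minimal sketch, assuming an Intel compiler module is available as elsewhere in this documentation:

```bash
# build with debug symbols (-g) while keeping optimization (-O2) so the
# profile reflects realistic performance; icc is assumed here, gcc works
# the same way
$ module load intel
$ icc -g -O2 -o myapp myapp.c
```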
- -Remote Analysis ---------------- -VTune Amplifier also allows a form of remote analysis. In this mode, data for analysis is collected from the command line without GUI, and the results are then loaded to GUI on another machine. This allows profiling without interactive graphical jobs. To perform a remote analysis, launch a GUI somewhere, open the new analysis window and then click the button "*Command line*" in bottom right corner. It will show the command line needed to perform the selected analysis. - -The command line will look like this: - -```bash - /apps/all/VTune/2016_update1/vtune_amplifier_xe_2016.1.1.434111/bin64/amplxe-cl -collect advanced-hotspots -knob collection-detail=stack-and-callcount -mrte-mode=native -target-duration-type=veryshort -app-working-dir /home/sta545/test -- /home/sta545/test_pgsesv -``` - -Copy the line to clipboard and then you can paste it in your jobscript or in command line. After the collection is run, open the GUI once again, click the menu button in the upper right corner, and select "*Open > Result...*". The GUI will load the results from the run. - -Xeon Phi --------- -!!! Note "Note" - This section is outdated. It will be updated with new information soon. - -It is possible to analyze both native and offload Xeon Phi applications. For offload mode, just specify the path to the binary. For native mode, you need to specify in project properties: - -Application: ssh - -Application parameters: mic0 source ~/.profile && /path/to/your/bin - -Note that we include source ~/.profile in the command to setup environment paths [as described here](../intel-xeon-phi/). - -!!! Note "Note" - If the analysis is interrupted or aborted, further analysis on the card might be impossible and you will get errors like "ERROR connecting to MIC card". In this case please contact our support to reboot the MIC card. - -You may also use remote analysis to collect data from the MIC and then analyze it in the GUI later : - -```bash - $ amplxe-cl -collect knc-hotspots -no-auto-finalize -- ssh mic0 - "export LD_LIBRARY_PATH=/apps/intel/composer_xe_2015.2.164/compiler/lib/mic/:/apps/intel/composer_xe_2015.2.164/mkl/lib/mic/; export KMP_AFFINITY=compact; /tmp/app.mic" -``` - -References ----------- -1. 
<https://www.rcac.purdue.edu/tutorials/phi/PerformanceTuningXeonPhi-Tullos.pdf> Performance Tuning for Intel® Xeon Phi™ Coprocessors diff --git a/docs.it4i/anselm-cluster-documentation/software/index.md b/docs.it4i/anselm-cluster-documentation/software/index.md deleted file mode 100644 index 2cbcedb9cf9af259990f7eb19b93a97858793ed4..0000000000000000000000000000000000000000 --- a/docs.it4i/anselm-cluster-documentation/software/index.md +++ /dev/null @@ -1,86 +0,0 @@ -Anselm Cluster Software -=== - -## [Modules](../../modules-anselm) -* List of available modules -## [COMSOL](comsol-multiphysics) -* A finite element analysis, solver and Simulation software -## [ParaView](paraview) -* An open-source, multi-platform data analysis and visualization application -## [Compilers](compilers) -* Available compilers, including GNU, INTEL and UPC compilers -## [NVIDIA CUDA](nvidia-cuda) -* A guide to NVIDIA CUDA programming and GPU usage -## [GPI-2](gpi2) -* A library that implements the GASPI specification -## [OpenFOAM](openfoam) -* A free, open source CFD software package -## [ISV Licenses](isv_licenses) -* A guide to managing Independent Software Vendor licenses -## [Intel Xeon Phi](intel-xeon-phi) -* A guide to Intel Xeon Phi usage -## [Virtualization](kvirtualization) -## [Java](java) -* Java on ANSELM -## [Operating System](operating-system) -* The operating system, deployed on ANSELM -## Intel Suite -* The Intel Parallel Studio XE -### [Introduction](intel-suite/introduction) -### [Intel MKL](intel-suite/intel-mkl) -### [Intel Compilers](intel-suite/intel-compilers) -### [Intel IPP](intel-suite/intel-integrated-performance-primitives) -### [Intel TBB](intel-suite/intel-tbb) -### [Intel Debugger](intel-suite/intel-debugger) -## MPI -* Message Passing Interface libraries -### [Introduction](mpi/mpi) -### [MPI4Py (MPI for Python)](mpi/mpi4py-mpi-for-python) -### [Running OpenMPI](mpi/Running_OpenMPI) -### [Running MPICH2](mpi/running-mpich2) -## Numerical Libraries -* Libraries for numerical computations -### [Intel numerical libraries](numerical-libraries/intel-numerical-libraries) -### [PETSc](numerical-libraries/petsc) -### [Trilinos](numerical-libraries/trilinos) -### [FFTW](numerical-libraries/fftw) -### [GSL](numerical-libraries/gsl) -### [MAGMA for Intel Xeon Phi](numerical-libraries/magma-for-intel-xeon-phi) -### [HDF5](numerical-libraries/hdf5) -## Omics Master -### [Diagnostic component (TEAM)](omics-master/diagnostic-component-team) -### [Prioritization component (BiERapp)](omics-master/priorization-component-bierapp) -### [Overview](omics-master/overview) -## Debuggers -* A collection of development tools -### [Valgrind](debuggers/valgrind) -### [PAPI](debuggers/papi) -### [Allinea Forge (DDT,MAP)](debuggers/allinea-ddt) -### [Total View](debuggers/total-view) -### [CUBE](debuggers/cube) -### [Intel VTune Amplifier](debuggers/intel-vtune-amplifier) -### [VNC](debuggers/debuggers) -### [Scalasca](debuggers/scalasca) -### [Score-P](debuggers/score-p) -### [Intel Performance Counter Monitor](debuggers/intel-performance-counter-monitor) -### [Allinea Performance Reports](debuggers/allinea-performance-reports) -### [Vampir](debuggers/vampir) -## Numerical Languages -* Interpreted languages for numerical computations -### [Introduction](numerical-languages/introduction) -### [R](numerical-languages/r) -### [MATLAB 2013-2014](numerical-languages/matlab_1314) -### [MATLAB](numerical-languages/matlab) -### [Octave](numerical-languages/octave) -## Chemistry -* Tools for computational chemistry 
-### [Molpro](chemistry/molpro) -### [NWChem](chemistry/nwchem) -## ANSYS -* An engineering simulation software -### [Introduction](ansys/ansys) -### [ANSYS CFX](ansys/ansys-cfx) -### [ANSYS LS-DYNA](ansys/ansys-ls-dyna) -### [ANSYS MAPDL](ansys/ansys-mechanical-apdl) -### [LS-DYNA](ansys/ls-dyna) -### [ANSYS Fluent](ansys/ansys-fluent) diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/magma-for-intel-xeon-phi.md b/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/magma-for-intel-xeon-phi.md deleted file mode 100644 index e8c956b5decbcbc5d757dcb287f7504a735b0784..0000000000000000000000000000000000000000 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/magma-for-intel-xeon-phi.md +++ /dev/null @@ -1,83 +0,0 @@ -MAGMA for Intel Xeon Phi -======================== - -Next generation dense algebra library for heterogeneous systems with accelerators - -### Compiling and linking with MAGMA - -To be able to compile and link code with MAGMA library user has to load following module: - -```bash - $ module load magma/1.3.0-mic -``` - -To make compilation more user friendly module also sets these two environment variables: - -!!! Note "Note" - MAGMA_INC - contains paths to the MAGMA header files (to be used for compilation step) - -!!! Note "Note" - MAGMA_LIBS - contains paths to MAGMA libraries (to be used for linking step). - -Compilation example: - -```bash - $ icc -mkl -O3 -DHAVE_MIC -DADD_ -Wall $MAGMA_INC -c testing_dgetrf_mic.cpp -o testing_dgetrf_mic.o - - $ icc -mkl -O3 -DHAVE_MIC -DADD_ -Wall -fPIC -Xlinker -zmuldefs -Wall -DNOCHANGE -DHOST testing_dgetrf_mic.o -o testing_dgetrf_mic $MAGMA_LIBS -``` - -### Running MAGMA code - -MAGMA implementation for Intel MIC requires a MAGMA server running on accelerator prior to executing the user application. The server can be started and stopped using following scripts: - -!!! Note "Note" - To start MAGMA server use: - **$MAGMAROOT/start_magma_server** - -!!! Note "Note" - To stop the server use: - **$MAGMAROOT/stop_magma_server** - -!!! Note "Note" - For deeper understanding how the MAGMA server is started, see the following script: - **$MAGMAROOT/launch_anselm_from_mic.sh** - -To test if the MAGMA server runs properly we can run one of examples that are part of the MAGMA installation: - -```bash - [user@cn204 ~]$ $MAGMAROOT/testing/testing_dgetrf_mic - - [user@cn204 ~]$ export OMP_NUM_THREADS=16 - - [lriha@cn204 ~]$ $MAGMAROOT/testing/testing_dgetrf_mic - Usage: /apps/libs/magma-mic/magmamic-1.3.0/testing/testing_dgetrf_mic [options] [-h|--help] - - M N CPU GFlop/s (sec) MAGMA GFlop/s (sec) ||PA-LU||/(||A||*N) - ========================================================================= - 1088 1088 --- ( --- ) 13.93 ( 0.06) --- - 2112 2112 --- ( --- ) 77.85 ( 0.08) --- - 3136 3136 --- ( --- ) 183.21 ( 0.11) --- - 4160 4160 --- ( --- ) 227.52 ( 0.21) --- - 5184 5184 --- ( --- ) 258.61 ( 0.36) --- - 6208 6208 --- ( --- ) 333.12 ( 0.48) --- - 7232 7232 --- ( --- ) 416.52 ( 0.61) --- - 8256 8256 --- ( --- ) 446.97 ( 0.84) --- - 9280 9280 --- ( --- ) 461.15 ( 1.16) --- - 10304 10304 --- ( --- ) 500.70 ( 1.46) --- -``` - -!!! Note "Note" - Please note: MAGMA contains several benchmarks and examples that can be found in: - **$MAGMAROOT/testing/** - -!!! Note "Note" - MAGMA relies on the performance of all CPU cores as well as on the performance of the accelerator. 
Therefore, on Anselm, the number of CPU OpenMP threads has to be set to 16:
-    **export OMP_NUM_THREADS=16**
-
-
-See more details at the [MAGMA home page](http://icl.cs.utk.edu/magma/).
-
-References
-----------
-[1] MAGMA MIC: Linear Algebra Library for Intel Xeon Phi Coprocessors, Jack Dongarra et al., [http://icl.utk.edu/projectsfiles/magma/pubs/24-MAGMA_MIC_03.pdf](http://icl.utk.edu/projectsfiles/magma/pubs/24-MAGMA_MIC_03.pdf)
diff --git a/docs.it4i/anselm-cluster-documentation/software/omics-master/overview.md b/docs.it4i/anselm-cluster-documentation/software/omics-master/overview.md
deleted file mode 100644
index f8434816050ab7935f240c4d386a08a3230c2b09..0000000000000000000000000000000000000000
--- a/docs.it4i/anselm-cluster-documentation/software/omics-master/overview.md
+++ /dev/null
@@ -1,391 +0,0 @@
-Overview
-========
-
-The human NGS data processing solution
-
-Introduction
-------------
-The scope of this OMICS MASTER solution is restricted to human genomics research (disease-causing gene discovery in whole human genome or exome) or diagnosis (panel sequencing), although it could be extended to other usages in the future.
-
-The pipeline inputs the raw data produced by the sequencing machines and runs a processing procedure that consists of quality control, mapping and variant calling steps, resulting in a file containing the set of variants in the sample. From this point, the prioritization component or the diagnostic component can be launched.
-
-
-
-**Figure 1.** OMICS MASTER solution overview. Data is produced in the external labs and comes to IT4I (represented by the blue dashed line). The data pre-processor converts raw data into a list of variants and annotations for each sequenced patient. These list files, together with primary and secondary (alignment) data files, are stored in the IT4I sequence DB and uploaded to the discovery (candidate prioritization) or diagnostic component, where they can be analyzed directly by the user that produced them, depending on the experimental design carried out.
-
-Typical genomics pipelines are composed of several components that need to be launched manually. The advantage of the OMICS MASTER pipeline is that all these components are invoked sequentially in an automated way.
-
-The OMICS MASTER pipeline inputs a FASTQ file and outputs an enriched VCF file. The pipeline queues all the jobs to PBS from a single launched process that takes all the necessary input files and creates the intermediate and final folders.
-
-Let’s see each of the OMICS MASTER solution components:
-
-Components
-----------
-
-### Processing
-
-This component is composed of a set of programs that carry out quality controls, alignment, realignment, variant calling and variant annotation. It turns raw data from the sequencing machine into files containing lists of variants (VCF) that, once annotated, can be used by the following components (discovery and diagnosis).
-
-We distinguish three types of sequencing instruments: bench sequencers (MiSeq, IonTorrent, and Roche Junior, although this last one is about to be discontinued), which produce relatively low throughput (tens of millions of reads), and high-end sequencers, which produce high throughput (hundreds of millions of reads), among which we have Illumina HiSeq 2000 (and newer models) and SOLiD. All of them but SOLiD produce data in sequence format. SOLiD produces data in a special format called colour space that requires specific software for the mapping process.
Once the mapping has been done, the rest of the pipeline is identical. Anyway, SOLiD is a technology which is also about being discontinued by the manufacturer so, this type of data will be scarce in the future. - -#### Quality control, preprocessing and statistics for FASTQ - -FastQC& FastQC. - -These steps are carried out over the original FASTQ file with optimized scripts and includes the following steps: sequence cleansing, estimation of base quality scores, elimination of duplicates and statistics. - -Input: **FASTQ file.** - -Output: **FASTQ file plus an HTML file containing statistics on the data.** - -FASTQ format It represents the nucleotide sequence and its corresponding quality scores. - - -**Figure 2.**FASTQ file. - -#### Mapping - -Component:** Hpg-aligner.** - -Sequence reads are mapped over the human reference genome. SOLiD reads are not covered by this solution; they should be mapped with specific software (among the few available options, SHRiMP seems to be the best one). For the rest of NGS machine outputs we use HPG Aligner. HPG-Aligner is an innovative solution, based on a combination of mapping with BWT and local alignment with Smith-Waterman (SW), that drastically increases mapping accuracy (97% versus 62-70% by current mappers, in the most common scenarios). This proposal provides a simple and fast solution that maps almost all the reads, even those containing a high number of mismatches or indels. - -Input: **FASTQ file.** - -Output:** Aligned file in BAM format.** - -**Sequence Alignment/Map (SAM)** - -It is a human readable tab-delimited format in which each read and its alignment is represented on a single line. The format can represent unmapped reads, reads that are mapped to unique locations, and reads that are mapped to multiple locations. - -The SAM format (1) consists of one header section and one alignment section. The lines in the header section start with character ‘@’, and lines in the alignment section do not. All lines are TAB delimited. - -In SAM, each alignment line has 11 mandatory fields and a variable number of optional fields. The mandatory fields are briefly described in Table 1. They must be present but their value can be a ‘*’ or a zero (depending on the field) if the -corresponding information is unavailable. - - |**No.** |**Name** |**Description**| - |--|--| - |1 |QNAME |Query NAME of the read or the read pai | - |2 |FLAG |Bitwise FLAG (pairing,strand,mate strand,etc.) | - |3 |RNAME |<p>Reference sequence NAME | - |4 |POS |<p>1-Based leftmost POSition of clipped alignment | - |5 |MAPQ |<p>MAPping Quality (Phred-scaled) | - |6 |CIGAR |<p>Extended CIGAR string (operations:MIDNSHP) | - |7 |MRNM |<p>Mate REference NaMe ('=' if same RNAME) | - |8 |MPOS |<p>1-Based leftmost Mate POSition | - |9 |ISIZE |<p>Inferred Insert SIZE | - |10 |SEQ |<p>Query SEQuence on the same strand as the reference | - |11 |QUAL |<p>Query QUALity (ASCII-33=Phred base quality) | - -**Table 1.** Mandatory fields in the SAM format. - -The standard CIGAR description of pairwise alignment defines three operations: ‘M’ for match/mismatch, ‘I’ for insertion compared with the reference and ‘D’ for deletion. The extended CIGAR proposed in SAM added four more operations: ‘N’ for skipped bases on the reference, ‘S’ for soft clipping, ‘H’ for hard clipping and ‘P’ for padding. These support splicing, clipping, multi-part and padded alignments. Figure 3 shows examples of CIGAR strings for different types of alignments. - - - -**Figure 3.** SAM format file. 
The ‘@SQ’ line in the header section gives the order of reference sequences. Notably, r001 is the name of a read pair. According to FLAG 163 (=1+2+32+128), the read mapped to position 7 is the second read in the pair (128) and regarded as properly paired (1 + 2); its mate is mapped to 37 on the reverse strand (32). Read r002 has three soft-clipped (unaligned) bases. The coordinate shown in SAM is the position of the first aligned base. The CIGAR string for this alignment contains a P (padding) operation which correctly aligns the inserted sequences. Padding operations can be absent when an aligner does not support multiple sequence alignment. The last six bases of read r003 map to position 9, and the first five to position 29 on the reverse strand. The hard clipping operation H indicates that the clipped sequence is not present in the sequence field. The NM tag gives the number of mismatches. Read r004 is aligned across an intron, indicated by the N operation. - -**Binary Alignment/Map (BAM)** - -BAM is the binary representation of SAM and keeps exactly the same information as SAM. BAM uses lossless compression to reduce the size of the data by about 75% and provides an indexing system that allows reads that overlap a region of the genome to be retrieved and rapidly traversed. - -#### Quality control, preprocessing and statistics for BAM - -**Component:** Hpg-Fastq & FastQC. Some features: - -- Quality control: % reads with N errors, % reads with multiple mappings, strand bias, paired-end insert, ... -- Filtering: by number of errors, number of hits, … - - Comparator: stats, intersection, ... - -**Input:** BAM file. - -**Output:** BAM file plus an HTML file containing statistics. - -#### Variant Calling - -Component:** GATK.** - -Identification of single nucleotide variants and indels on the alignments is performed using the Genome Analysis Toolkit (GATK). GATK (2) is a software package developed at the Broad Institute to analyze high-throughput sequencing data. The toolkit offers a wide variety of tools, with a primary focus on variant discovery and genotyping as well as strong emphasis on data quality assurance. - -**Input:** BAM - -**Output:** VCF - -**Variant Call Format (VCF)** - -VCF (3) is a standardized format for storing the most prevalent types of sequence variation, including SNPs, indels and larger structural variants, together with rich annotations. The format was developed with the primary intention to represent human genetic variation, but its use is not restricted >to diploid genomes and can be used in different contexts as well. Its flexibility and user extensibility allows representation of a wide variety of genomic variation with respect to a single reference sequence. - -A VCF file consists of a header section and a data section. The header contains an arbitrary number of metainformation lines, each starting with characters ‘##’, and a TAB delimited field definition line, starting with a single ‘#’ character. The meta-information header lines provide a standardized description of tags and annotations used in the data section. The use of meta-information allows the information stored within a VCF file to be tailored to the dataset in question. It can be also used to provide information about the means of file creation, date of creation, version of the reference sequence, software used and any other information relevant to the history of the file. 
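A minimal sketch of such a header, followed by the field definition line (all values are illustrative only, not taken from a real dataset):

```bash
##fileformat=VCFv4.1
##fileDate=20110413
##reference=human_g1k_v37.fasta
##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">
#CHROM  POS  ID  REF  ALT  QUAL  FILTER  INFO  FORMAT  SAMPLE1
```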
The field definition line names eight mandatory columns, corresponding to data columns representing the chromosome (CHROM), a 1-based position of the start of the variant (POS), unique identifiers of the variant (ID), the reference allele (REF), a comma separated list of alternate non-reference alleles (ALT), a phred-scaled quality score (QUAL), site filtering information (FILTER) and a semicolon separated list of additional, user extensible annotation (INFO). In addition, if samples are present in the file, the mandatory header columns are followed by a FORMAT column and an arbitrary number of sample IDs that define the samples included in the VCF file. The FORMAT column is used to define the information contained within each subsequent genotype column, which consists of a colon separated list of fields. For example, the FORMAT field GT:GQ:DP in the fourth data entry of Figure 1a indicates that the subsequent entries contain information regarding the genotype, genotype quality and read depth for each sample. All data lines are TAB delimited and the number of fields in each data line must match the number of fields in the header line. It is strongly recommended that all annotation tags used are declared in the VCF header section. - - - -**Figure 4.** (a) Example of valid VCF. The header lines ##fileformat and #CHROM are mandatory, the rest is optional but strongly recommended. Each line of the body describes variants present in the sampled population at one genomic position or region. All alternate alleles are listed in the ALT column and referenced from the genotype fields as 1-based indexes to this list; the reference haplotype is designated as 0. For multiploid data, the separator indicates whether the data are phased (|) or unphased (/). Thus, the two alleles C and G at the positions 2 and 5 in this figure occur on the same chromosome in SAMPLE1. The first data line shows an example of a deletion (present in SAMPLE1) and a replacement of two bases by another base (SAMPLE2); the second line shows a SNP and an insertion; the third a SNP; the fourth a large structural variant described by the annotation in the INFO column, the coordinate is that of the base before the variant. (b–f ) Alignments and VCF representations of different sequence variants: SNP, insertion, deletion, replacement, and a large deletion. The REF columns shows the reference bases replaced by the haplotype in the ALT column. The coordinate refers to the first reference base. (g) Users are advised to use simplest representation possible and lowest coordinate in cases where the position is ambiguous. - -###Annotating - -**Component:** HPG-Variant - -The functional consequences of every variant found are then annotated using the HPG-Variant software, which extracts from CellBase**,** the Knowledge database, all the information relevant on the predicted pathologic effect of the variants. - -VARIANT (VARIant Analysis Tool) (4) reports information on the variants found that include consequence type and annotations taken from different databases and repositories (SNPs and variants from dbSNP and 1000 genomes, and disease-related variants from the Genome-Wide Association Study (GWAS) catalog, Online Mendelian Inheritance in Man (OMIM), Catalog of Somatic Mutations in Cancer (COSMIC) mutations, etc. VARIANT also produces a rich variety of annotations that include information on the regulatory (transcription factor or miRNAbinding sites, etc.) 
or structural roles, or on the selective pressures on the sites affected by the variation. This information allows extending the conventional reports beyond the coding regions and expands the knowledge on the contribution of non-coding or synonymous variants to the phenotype studied. - -**Input:** VCF - -**Output:** The output of this step is the Variant Calling Format (VCF) file, which contains changes with respect to the reference genome with the corresponding QC and functional annotations. - -#### CellBase - -CellBase(5) is a relational database integrates biological information from different sources and includes: - -**Core features:** - -We took genome sequences, genes, transcripts, exons, cytobands or cross references (xrefs) identifiers (IDs) from Ensembl (6). Protein information including sequences, xrefs or protein features (natural variants, mutagenesis sites, post-translational modifications, etc.) were imported from UniProt (7). - -**Regulatory:** - -CellBase imports miRNA from miRBase (8); curated and non-curated miRNA targets from miRecords (9), miRTarBase (10), -TargetScan(11) and microRNA.org (12) and CpG islands and conserved regions from the UCSC database (13). - -**Functional annotation** - -OBO Foundry (14) develops many biomedical ontologies that are implemented in OBO format. We designed a SQL schema to store these OBO ontologies and >30 ontologies were imported. OBO ontology term annotations were taken from Ensembl (6). InterPro (15) annotations were also imported. - -**Variation** - -CellBase includes SNPs from dbSNP (16)^; SNP population frequencies from HapMap (17), 1000 genomes project (18) and Ensembl (6); phenotypically annotated SNPs were imported from NHRI GWAS Catalog (19),HGMD (20), Open Access GWAS Database (21), UniProt (7) and OMIM (22); mutations from COSMIC (23) and structural variations from Ensembl (6). - -**Systems biology** - -We also import systems biology information like interactome information from IntAct (24). Reactome (25) stores pathway and interaction information in BioPAX (26) format. BioPAX data exchange format enables the integration of diverse pathway -resources. We successfully solved the problem of storing data released in BioPAX format into a SQL relational schema, which allowed us importing Reactome in CellBase. - -### [Diagnostic component (TEAM)](diagnostic-component-team/) - -### [Priorization component (BiERApp)](priorization-component-bierapp/) - -Usage ------ -First of all, we should load ngsPipeline module: - -```bash - $ module load ngsPipeline -``` - -This command will load python/2.7.5 module and all the required modules (hpg-aligner, gatk, etc) - -If we launch ngsPipeline with ‘-h’, we will get the usage help: - -```bash - $ ngsPipeline -h - Usage: ngsPipeline.py [-h] -i INPUT -o OUTPUT -p PED --project PROJECT --queue - QUEUE [--stages-path STAGES_PATH] [--email EMAIL] - [--prefix PREFIX] [-s START] [-e END] --log - - Python pipeline - - optional arguments: - -h, --help show this help message and exit - -i INPUT, --input INPUT - -o OUTPUT, --output OUTPUT - Output Data directory - -p PED, --ped PED Ped file with all individuals - --project PROJECT Project Id - --queue QUEUE Queue Id - --stages-path STAGES_PATH - Custom Stages path - --email EMAIL Email - --prefix PREFIX Prefix name for Queue Jobs name - -s START, --start START - Initial stage - -e END, --end END Final stage - --log Log to file - -``` - -Let us see a brief description of the arguments: - -```bash - *-h --help*. Show the help. 
- - *-i, --input.* The input data directory. This directory must to have a special structure. We have to create one folder per sample (with the same name). These folders will host the fastq files. These fastq files must have the following pattern “sampleName” + “_” + “1 or 2” + “.fq”. 1 for the first pair (in paired-end sequences), and 2 for the -second one. - - *-o , --output.* The output folder. This folder will contain all the intermediate and final folders. When the pipeline will be executed completely, we could remove the intermediate folders and keep only the final one (with the VCF file containing all the variants) - - *-p , --ped*. The ped file with the pedigree. This file contains all the sample names. These names must coincide with the names of the input folders. If our input folder contains more samples than the .ped file, the pipeline will use only the samples from the .ped file. - - *--email.* Email for PBS notifications. - - *--prefix.* Prefix for PBS Job names. - - *-s, --start & -e, --end.* Initial and final stage. If we want to launch the pipeline in a specific stage we must use -s. If we want to end the pipeline in a specific stage we must use -e. - - *--log*. Using log argument NGSpipeline will prompt all the logs to this file. - - *--project*>. Project ID of your supercomputer allocation. - - *--queue*. [Queue](../../resource-allocation-and-job-execution/introduction.html) to run the jobs in. -``` - -Input, output and ped arguments are mandatory. If the output folder does not exist, the pipeline will create it. - -Examples ---------------------- -This is an example usage of NGSpipeline: - -We have a folder with the following structure in - -```bash -/apps/bio/omics/1.0/sample_data/ >: - - /apps/bio/omics/1.0/sample_data - └── data - ├── file.ped - ├── sample1 - │ ├── sample1_1.fq - │ └── sample1_2.fq - └── sample2 - ├── sample2_1.fq - └── sample2_2.fq -``` - -The ped file ( file.ped) contains the following info: - -```bash - #family_ID sample_ID parental_ID maternal_ID sex phenotype - FAM sample_A 0 0 1 1 - FAM sample_B 0 0 2 2 -``` - -Now, lets load the NGSPipeline module and copy the sample data to a [scratch directory](../../storage/storage/): - -```bash - $ module load ngsPipeline - $ mkdir -p /scratch/$USER/omics/results - $ cp -r /apps/bio/omics/1.0/sample_data /scratch/$USER/omics/ -``` - -Now, we can launch the pipeline (replace OPEN-0-0 with your Project ID): - -```bash - $ ngsPipeline -i /scratch/$USER/omics/sample_data/data -o /scratch/$USER/omics/results -p /scratch/$USER/omics/sample_data/data/file.ped --project OPEN-0-0 --queue qprod -``` - -This command submits the processing [jobs to the queue](../../resource-allocation-and-job-execution/job-submission-and-execution.html). - -If we want to re-launch the pipeline from stage 4 until stage 20 we should use the next command: - -```bash - $ ngsPipeline -i /scratch/$USER/omics/sample_data/data -o /scratch/$USER/omics/results -p /scratch/$USER/omics/sample_data/data/file.ped -s 4 -e 20 --project OPEN-0-0 --queue qprod -``` - -Details on the pipeline ------------------------------------- - -The pipeline calls the following tools: -- [fastqc](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/), quality control tool for high throughput - sequence data. -- [gatk](https://www.broadinstitute.org/gatk/), The Genome Analysis Toolkit or GATK is a software package developed at - the Broad Institute to analyze high-throughput sequencing data. 
The toolkit offers a wide variety of tools, with a primary focus on variant discovery and genotyping as well as strong emphasis on data quality assurance. Its robust architecture, powerful processing engine and high-performance computing features make it capable of taking on projects of any size. -- [hpg-aligner](http://wiki.opencb.org/projects/hpg/doku.php?id=aligner:downloads), HPG Aligner has been designed to align short and long reads with high sensitivity, therefore any number of mismatches or indels are allowed. HPG Aligner implements and combines two well known algorithms: *Burrows-Wheeler Transform* (BWT) to speed-up mapping high-quality reads, and *Smith-Waterman*> (SW) to increase sensitivity when reads cannot be mapped using BWT. -- [hpg-fastq](http://docs.bioinfo.cipf.es/projects/fastqhpc/wiki), a quality control tool for high throughput sequence data. -- [hpg-variant](http://wiki.opencb.org/projects/hpg/doku.php?id=variant:downloads), The HPG Variant suite is an ambitious project aimed to provide a complete suite of tools to work with genomic variation data, from VCF tools to variant profiling or genomic statistics. It is being implemented using High Performance Computing technologies to provide the best performance possible. -- [picard](http://picard.sourceforge.net/), Picard comprises Java-based command-line utilities that manipulate SAM files, and a Java API (HTSJDK) for creating new programs that read and write SAM files. Both SAM text format and SAM binary (BAM) format are supported. -- [samtools](http://samtools.sourceforge.net/samtools-c.shtml), SAM Tools provide various utilities for manipulating alignments in the SAM format, including sorting, merging, indexing and generating alignments in a per-position format. -- [snpEff](http://snpeff.sourceforge.net/), Genetic variant annotation and effect prediction toolbox. - -This listing show which tools are used in each step of the pipeline : - -- stage-00: fastqc -- stage-01: hpg_fastq -- stage-02: fastqc -- stage-03: hpg_aligner and samtools -- stage-04: samtools -- stage-05: samtools -- stage-06: fastqc -- stage-07: picard -- stage-08: fastqc -- stage-09: picard -- stage-10: gatk -- stage-11: gatk -- stage-12: gatk -- stage-13: gatk -- stage-14: gatk -- stage-15: gatk -- stage-16: samtools -- stage-17: samtools -- stage-18: fastqc -- stage-19: gatk -- stage-20: gatk -- stage-21: gatk -- stage-22: gatk -- stage-23: gatk -- stage-24: hpg-variant -- stage-25: hpg-variant -- stage-26: snpEff -- stage-27: snpEff -- stage-28: hpg-variant - -Interpretation ---------------------------- - -The output folder contains all the subfolders with the intermediate data. This folder contains the final VCF with all the variants. This file can be uploaded into [TEAM](diagnostic-component-team.html) by using the VCF file button. It is important to note here that the entire management of the VCF file is local: no patient’s sequence data is sent over the Internet thus avoiding any problem of data privacy or confidentiality. - - - -**Figure 7**. *TEAM upload panel.* *Once the file has been uploaded, a panel must be chosen from the Panel* list. Then, pressing the Run button the diagnostic process starts. - -Once the file has been uploaded, a panel must be chosen from the Panel list. Then, pressing the Run button the diagnostic process starts. TEAM searches first for known diagnostic mutation(s) taken from four databases: HGMD-public (20), [HUMSAVAR](http://www.uniprot.org/docs/humsavar), ClinVar (29)^ and COSMIC (23). 
- - - -**Figure 7.** *The panel manager. The elements used to define a panel are (**A**) disease terms, (**B**) diagnostic mutations and (**C**) genes. Arrows represent actions that can be taken in the panel manager. Panels can be defined by using the known mutations and genes of a particular disease. This can be done by dragging them to the **Primary Diagnostic** box (action **D**). This action, in addition to defining the diseases in the **Primary Diagnostic** box, automatically adds the corresponding genes to the **Genes** box. The panels can be customized by adding new genes (action **F**) or removing undesired genes (action **G**). New disease mutations can be added independently or associated to an already existing disease term (action **E**). Disease terms can be removed by simply dragging them back (action **H**).* - -For variant discovering/filtering we should upload the VCF file into BierApp by using the following form: - -** - -**Figure 8.** *BierApp VCF upload panel. It is recommended to choose a name for the job as well as a description.** - -Each prioritization (‘job’) has three associated screens that facilitate the filtering steps. The first one, the ‘Summary’ tab, displays a statistic of the data set analyzed, containing the samples analyzed, the number and types of variants found and its distribution according to consequence types. The second screen, in the ‘Variants and effect’ tab, is the actual filtering tool, and the third one, the ‘Genome view’ tab, offers a representation of the selected variants within the genomic context provided by an embedded version of the Genome Maps Tool (30). - - - -**Figure 9.** This picture shows all the information associated to the variants. If a variant has an associated phenotype we could see it in the last column. In this case, the variant 7:132481242 C>T is associated to the phenotype: large intestine tumor. - -References ------------------------ - -1. Heng Li, Bob Handsaker, Alec Wysoker, Tim Fennell, Jue Ruan, Nils Homer, Gabor Marth5, Goncalo Abecasis6, Richard Durbin and 1000 Genome Project Data Processing Subgroup: The Sequence Alignment/Map format and SAMtools. Bioinformatics 2009, 25: 2078-2079. -2. McKenna A, Hanna M, Banks E, Sivachenko A, Cibulskis K, Kernytsky A, Garimella K, Altshuler D, Gabriel S, Daly M, DePristo MA: The Genome Analysis Toolkit: a MapReduce framework for analyzing next-generation DNA sequencing data. *Genome Res* >2010, 20:1297-1303. -3. Petr Danecek, Adam Auton, Goncalo Abecasis, Cornelis A. Albers, Eric Banks, Mark A. DePristo, Robert E. Handsaker, Gerton Lunter, Gabor T. Marth, Stephen T. Sherry, Gilean McVean, Richard Durbin, and 1000 Genomes Project Analysis Group. The variant call format and VCFtools. Bioinformatics 2011, 27: 2156-2158. -4. Medina I, De Maria A, Bleda M, Salavert F, Alonso R, Gonzalez CY, Dopazo J: VARIANT: Command Line, Web service and Web interface for fast and accurate functional characterization of variants found by Next-Generation Sequencing. Nucleic Acids Res 2012, 40:W54-58. -5. Bleda M, Tarraga J, de Maria A, Salavert F, Garcia-Alonso L, Celma M, Martin A, Dopazo J, Medina I: CellBase, a comprehensive collection of RESTful web services for retrieving relevant biological information from heterogeneous sources. Nucleic Acids Res 2012, 40:W609-614. -6. Flicek,P., Amode,M.R., Barrell,D., Beal,K., Brent,S., Carvalho-Silva,D., Clapham,P., Coates,G., Fairley,S., Fitzgerald,S. et al. (2012) Ensembl 2012. Nucleic Acids Res., 40, D84–D90. -7. UniProt Consortium. 
(2012) Reorganizing the protein space at the Universal Protein Resource (UniProt). Nucleic Acids Res., 40, D71–D75. -8. Kozomara,A. and Griffiths-Jones,S. (2011) miRBase: integrating microRNA annotation and deep-sequencing data. Nucleic Acids Res., 39, D152–D157. -9. Xiao,F., Zuo,Z., Cai,G., Kang,S., Gao,X. and Li,T. (2009) miRecords: an integrated resource for microRNA-target interactions. Nucleic Acids Res., 37, D105–D110. -10. Hsu,S.D., Lin,F.M., Wu,W.Y., Liang,C., Huang,W.C., Chan,W.L., Tsai,W.T., Chen,G.Z., Lee,C.J., Chiu,C.M. et al. (2011) miRTarBase: a database curates experimentally validated microRNA-target interactions. Nucleic Acids Res., 39, D163–D169. -11. Friedman,R.C., Farh,K.K., Burge,C.B. and Bartel,D.P. (2009) Most mammalian mRNAs are conserved targets of microRNAs. Genome Res., 19, 92–105. 12. Betel,D., Wilson,M., Gabow,A., Marks,D.S. and Sander,C. (2008) The microRNA.org resource: targets and expression. Nucleic Acids Res., 36, D149–D153. -13. Dreszer,T.R., Karolchik,D., Zweig,A.S., Hinrichs,A.S., Raney,B.J., Kuhn,R.M., Meyer,L.R., Wong,M., Sloan,C.A., Rosenbloom,K.R. et al. (2012) The UCSC genome browser database: extensions and updates 2011. Nucleic Acids Res.,40, D918–D923. -14. Smith,B., Ashburner,M., Rosse,C., Bard,J., Bug,W., Ceusters,W., Goldberg,L.J., Eilbeck,K., Ireland,A., Mungall,C.J. et al. (2007) The OBO Foundry: coordinated evolution of ontologies to support biomedical data integration. Nat. Biotechnol., 25, 1251–1255. -15. Hunter,S., Jones,P., Mitchell,A., Apweiler,R., Attwood,T.K.,Bateman,A., Bernard,T., Binns,D., Bork,P., Burge,S. et al. (2012) InterPro in 2011: new developments in the family and domain prediction database. Nucleic Acids Res.,40, D306–D312. -16. Sherry,S.T., Ward,M.H., Kholodov,M., Baker,J., Phan,L., Smigielski,E.M. and Sirotkin,K. (2001) dbSNP: the NCBI database of genetic variation. Nucleic Acids Res., 29, 308–311. -17. Altshuler,D.M., Gibbs,R.A., Peltonen,L., Dermitzakis,E., Schaffner,S.F., Yu,F., Bonnen,P.E., de Bakker,P.I., Deloukas,P., Gabriel,S.B. et al. (2010) Integrating common and rare genetic variation in diverse human populations. Nature, 467, 52–58. -18. 1000 Genomes Project Consortium. (2010) A map of human genome variation from population-scale sequencing. Nature, 467, 1061–1073. -19. Hindorff,L.A., Sethupathy,P., Junkins,H.A., Ramos,E.M., Mehta,J.P., Collins,F.S. and Manolio,T.A. (2009) Potential etiologic and functional implications of genome-wide association loci for human diseases and traits. Proc. Natl Acad. Sci. USA, 106, 9362–9367. -20. Stenson,P.D., Ball,E.V., Mort,M., Phillips,A.D., Shiel,J.A., Thomas,N.S., Abeysinghe,S., Krawczak,M. and Cooper,D.N. (2003) Human gene mutation database (HGMD): 2003 update. Hum. Mutat., 21, 577–581. -21. Johnson,A.D. and O’Donnell,C.J. (2009) An open access database of genome-wide association results. BMC Med. Genet, 10, 6. -22. McKusick,V. (1998) A Catalog of Human Genes and Genetic Disorders, 12th edn. John Hopkins University Press,Baltimore, MD. -23. Forbes,S.A., Bindal,N., Bamford,S., Cole,C., Kok,C.Y., Beare,D., Jia,M., Shepherd,R., Leung,K., Menzies,A. et al. (2011) COSMIC: mining complete cancer genomes in the catalogue of somatic mutations in cancer. Nucleic Acids Res., 39, D945–D950. -24. Kerrien,S., Aranda,B., Breuza,L., Bridge,A., Broackes-Carter,F., Chen,C., Duesbury,M., Dumousseau,M., Feuermann,M., Hinz,U. et al. (2012) The Intact molecular interaction database in 2012. Nucleic Acids Res., 40, D841–D846. -25. 
Croft,D., O’Kelly,G., Wu,G., Haw,R., Gillespie,M., Matthews,L., Caudy,M., Garapati,P., Gopinath,G., Jassal,B. et al. (2011) Reactome: a database of reactions, pathways and biological processes. Nucleic Acids Res., 39, D691–D697. -26. Demir,E., Cary,M.P., Paley,S., Fukuda,K., Lemer,C., Vastrik,I.,Wu,G., D’Eustachio,P., Schaefer,C., Luciano,J. et al. (2010) The BioPAX community standard for pathway data sharing. Nature Biotechnol., 28, 935–942. -27. Alemán Z, García-García F, Medina I, Dopazo J (2014): A web tool for the design and management of panels of genes for targeted enrichment and massive sequencing for clinical applications. Nucleic Acids Res 42: W83-7. -28. [Alemán A](http://www.ncbi.nlm.nih.gov/pubmed?term=Alem%C3%A1n%20A%5BAuthor%5D&cauthor=true&cauthor_uid=24803668)>, [Garcia-Garcia F](http://www.ncbi.nlm.nih.gov/pubmed?term=Garcia-Garcia%20F%5BAuthor%5D&cauthor=true&cauthor_uid=24803668)>, [Salavert F](http://www.ncbi.nlm.nih.gov/pubmed?term=Salavert%20F%5BAuthor%5D&cauthor=true&cauthor_uid=24803668)>, [Medina I](http://www.ncbi.nlm.nih.gov/pubmed?term=Medina%20I%5BAuthor%5D&cauthor=true&cauthor_uid=24803668)>, [Dopazo J](http://www.ncbi.nlm.nih.gov/pubmed?term=Dopazo%20J%5BAuthor%5D&cauthor=true&cauthor_uid=24803668)> (2014). A web-based interactive framework to assist in the prioritization of disease candidate genes in whole-exome sequencing studies. [Nucleic Acids Res.](http://www.ncbi.nlm.nih.gov/pubmed/?term=BiERapp "Nucleic acids research.")>42 :W88-93. -29. Landrum,M.J., Lee,J.M., Riley,G.R., Jang,W., Rubinstein,W.S., Church,D.M. and Maglott,D.R. (2014) ClinVar: public archive of relationships among sequence variation and human phenotype. Nucleic Acids Res., 42, D980–D985. -30. Medina I, Salavert F, Sanchez R, de Maria A, Alonso R, Escobar P, Bleda M, Dopazo J: Genome Maps, a new generation genome browser. Nucleic Acids Res 2013, 41:W41-46. diff --git a/docs.it4i/anselm-cluster-documentation/software/operating-system.md b/docs.it4i/anselm-cluster-documentation/software/operating-system.md deleted file mode 100644 index 03a9cf45b0da087a378c1555974b53c81e41a09b..0000000000000000000000000000000000000000 --- a/docs.it4i/anselm-cluster-documentation/software/operating-system.md +++ /dev/null @@ -1,7 +0,0 @@ -Operating System -=============== - -The operating system on Anselm is Linux - **bullx Linux Server release 6.x** - -bullx Linux is based on Red Hat Enterprise Linux. bullx Linux is a Linux distribution provided by Bull and dedicated to HPC applications. - diff --git a/docs.it4i/anselm-cluster-documentation/capacity-computing.md b/docs.it4i/anselm/capacity-computing.md similarity index 75% rename from docs.it4i/anselm-cluster-documentation/capacity-computing.md rename to docs.it4i/anselm/capacity-computing.md index e916a14128f321a024e092e7c1e0a94e2c58fbd1..6ce94ca34b77ac4b6cc24168fc36ae4e8e0839fa 100644 --- a/docs.it4i/anselm-cluster-documentation/capacity-computing.md +++ b/docs.it4i/anselm/capacity-computing.md @@ -1,40 +1,37 @@ -Capacity computing -================== +# Capacity computing + +## Introduction -Introduction ------------- In many cases, it is useful to submit huge (>100+) number of computational jobs into the PBS queue system. Huge number of (small) jobs is one of the most effective ways to execute embarrassingly parallel calculations, achieving best runtime, throughput and computer utilization. However, executing huge number of jobs via the PBS queue may strain the system. 
This strain may result in slow response to commands, inefficient scheduling and overall degradation of performance and user experience, for all users. For this reason, the number of jobs is **limited to 100 per user, 1000 per job array** -!!! Note "Note" - Please follow one of the procedures below, in case you wish to schedule more than 100 jobs at a time. +!!! note + Please follow one of the procedures below, in case you wish to schedule more than 100 jobs at a time. -- Use [Job arrays](capacity-computing/#job-arrays) when running huge number of [multithread](capacity-computing/#shared-jobscript-on-one-node) (bound to one node only) or multinode (multithread across several nodes) jobs -- Use [GNU parallel](capacity-computing/#gnu-parallel) when running single core jobs -- Combine [GNU parallel with Job arrays](capacity-computing/#job-arrays-and-gnu-parallel) when running huge number of single core jobs +* Use [Job arrays](capacity-computing/#job-arrays) when running huge number of [multithread](capacity-computing/#shared-jobscript-on-one-node) (bound to one node only) or multinode (multithread across several nodes) jobs +* Use [GNU parallel](capacity-computing/#gnu-parallel) when running single core jobs +* Combine [GNU parallel with Job arrays](capacity-computing/#job-arrays-and-gnu-parallel) when running huge number of single core jobs -Policy ------- +## Policy -1. A user is allowed to submit at most 100 jobs. Each job may be [a job array](capacity-computing/#job-arrays). -2. The array size is at most 1000 subjobs. +1. A user is allowed to submit at most 100 jobs. Each job may be [a job array](capacity-computing/#job-arrays). +1. The array size is at most 1000 subjobs. -Job arrays --------------- +## Job Arrays -!!! Note "Note" - Huge number of jobs may be easily submitted and managed as a job array. +!!! note + Huge number of jobs may be easily submitted and managed as a job array. A job array is a compact representation of many jobs, called subjobs. The subjobs share the same job script, and have the same values for all attributes and resources, with the following exceptions: -- each subjob has a unique index, $PBS_ARRAY_INDEX -- job Identifiers of subjobs only differ by their indices -- the state of subjobs can differ (R,Q,...etc.) +* each subjob has a unique index, $PBS_ARRAY_INDEX +* job Identifiers of subjobs only differ by their indices +* the state of subjobs can differ (R,Q,...etc.) All subjobs within a job array have the same scheduling priority and schedule as independent jobs. Entire job array is submitted through a single qsub command and may be managed by qdel, qalter, qhold, qrls and qsig commands as a single job. -### Shared jobscript +### Shared Jobscript All subjobs in job array use the very same, single jobscript. Each subjob runs its own instance of the jobscript. The instances execute different work controlled by $PBS_ARRAY_INDEX variable. @@ -73,11 +70,11 @@ cp $PBS_O_WORKDIR/$TASK input ; cp $PBS_O_WORKDIR/myprog.x . cp output $PBS_O_WORKDIR/$TASK.out ``` -In this example, the submit directory holds the 900 input files, executable myprog.x and the jobscript file. As input for each run, we take the filename of input file from created tasklist file. We copy the input file to local scratch /lscratch/$PBS_JOBID, execute the myprog.x and copy the output file back to >the submit directory, under the $TASK.out name. The myprog.x runs on one node only and must use threads to run in parallel. 
Be aware, that if the myprog.x **is not multithreaded**, then all the **jobs are run as single thread programs in sequential** manner. Due to allocation of the whole node, the accounted time is equal to the usage of whole node**, while using only 1/16 of the node! +In this example, the submit directory holds the 900 input files, executable myprog.x and the jobscript file. As input for each run, we take the filename of input file from created tasklist file. We copy the input file to local scratch /lscratch/$PBS_JOBID, execute the myprog.x and copy the output file back to the submit directory, under the $TASK.out name. The myprog.x runs on one node only and must use threads to run in parallel. Be aware, that if the myprog.x **is not multithreaded**, then all the **jobs are run as single thread programs in sequential** manner. Due to allocation of the whole node, the accounted time is equal to the usage of whole node, while using only 1/16 of the node! If huge number of parallel multicore (in means of multinode multithread, e. g. MPI enabled) jobs is needed to run, then a job array approach should also be used. The main difference compared to previous example using one node is that the local scratch should not be used (as it's not shared between nodes) and MPI or other technique for parallel multinode run has to be used properly. -### Submit the job array +### Submit the Job Array To submit the job array, use the qsub -J command. The 900 jobs of the [example above](capacity-computing/#array_example) may be submitted like this: @@ -96,7 +93,7 @@ $ qsub -N JOBNAME -J 9-10:2 jobscript This will only choose the lower index (9 in this example) for submitting/running your job. -### Manage the job array +### Manage the Job Array Check status of the job array by the qstat command. @@ -104,10 +101,10 @@ Check status of the job array by the qstat command. $ qstat -a 12345[].dm2 dm2: - Req'd Req'd Elap -Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time + Req'd Req'd Elap +Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time --------------- -------- -- |---|---| ------ --- --- ------ ----- - ----- -12345[].dm2 user2 qprod xx 13516 1 16 -- 00:50 B 00:02 +12345[].dm2 user2 qprod xx 13516 1 16 -- 00:50 B 00:02 ``` The status B means that some subjobs are already running. @@ -117,16 +114,16 @@ Check status of the first 100 subjobs by the qstat command. $ qstat -a 12345[1-100].dm2 dm2: - Req'd Req'd Elap -Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time + Req'd Req'd Elap +Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time --------------- -------- -- |---|---| ------ --- --- ------ ----- - ----- -12345[1].dm2 user2 qprod xx 13516 1 16 -- 00:50 R 00:02 -12345[2].dm2 user2 qprod xx 13516 1 16 -- 00:50 R 00:02 -12345[3].dm2 user2 qprod xx 13516 1 16 -- 00:50 R 00:01 -12345[4].dm2 user2 qprod xx 13516 1 16 -- 00:50 Q -- +12345[1].dm2 user2 qprod xx 13516 1 16 -- 00:50 R 00:02 +12345[2].dm2 user2 qprod xx 13516 1 16 -- 00:50 R 00:02 +12345[3].dm2 user2 qprod xx 13516 1 16 -- 00:50 R 00:01 +12345[4].dm2 user2 qprod xx 13516 1 16 -- 00:50 Q -- . . . . . . . . . . . , . . . . . . . . . . -12345[100].dm2 user2 qprod xx 13516 1 16 -- 00:50 Q -- +12345[100].dm2 user2 qprod xx 13516 1 16 -- 00:50 Q -- ``` Delete the entire job array. Running subjobs will be killed, queueing subjobs will be deleted. @@ -150,13 +147,12 @@ $ qstat -u $USER -tJ Read more on job arrays in the [PBSPro Users guide](../../pbspro-documentation/). 
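+A minimal sketch of how each subjob maps its $PBS_ARRAY_INDEX to a single task, assuming a tasklist file with one input filename per line (the file and variable names are illustrative):
+
+```bash
+# pick the tasklist line matching this subjob's index
+TASK=$(sed -n "${PBS_ARRAY_INDEX}p" $PBS_O_WORKDIR/tasklist)
+cp $PBS_O_WORKDIR/$TASK input    # stage this subjob's input file
+```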
-GNU parallel
----------------
+## GNU Parallel

-!!! Note "Note"
-    Use GNU parallel to run many single core tasks on one node.
+!!! note
+    Use GNU parallel to run many single core tasks on one node.

-GNU parallel is a shell tool for executing jobs in parallel using one or more computers. A job can be a single command or a small script that has to be run for each of the lines in the input. GNU parallel is most useful in running single core jobs via the queue system on Anselm. 
+GNU parallel is a shell tool for executing jobs in parallel using one or more computers. A job can be a single command or a small script that has to be run for each of the lines in the input. GNU parallel is most useful in running single core jobs via the queue system on Anselm.

For more information and examples see the parallel man page:

```bash
$ module add parallel
$ man parallel
```

-### GNU parallel jobscript
+### GNU Parallel Jobscript

The GNU parallel shell executes multiple instances of the jobscript using all cores on the node. The instances execute different work, controlled by the $PARALLEL_SEQ variable.

@@ -201,7 +197,7 @@ TASK=$1
cp $PBS_O_WORKDIR/$TASK input

# execute the calculation
-cat input > output
+cat input > output

# copy output file to submit directory
cp output $PBS_O_WORKDIR/$TASK.out

@@ -209,7 +205,7 @@
In this example, tasks from tasklist are executed via the GNU parallel. The jobscript executes multiple instances of itself in parallel, on all cores of the node. Once an instance of the jobscript is finished, a new instance starts, until all entries in tasklist are processed. The currently processed entry of the tasklist may be retrieved via the $1 variable. Variable $TASK expands to one of the input filenames from tasklist. We copy the input file to local scratch, execute the myprog.x and copy the output file back to the submit directory, under the $TASK.out name.

-### Submit the job
+### Submit the Job

To submit the job, use the qsub command. The 101 tasks' job of the [example above](capacity-computing/#gp_example) may be submitted like this:

```bash
$ qsub -N JOBNAME jobscript
12345.dm2
```

-In this example, we submit a job of 101 tasks. 16 input files will be processed in parallel. The 101 tasks on 16 cores are assumed to complete in less than 2 hours. 
+In this example, we submit a job of 101 tasks. 16 input files will be processed in parallel. The 101 tasks on 16 cores are assumed to complete in less than 2 hours.

-Please note the #PBS directives in the beginning of the jobscript file, dont' forget to set your valid PROJECT_ID and desired queue.
+!!! hint
+    Use #PBS directives at the beginning of the jobscript file; don't forget to set your valid PROJECT_ID and desired queue.

-Job arrays and GNU parallel
---------------------------
+## Job Arrays and GNU Parallel

-!!! Note "Note"
-    Combine the Job arrays and GNU parallel for best throughput of single core jobs
+!!! note
+    Combine the Job arrays and GNU parallel for the best throughput of single core jobs

While job arrays are able to utilize all available computational nodes, GNU parallel can be used to efficiently run multiple single-core jobs on a single node. The two approaches may be combined to utilize all available (current and future) resources to execute single core jobs.

-!!! Note "Note"
-    Every subjob in an array runs GNU parallel to utilize all cores on the node
+!!! note
+    Every subjob in an array runs GNU parallel to utilize all cores on the node

-### GNU parallel, shared jobscript
+### GNU Parallel, Shared Jobscript

A combined approach, very similar to job arrays, can be taken. A job array is submitted to the queuing system. The subjobs run GNU parallel. The GNU parallel shell executes multiple instances of the jobscript using all cores on the node. The instances execute different work, controlled by the $PBS_JOB_ARRAY and $PARALLEL_SEQ variables.

@@ -283,18 +279,18 @@ cat input > output
cp output $PBS_O_WORKDIR/$TASK.out
```

-In this example, the jobscript executes in multiple instances in parallel, on all cores of a computing node. Variable $TASK expands to one of the input filenames from tasklist. We copy the input file to local scratch, execute the myprog.x and copy the output file back to the submit directory, under the $TASK.out name. The numtasks file controls how many tasks will be run per subjob. Once an task is finished, new task starts, until the number of tasks in numtasks file is reached.
+In this example, the jobscript executes in multiple instances in parallel, on all cores of a computing node. Variable $TASK expands to one of the input filenames from tasklist. We copy the input file to local scratch, execute the myprog.x and copy the output file back to the submit directory, under the $TASK.out name. The numtasks file controls how many tasks will be run per subjob. Once a task is finished, a new task starts, until the number of tasks in the numtasks file is reached.

-!!! Note "Note"
-    Select subjob walltime and number of tasks per subjob carefully
+!!! note
+    Select subjob walltime and number of tasks per subjob carefully

When deciding these values, think about the following guiding rules:

-1. Let n=N/16. Inequality (n+1) * T < W should hold. The N is number of tasks per subjob, T is expected single task walltime and W is subjob walltime. Short subjob walltime improves scheduling and job throughput.
-2. Number of tasks should be modulo 16.
-3. These rules are valid only when all tasks have similar task walltimes T.
+1. Let n=N/16. The inequality (n+1) \* T < W should hold, where N is the number of tasks per subjob, T is the expected single task walltime and W is the subjob walltime. Short subjob walltime improves scheduling and job throughput.
+1. The number of tasks should be a multiple of 16.
+1. These rules are valid only when all tasks have similar task walltimes T.

-### Submit the job array
+### Submit the Job Array (-J)

To submit the job array, use the qsub -J command. The 992 tasks' job of the [example above](capacity-computing/#combined_example) may be submitted like this:

@@ -305,10 +301,10 @@ $ qsub -N JOBNAME -J 1-992:32 jobscript

In this example, we submit a job array of 31 subjobs. Note the -J 1-992:**32**; this must match the number written to the numtasks file. Each subjob will run on a full node and process 16 input files in parallel, 32 in total per subjob. Every subjob is assumed to complete in less than 2 hours.

-Please note the #PBS directives in the beginning of the jobscript file, dont' forget to set your valid PROJECT_ID and desired queue.
+!!! hint
+    Use #PBS directives at the beginning of the jobscript file; don't forget to set your valid PROJECT_ID and desired queue.

-Examples
--------
+## Examples

Download the examples in [capacity.zip](capacity.zip), illustrating the above listed ways to run a huge number of jobs. We recommend trying out the examples before using this approach for production jobs.
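+As a quick numeric check of the guiding rules above, a hedged sketch (the task counts and walltimes are illustrative only):
+
+```bash
+N=32; T=1800; W=7200        # 32 tasks per subjob, 30 min per task, 2 h subjob walltime
+n=$((N / 16))               # 16 cores per node -> n = 2 tasks per core stream
+echo $(( (n + 1) * T ))     # (2+1) x 1800 = 5400 s, which is < W = 7200 s -> walltime fits
+```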
diff --git a/docs.it4i/anselm-cluster-documentation/capacity.zip b/docs.it4i/anselm/capacity.zip similarity index 100% rename from docs.it4i/anselm-cluster-documentation/capacity.zip rename to docs.it4i/anselm/capacity.zip diff --git a/docs.it4i/anselm/compute-nodes.md b/docs.it4i/anselm/compute-nodes.md new file mode 100644 index 0000000000000000000000000000000000000000..57a6df29e675632b1c5d1951232a7c2807313f15 --- /dev/null +++ b/docs.it4i/anselm/compute-nodes.md @@ -0,0 +1,130 @@ +# Compute Nodes + +## Nodes Configuration + +Anselm is cluster of x86-64 Intel based nodes built on Bull Extreme Computing bullx technology. The cluster contains four types of compute nodes. + +### Compute Nodes Without Accelerator + +* 180 nodes +* 2880 cores in total +* two Intel Sandy Bridge E5-2665, 8-core, 2.4GHz processors per node +* 64 GB of physical memory per node +* one 500GB SATA 2,5” 7,2 krpm HDD per node +* bullx B510 blade servers +* cn[1-180] + +### Compute Nodes With GPU Accelerator + +* 23 nodes +* 368 cores in total +* two Intel Sandy Bridge E5-2470, 8-core, 2.3GHz processors per node +* 96 GB of physical memory per node +* one 500GB SATA 2,5” 7,2 krpm HDD per node +* GPU accelerator 1x NVIDIA Tesla Kepler K20 per node +* bullx B515 blade servers +* cn[181-203] + +### Compute Nodes With MIC Accelerator + +* 4 nodes +* 64 cores in total +* two Intel Sandy Bridge E5-2470, 8-core, 2.3GHz processors per node +* 96 GB of physical memory per node +* one 500GB SATA 2,5” 7,2 krpm HDD per node +* MIC accelerator 1x Intel Phi 5110P per node +* bullx B515 blade servers +* cn[204-207] + +### Fat Compute Nodes + +* 2 nodes +* 32 cores in total +* 2 Intel Sandy Bridge E5-2665, 8-core, 2.4GHz processors per node +* 512 GB of physical memory per node +* two 300GB SAS 3,5”15krpm HDD (RAID1) per node +* two 100GB SLC SSD per node +* bullx R423-E3 servers +* cn[208-209] + + +**Figure Anselm bullx B510 servers** + +### Compute Nodes Summary + +| Node type | Count | Range | Memory | Cores | [Access](resources-allocation-policy/) | +| -------------------------- | ----- | ----------- | ------ | ----------- | -------------------------------------- | +| Nodes without accelerator | 180 | cn[1-180] | 64GB | 16 @ 2.4GHz | qexp, qprod, qlong, qfree | +| Nodes with GPU accelerator | 23 | cn[181-203] | 96GB | 16 @ 2.3GHz | qgpu, qprod | +| Nodes with MIC accelerator | 4 | cn[204-207] | 96GB | 16 @ 2.3GHz | qmic, qprod | +| Fat compute nodes | 2 | cn[208-209] | 512GB | 16 @ 2.4GHz | qfat, qprod | + +## Processor Architecture + +Anselm is equipped with Intel Sandy Bridge processors Intel Xeon E5-2665 (nodes without accelerator and fat nodes) and Intel Xeon E5-2470 (nodes with accelerator). Processors support Advanced Vector Extensions (AVX) 256-bit instruction set. + +### Intel Sandy Bridge E5-2665 Processor + +* eight-core +* speed: 2.4 GHz, up to 3.1 GHz using Turbo Boost Technology +* peak performance: 19.2 GFLOP/s per core +* caches: + * L2: 256 KB per core + * L3: 20 MB per processor +* memory bandwidth at the level of the processor: 51.2 GB/s + +### Intel Sandy Bridge E5-2470 Processor + +* eight-core +* speed: 2.3 GHz, up to 3.1 GHz using Turbo Boost Technology +* peak performance: 18.4 GFLOP/s per core +* caches: + * L2: 256 KB per core + * L3: 20 MB per processor +* memory bandwidth at the level of the processor: 38.4 GB/s + +Nodes equipped with Intel Xeon E5-2665 CPU have set PBS resource attribute cpu_freq = 24, nodes equipped with Intel Xeon E5-2470 CPU have set PBS resource attribute cpu_freq = 23. 
+
+```bash
+$ qsub -A OPEN-0-0 -q qprod -l select=4:ncpus=16:cpu_freq=24 -I
+```
+
+In this example, we allocate 4 nodes, 16 cores at 2.4 GHz per node.
+
+Intel Turbo Boost Technology is used by default. You can disable it for all nodes of a job by using the resource attribute cpu_turbo_boost.
+
+```bash
+$ qsub -A OPEN-0-0 -q qprod -l select=4:ncpus=16 -l cpu_turbo_boost=0 -I
+```
+
+## Memory Architecture
+
+### Compute Node Without Accelerator
+
+* 2 sockets
+* Memory Controllers are integrated into processors.
+  * 8 DDR3 DIMMs per node
+  * 4 DDR3 DIMMs per CPU
+  * 1 DDR3 DIMM per channel
+  * Data rate support: up to 1600MT/s
+* Populated memory: 8 x 8 GB DDR3 DIMM 1600 MHz
+
+### Compute Node With GPU or MIC Accelerator
+
+* 2 sockets
+* Memory Controllers are integrated into processors.
+  * 6 DDR3 DIMMs per node
+  * 3 DDR3 DIMMs per CPU
+  * 1 DDR3 DIMM per channel
+  * Data rate support: up to 1600MT/s
+* Populated memory: 6 x 16 GB DDR3 DIMM 1600 MHz
+
+### Fat Compute Node
+
+* 2 sockets
+* Memory Controllers are integrated into processors.
+  * 16 DDR3 DIMMs per node
+  * 8 DDR3 DIMMs per CPU
+  * 2 DDR3 DIMMs per channel
+  * Data rate support: up to 1600MT/s
+* Populated memory: 16 x 32 GB DDR3 DIMM 1600 MHz
diff --git a/docs.it4i/anselm-cluster-documentation/environment-and-modules.md b/docs.it4i/anselm/environment-and-modules.md
similarity index 69%
rename from docs.it4i/anselm-cluster-documentation/environment-and-modules.md
rename to docs.it4i/anselm/environment-and-modules.md
index 0835ce008f277e982ecf3a297f40c43ecb60a46c..2aae813076a8f25d8b265cccb3d856f0dc8109fe 100644
--- a/docs.it4i/anselm-cluster-documentation/environment-and-modules.md
+++ b/docs.it4i/anselm/environment-and-modules.md
@@ -1,7 +1,6 @@
-Environment and Modules
-=======================
+# Environment and Modules

-### Environment Customization
+## Environment Customization

After logging in, you may want to configure the environment. Write your preferred path definitions, aliases, functions and module loads in the .bashrc file

@@ -17,22 +16,22 @@ fi
alias qs='qstat -a'
module load PrgEnv-gnu

-# Display informations to standard output - only in interactive ssh session
+# Display information to standard output - only in interactive ssh session
if [ -n "$SSH_TTY" ]
then
module list # Display loaded modules
fi
```

-!!! Note "Note"
-    Do not run commands outputting to standard output (echo, module list, etc) in .bashrc for non-interactive SSH sessions. It breaks fundamental functionality (scp, PBS) of your account! Conside utilization of SSH session interactivity for such commands as stated in the previous example.
+!!! note
+    Do not run commands outputting to standard output (echo, module list, etc) in .bashrc for non-interactive SSH sessions. It breaks fundamental functionality (scp, PBS) of your account! Consider utilizing SSH session interactivity for such commands, as shown in the previous example.

-### Application Modules
+## Application Modules

-In order to configure your shell for running particular application on Anselm we use Module package interface. 
+In order to configure your shell for running a particular application on Anselm, we use the Module package interface.

-!!! Note "Note"
-    The modules set up the application paths, library paths and environment variables for running particular application.
+!!! note
+    The modules set up the application paths, library paths and environment variables for running a particular application.

We also have a second modules repository.
This modules repository is created using tool called EasyBuild. On Salomon cluster, all modules will be build by this tool. If you want to use software from this modules repository, please follow instructions in section [Application Modules Path Expansion](environment-and-modules/#EasyBuild). @@ -44,7 +43,7 @@ To check available modules use $ module avail ``` -To load a module, for example the octave module use +To load a module, for example the octave module use ```bash $ module load octave @@ -76,10 +75,7 @@ PrgEnv-gnu sets up the GNU development environment in conjunction with the bullx PrgEnv-intel sets up the INTEL development environment in conjunction with the Intel MPI library -How to using modules in examples: -<tty-player controls src=/src/anselm/modules_anselm.ttyrec></tty-player> - -### Application Modules Path Expansion +## Application Modules Path Expansion All application modules on Salomon cluster (and further) will be build using tool called [EasyBuild](http://hpcugent.github.io/easybuild/ "EasyBuild"). In case that you want to use some applications that are build by EasyBuild already, you have to modify your MODULEPATH environment variable. diff --git a/docs.it4i/anselm/hardware-overview.md b/docs.it4i/anselm/hardware-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..f130bd152f8666dd30cf9d3a7021d04f4ffa99f3 --- /dev/null +++ b/docs.it4i/anselm/hardware-overview.md @@ -0,0 +1,60 @@ +# Hardware Overview + +The Anselm cluster consists of 209 computational nodes named cn[1-209] of which 180 are regular compute nodes, 23 GPU Kepler K20 accelerated nodes, 4 MIC Xeon Phi 5110P accelerated nodes and 2 fat nodes. Each node is a powerful x86-64 computer, equipped with 16 cores (two eight-core Intel Sandy Bridge processors), at least 64 GB RAM, and local hard drive. The user access to the Anselm cluster is provided by two login nodes login[1,2]. The nodes are interlinked by high speed InfiniBand and Ethernet networks. All nodes share 320 TB /home disk storage to store the user files. The 146 TB shared /scratch storage is available for the scratch data. + +The Fat nodes are equipped with large amount (512 GB) of memory. Virtualization infrastructure provides resources to run long term servers and services in virtual mode. Fat nodes and virtual servers may access 45 TB of dedicated block storage. Accelerated nodes, fat nodes, and virtualization infrastructure are available [upon request](https://support.it4i.cz/rt) made by a PI. + +Schematic representation of the Anselm cluster. Each box represents a node (computer) or storage capacity: + + + +The cluster compute nodes cn[1-207] are organized within 13 chassis. + +There are four types of compute nodes: + +* 180 compute nodes without the accelerator +* 23 compute nodes with GPU accelerator - equipped with NVIDIA Tesla Kepler K20 +* 4 compute nodes with MIC accelerator - equipped with Intel Xeon Phi 5110P +* 2 fat nodes - equipped with 512 GB RAM and two 100 GB SSD drives + +[More about Compute nodes](compute-nodes/). + +GPU and accelerated nodes are available upon request, see the [Resources Allocation Policy](resources-allocation-policy/). + +All these nodes are interconnected by fast InfiniBand network and Ethernet network. [More about the Network](network/). +Every chassis provides InfiniBand switch, marked **isw**, connecting all nodes in the chassis, as well as connecting the chassis to the upper level switches. + +All nodes share 360 TB /home disk storage to store user files. 
The 146 TB shared /scratch storage is available for the scratch data. These file systems are provided by the Lustre parallel file system. There is also local disk storage available on all compute nodes in /lscratch. [More about Storage](storage/).
+
+User access to the Anselm cluster is provided by two login nodes, login1 and login2, and the data mover node dm1. [More about accessing the cluster.](shell-and-data-access/)
+
+The parameters are summarized in the following tables:
+
+| **In general** | |
+| ------------------------------------------- | -------------------------------------------- |
+| Primary purpose | High Performance Computing |
+| Architecture of compute nodes | x86-64 |
+| Operating system | Linux |
+| [**Compute nodes**](compute-nodes/) | |
+| Total | 209 |
+| Processor cores | 16 (2 x 8 cores) |
+| RAM | min. 64 GB, min. 4 GB per core |
+| Local disk drive | yes - usually 500 GB |
+| Compute network | InfiniBand QDR, fully non-blocking, fat-tree |
+| w/o accelerator | 180, cn[1-180] |
+| GPU accelerated | 23, cn[181-203] |
+| MIC accelerated | 4, cn[204-207] |
+| Fat compute nodes | 2, cn[208-209] |
+| **In total** | |
+| Total theoretical peak performance (Rpeak) | 94 TFLOP/s |
+| Total max. LINPACK performance (Rmax) | 73 TFLOP/s |
+| Total amount of RAM | 15.136 TB |
+
+| Node | Processor | Memory | Accelerator |
+| ---------------- | --------------------------------------- | ------ | -------------------- |
+| w/o accelerator | 2 x Intel Sandy Bridge E5-2665, 2.4 GHz | 64 GB | - |
+| GPU accelerated | 2 x Intel Sandy Bridge E5-2470, 2.3 GHz | 96 GB | NVIDIA Kepler K20 |
+| MIC accelerated | 2 x Intel Sandy Bridge E5-2470, 2.3 GHz | 96 GB | Intel Xeon Phi 5110P |
+| Fat compute node | 2 x Intel Sandy Bridge E5-2665, 2.4 GHz | 512 GB | - |
+
+For more details, please refer to the [Compute nodes](compute-nodes/), [Storage](storage/), and [Network](network/).
diff --git a/docs.it4i/anselm-cluster-documentation/introduction.md b/docs.it4i/anselm/introduction.md
similarity index 51%
rename from docs.it4i/anselm-cluster-documentation/introduction.md
rename to docs.it4i/anselm/introduction.md
index ffdac28026d210c248e820eaee604733ae5af8e4..b0fcc5dee2262a48b31d6a9cd8cfbed9622a3f84 100644
--- a/docs.it4i/anselm-cluster-documentation/introduction.md
+++ b/docs.it4i/anselm/introduction.md
@@ -1,13 +1,11 @@
-Introduction
-============
+# Introduction

Welcome to the Anselm supercomputer cluster. The Anselm cluster consists of 209 compute nodes, totaling 3344 compute cores with 15 TB RAM, giving over 94 TFLOP/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 16 cores, at least 64 GB RAM, and a 500 GB hard disk drive. Nodes are interconnected by a fully non-blocking fat-tree InfiniBand network and equipped with Intel Sandy Bridge processors. A few nodes are also equipped with NVIDIA Kepler GPU or Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](hardware-overview/).

-The cluster runs bullx Linux ([bull](http://www.bull.com/bullx-logiciels/systeme-exploitation.html)) [operating system](software/operating-system/), which is compatible with the RedHat [ Linux family.](http://upload.wikimedia.org/wikipedia/commons/1/1b/Linux_Distribution_Timeline.svg) We have installed a wide range of software packages targeted at different scientific domains. These packages are accessible via the [modules environment](environment-and-modules/). 
+The cluster runs a bullx Linux [operating system](software/operating-system/), which is compatible with the RedHat [Linux family](http://upload.wikimedia.org/wikipedia/commons/1/1b/Linux_Distribution_Timeline.svg). We have installed a wide range of software packages targeted at different scientific domains. These packages are accessible via the [modules environment](environment-and-modules/).

User data shared file-system (HOME, 320 TB) and job data shared file-system (SCRATCH, 146 TB) are available to users.

The PBS Professional workload manager provides [computing resources allocations and job execution](resources-allocation-policy/).

-Read more on how to [apply for resources](../get-started-with-it4innovations/applying-for-resources/), [obtain login credentials,](../get-started-with-it4innovations/obtaining-login-credentials/obtaining-login-credentials/) and [access the cluster](shell-and-data-access/).
-
+Read more on how to [apply for resources](../general/applying-for-resources/), [obtain login credentials](../general/obtaining-login-credentials/obtaining-login-credentials/) and [access the cluster](shell-and-data-access/).
diff --git a/docs.it4i/anselm-cluster-documentation/job-priority.md b/docs.it4i/anselm/job-priority.md
similarity index 85%
rename from docs.it4i/anselm-cluster-documentation/job-priority.md
rename to docs.it4i/anselm/job-priority.md
index 454bbf200ae2a540f473765501cb768224ea1793..06c7e921d38a35fac318acc7485dcf2c1a015ddf 100644
--- a/docs.it4i/anselm-cluster-documentation/job-priority.md
+++ b/docs.it4i/anselm/job-priority.md
@@ -1,18 +1,16 @@
-Job scheduling
-==============
+# Job scheduling

-Job execution priority
----------------------
+## Job Execution Priority

The scheduler gives each job an execution priority and then uses this priority to select which job(s) to run.

Job execution priority on Anselm is determined by these job properties (in order of importance):

-1. queue priority
-2. fair-share priority
-3. eligible time
+1. queue priority
+1. fair-share priority
+1. eligible time

-### Queue priority
+### Queue Priority

Queue priority is the priority of the queue in which the job waits before execution.

@@ -20,7 +18,7 @@ Queue priority has the biggest impact on job execution priority. Execution prior

Queue priorities can be seen at <https://extranet.it4i.cz/anselm/queues>

-### Fair-share priority
+### Fair-Share Priority

Fair-share priority is a priority calculated from recent usage of resources. It is calculated per project; all members of a project share the same fair-share priority. Projects with higher recent usage have a lower fair-share priority than projects with lower or no recent usage.

@@ -31,18 +29,18 @@
Fair-share priority is calculated as

where MAX_FAIRSHARE has value 1E6,
-usage*Project* is cumulated usage by all members of selected project,
-usage*Total* is total usage by all users, by all projects.
+usage<sub>Project</sub> is the cumulated usage by all members of the selected project,
+usage<sub>Total</sub> is the total usage by all users, across all projects.

Usage counts allocated core-hours (`ncpus x walltime`). Usage decays, being cut in half periodically, at the interval of 168 hours (one week). Jobs queued in the qexp queue are not counted into the project's usage.

-!!! Note "Note"
-    Calculated usage and fair-share priority can be seen at <https://extranet.it4i.cz/anselm/projects>.
+!!! note
+    Calculated usage and fair-share priority can be seen at <https://extranet.it4i.cz/anselm/projects>.
Calculated fair-share priority can be also seen as Resource_List.fairshare attribute of a job. -###Eligible time +### Eligible Time Eligible time is amount (in seconds) of eligible time job accrued while waiting to run. Jobs with higher eligible time gains higher priority. @@ -66,7 +64,7 @@ The scheduler makes a list of jobs to run in order of execution priority. Schedu It means, that jobs with lower execution priority can be run before jobs with higher execution priority. -!!! Note "Note" - It is **very beneficial to specify the walltime** when submitting jobs. +!!! note + It is **very beneficial to specify the walltime** when submitting jobs. Specifying more accurate walltime enables better scheduling, better execution times and better resource usage. Jobs with suitable (small) walltime could be backfilled - and overtake job(s) with higher priority. diff --git a/docs.it4i/anselm-cluster-documentation/job-submission-and-execution.md b/docs.it4i/anselm/job-submission-and-execution.md similarity index 77% rename from docs.it4i/anselm-cluster-documentation/job-submission-and-execution.md rename to docs.it4i/anselm/job-submission-and-execution.md index b500a5a2be7cc0275961858008136d8de903b1c7..b0ea19bd17cecb7fb2199c6112e0e0340a1d0b1a 100644 --- a/docs.it4i/anselm-cluster-documentation/job-submission-and-execution.md +++ b/docs.it4i/anselm/job-submission-and-execution.md @@ -1,19 +1,18 @@ -Job submission and execution -============================ +# Job submission and execution + +## Job Submission -Job Submission --------------- When allocating computational resources for the job, please specify -1. suitable queue for your job (default is qprod) -2. number of computational nodes required -3. number of cores per node required -4. maximum wall time allocated to your calculation, note that jobs exceeding maximum wall time will be killed -5. Project ID -6. Jobscript or interactive switch +1. suitable queue for your job (default is qprod) +1. number of computational nodes required +1. number of cores per node required +1. maximum wall time allocated to your calculation, note that jobs exceeding maximum wall time will be killed +1. Project ID +1. Jobscript or interactive switch -!!! Note "Note" - Use the **qsub** command to submit your job to a queue for allocation of the computational resources. +!!! note + Use the **qsub** command to submit your job to a queue for allocation of the computational resources. Submit the job using the qsub command: @@ -41,15 +40,15 @@ In this example, we allocate 4 nodes, 16 cores per node, for 1 hour. We allocate $ qsub -A OPEN-0-0 -q qnvidia -l select=10:ncpus=16 ./myjob ``` -In this example, we allocate 10 nvidia accelerated nodes, 16 cores per node, for 24 hours. We allocate these resources via the qnvidia queue. Jobscript myjob will be executed on the first node in the allocation. +In this example, we allocate 10 nvidia accelerated nodes, 16 cores per node, for 24 hours. We allocate these resources via the qnvidia queue. Jobscript myjob will be executed on the first node in the allocation. ```bash $ qsub -A OPEN-0-0 -q qfree -l select=10:ncpus=16 ./myjob ``` -In this example, we allocate 10 nodes, 16 cores per node, for 12 hours. We allocate these resources via the qfree queue. It is not required that the project OPEN-0-0 has any available resources left. Consumed resources are still accounted for. Jobscript myjob will be executed on the first node in the allocation. +In this example, we allocate 10 nodes, 16 cores per node, for 12 hours. 
We allocate these resources via the qfree queue. It is not required that the project OPEN-0-0 has any available resources left. Consumed resources are still accounted for. Jobscript myjob will be executed on the first node in the allocation. -All qsub options may be [saved directly into the jobscript](job-submission-and-execution/#PBSsaved). In such a case, no options to qsub are needed. +All qsub options may be [saved directly into the jobscript](#example-jobscript-for-mpi-calculation-with-preloaded-inputs). In such a case, no options to qsub are needed. ```bash $ qsub ./myjob @@ -61,10 +60,9 @@ By default, the PBS batch system sends an e-mail only when the job is aborted. D $ qsub -m n ``` -Advanced job placement ----------------------- +## Advanced Job Placement -### Placement by name +### Placement by Name Specific nodes may be allocated via the PBS @@ -74,14 +72,14 @@ qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=16:host=cn171+1:ncpus=16:host=cn172 In this example, we allocate nodes cn171 and cn172, all 16 cores per node, for 24 hours. Consumed resources will be accounted to the Project identified by Project ID OPEN-0-0. The resources will be available interactively. -### Placement by CPU type +### Placement by CPU Type Nodes equipped with Intel Xeon E5-2665 CPU have base clock frequency 2.4GHz, nodes equipped with Intel Xeon E5-2470 CPU have base frequency 2.3 GHz (see section Compute Nodes for details). Nodes may be selected via the PBS resource attribute cpu_freq . -|CPU Type|base freq.|Nodes|cpu_freq attribute| -|---|---|---|---| -|Intel Xeon E5-2665|2.4GHz|cn[1-180], cn[208-209]|24| -|Intel Xeon E5-2470|2.3GHz|cn[181-207]|23| +| CPU Type | base freq. | Nodes | cpu_freq attribute | +| ------------------ | ---------- | ---------------------- | ------------------ | +| Intel Xeon E5-2665 | 2.4GHz | cn[1-180], cn[208-209] | 24 | +| Intel Xeon E5-2470 | 2.3GHz | cn[181-207] | 23 | ```bash $ qsub -A OPEN-0-0 -q qprod -l select=4:ncpus=16:cpu_freq=24 -I @@ -89,7 +87,7 @@ $ qsub -A OPEN-0-0 -q qprod -l select=4:ncpus=16:cpu_freq=24 -I In this example, we allocate 4 nodes, 16 cores, selecting only the nodes with Intel Xeon E5-2665 CPU. -### Placement by IB switch +### Placement by IB Switch Groups of computational nodes are connected to chassis integrated Infiniband switches. These switches form the leaf switch layer of the [Infiniband network](../network/) fat tree topology. Nodes sharing the leaf switch can communicate most efficiently. Sharing the same switch prevents hops in the network and provides for unbiased, most efficient network communication. @@ -103,10 +101,9 @@ We recommend allocating compute nodes of a single switch when best possible comp In this example, we request all the 18 nodes sharing the isw11 switch for 24 hours. Full chassis will be allocated. -Advanced job handling ---------------------- +## Advanced Job Handling -### Selecting Turbo Boost off +### Selecting Turbo Boost Off Intel Turbo Boost Technology is on by default. We strongly recommend keeping the default. @@ -118,7 +115,7 @@ If necessary (such as in case of benchmarking) you can disable the Turbo for all More about the Intel Turbo Boost in the TurboBoost section -### Advanced examples +### Advanced Examples In the following example, we select an allocation for benchmarking a very special and demanding MPI program. 
We request Turbo off, 2 full chassis of compute nodes (nodes sharing the same IB switches) for 30 minutes: @@ -129,14 +126,14 @@ In the following example, we select an allocation for benchmarking a very specia -N Benchmark ./mybenchmark ``` -The MPI processes will be distributed differently on the nodes connected to the two switches. On the isw10 nodes, we will run 1 MPI process per node 16 threads per process, on isw20 nodes we will run 16 plain MPI processes. +The MPI processes will be distributed differently on the nodes connected to the two switches. On the isw10 nodes, we will run 1 MPI process per node 16 threads per process, on isw20 nodes we will run 16 plain MPI processes. Although this example is somewhat artificial, it demonstrates the flexibility of the qsub command options. -Job Management --------------- -!!! Note "Note" - Check status of your jobs using the **qstat** and **check-pbs-jobs** commands +## Job Management + +!!! note + Check status of your jobs using the **qstat** and **check-pbs-jobs** commands ```bash $ qstat -a @@ -151,15 +148,15 @@ Example: $ qstat -a srv11: - Req'd Req'd Elap -Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time + Req'd Req'd Elap +Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time --------------- -------- -- |---|---| ------ --- --- ------ ----- - ----- -16287.srv11 user1 qlong job1 6183 4 64 -- 144:0 R 38:25 -16468.srv11 user1 qlong job2 8060 4 64 -- 144:0 R 17:44 -16547.srv11 user2 qprod job3x 13516 2 32 -- 48:00 R 00:58 +16287.srv11 user1 qlong job1 6183 4 64 -- 144:0 R 38:25 +16468.srv11 user1 qlong job2 8060 4 64 -- 144:0 R 17:44 +16547.srv11 user2 qprod job3x 13516 2 32 -- 48:00 R 00:58 ``` -In this example user1 and user2 are running jobs named job1, job2 and job3x. The jobs job1 and job2 are using 4 nodes, 16 cores per node each. The job1 already runs for 38 hours and 25 minutes, job2 for 17 hours 44 minutes. The job1 already consumed 64*38.41 = 2458.6 core hours. The job3x already consumed 0.96*32 = 30.93 core hours. These consumed core hours will be accounted on the respective project accounts, regardless of whether the allocated cores were actually used for computations. +In this example user1 and user2 are running jobs named job1, job2 and job3x. The jobs job1 and job2 are using 4 nodes, 16 cores per node each. The job1 already runs for 38 hours and 25 minutes, job2 for 17 hours 44 minutes. The job1 already consumed `64 x 38.41 = 2458.6` core hours. The job3x already consumed `0.96 x 32 = 30.93` core hours. These consumed core hours will be accounted on the respective project accounts, regardless of whether the allocated cores were actually used for computations. Check status of your jobs using check-pbs-jobs command. Check presence of user's PBS jobs' processes on execution hosts. Display load, processes. Display job standard and error output. Continuously display (tail -f) job standard or error output. @@ -216,8 +213,8 @@ Run loop 3 In this example, we see actual output (some iteration loops) of the job 35141.dm2 -!!! Note "Note" - Manage your queued or running jobs, using the **qhold**, **qrls**, **qdel**, **qsig** or **qalter** commands +!!! note + Manage your queued or running jobs, using the **qhold**, **qrls**, **qdel**, **qsig** or **qalter** commands You may release your allocation at any time, using qdel command @@ -237,28 +234,27 @@ Learn more by reading the pbs man page $ man pbs_professional ``` -Job Execution -------------- +## Job Execution ### Jobscript -!!! 
Note "Note" - Prepare the jobscript to run batch jobs in the PBS queue system +!!! note + Prepare the jobscript to run batch jobs in the PBS queue system The Jobscript is a user made script, controlling sequence of commands for executing the calculation. It is often written in bash, other scripts may be used as well. The jobscript is supplied to PBS **qsub** command as an argument and executed by the PBS Professional workload manager. -!!! Note "Note" - The jobscript or interactive shell is executed on first of the allocated nodes. +!!! note + The jobscript or interactive shell is executed on first of the allocated nodes. ```bash $ qsub -q qexp -l select=4:ncpus=16 -N Name0 ./myjob $ qstat -n -u username srv11: - Req'd Req'd Elap -Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time + Req'd Req'd Elap +Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time --------------- -------- -- |---|---| ------ --- --- ------ ----- - ----- -15209.srv11 username qexp Name0 5530 4 64 -- 01:00 R 00:00 +15209.srv11 username qexp Name0 5530 4 64 -- 01:00 R 00:00 cn17/0*16+cn108/0*16+cn109/0*16+cn110/0*16 ``` @@ -277,8 +273,8 @@ $ pwd In this example, 4 nodes were allocated interactively for 1 hour via the qexp queue. The interactive shell is executed in the home directory. -!!! Note "Note" - All nodes within the allocation may be accessed via ssh. Unallocated nodes are not accessible to user. +!!! note + All nodes within the allocation may be accessed via ssh. Unallocated nodes are not accessible to user. The allocated nodes are accessible via ssh from login nodes. The nodes may access each other via ssh as well. @@ -309,8 +305,8 @@ In this example, the hostname program is executed via pdsh from the interactive ### Example Jobscript for MPI Calculation -!!! Note "Note" - Production jobs must use the /scratch directory for I/O +!!! note + Production jobs must use the /scratch directory for I/O The recommended way to run production jobs is to change to /scratch directory early in the jobscript, copy all inputs to /scratch, execute the calculations and copy outputs to home directory. @@ -341,13 +337,15 @@ exit In this example, some directory on the /home holds the input file input and executable mympiprog.x . We create a directory myjob on the /scratch filesystem, copy input and executable files from the /home directory where the qsub was invoked ($PBS_O_WORKDIR) to /scratch, execute the MPI programm mympiprog.x and copy the output file back to the /home directory. The mympiprog.x is executed as one process per node, on all allocated nodes. -!!! Note "Note" - Consider preloading inputs and executables onto [shared scratch](storage/) before the calculation starts. +!!! note + Consider preloading inputs and executables onto [shared scratch](storage/) before the calculation starts. In some cases, it may be impractical to copy the inputs to scratch and outputs to home. This is especially true when very large input and output files are expected, or when the files should be reused by a subsequent calculation. In such a case, it is users responsibility to preload the input files on shared /scratch before the job submission and retrieve the outputs manually, after all calculations are finished. -!!! Note "Note" - Store the qsub options within the jobscript. Use **mpiprocs** and **ompthreads** qsub options to control the MPI job execution. +!!! note + Store the qsub options within the jobscript. Use **mpiprocs** and **ompthreads** qsub options to control the MPI job execution. 
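+For illustration, the same process placement can also be requested directly on the qsub command line; a minimal sketch (the node count is illustrative, matching the example below):
+
+```bash
+$ qsub -q qprod -l select=100:ncpus=16:mpiprocs=1:ompthreads=16 ./myjob
+```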
+ +### Example Jobscript for MPI Calculation With Preloaded Inputs Example jobscript for an MPI job with preloaded inputs and executables, options for qsub are stored within the script : @@ -374,13 +372,13 @@ exit In this example, input and executable files are assumed preloaded manually in /scratch/$USER/myjob directory. Note the **mpiprocs** and **ompthreads** qsub options, controlling behavior of the MPI execution. The mympiprog.x is executed as one process per node, on all 100 allocated nodes. If mympiprog.x implements OpenMP threads, it will run 16 threads per node. -More information is found in the [Running OpenMPI](../software/mpi/Running_OpenMPI/) and [Running MPICH2](../software/mpi/running-mpich2/) +More information is found in the [Running OpenMPI](software/mpi/Running_OpenMPI/) and [Running MPICH2](software/mpi/running-mpich2/) sections. ### Example Jobscript for Single Node Calculation -!!! Note "Note" - Local scratch directory is often useful for single node jobs. Local scratch will be deleted immediately after the job ends. +!!! note + Local scratch directory is often useful for single node jobs. Local scratch will be deleted immediately after the job ends. Example jobscript for single node calculation, using [local scratch](storage/) on the node: diff --git a/docs.it4i/anselm-cluster-documentation/network.md b/docs.it4i/anselm/network.md similarity index 80% rename from docs.it4i/anselm-cluster-documentation/network.md rename to docs.it4i/anselm/network.md index 24fe0881bab6b1629a3096493d409f5c5b0702b9..a2af06f97a85472d327eeffc4a743d5eb70d6bb1 100644 --- a/docs.it4i/anselm-cluster-documentation/network.md +++ b/docs.it4i/anselm/network.md @@ -1,33 +1,31 @@ -Network -======= +# Network All compute and login nodes of Anselm are interconnected by [InfiniBand](http://en.wikipedia.org/wiki/InfiniBand) QDR network and by Gigabit [Ethernet](http://en.wikipedia.org/wiki/Ethernet) network. Both networks may be used to transfer user data. -InfiniBand Network ------------------- +## InfiniBand Network + All compute and login nodes of Anselm are interconnected by a high-bandwidth, low-latency [InfiniBand](http://en.wikipedia.org/wiki/InfiniBand) QDR network (IB 4 x QDR, 40 Gbps). The network topology is a fully non-blocking fat-tree. The compute nodes may be accessed via the InfiniBand network using ib0 network interface, in address range 10.2.1.1-209. The MPI may be used to establish native InfiniBand connection among the nodes. -!!! Note "Note" - The network provides **2170 MB/s** transfer rates via the TCP connection (single stream) and up to **3600 MB/s** via native InfiniBand protocol. +!!! note + The network provides **2170 MB/s** transfer rates via the TCP connection (single stream) and up to **3600 MB/s** via native InfiniBand protocol. The Fat tree topology ensures that peak transfer rates are achieved between any two nodes, independent of network traffic exchanged among other nodes concurrently. -Ethernet Network ----------------- +## Ethernet Network + The compute nodes may be accessed via the regular Gigabit Ethernet network interface eth0, in address range 10.1.1.1-209, or by using aliases cn1-cn209. The network provides **114 MB/s** transfer rates via the TCP connection. 
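+To verify which interface serves which address range, one may query a node's interfaces directly; a sketch (assumes the iproute2 `ip` tool is available on the node):
+
+```bash
+$ ssh cn110 ip addr show ib0     # InfiniBand interface, 10.2.1.x address range
+$ ssh cn110 ip addr show eth0    # Gigabit Ethernet interface, 10.1.1.x address range
+```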
-Example -------- +## Example ```bash $ qsub -q qexp -l select=4:ncpus=16 -N Name0 ./myjob $ qstat -n -u username - Req'd Req'd Elap -Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time + Req'd Req'd Elap +Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time --------------- -------- -- |---|---| ------ --- --- ------ ----- - ----- -15209.srv11 username qexp Name0 5530 4 64 -- 01:00 R 00:00 +15209.srv11 username qexp Name0 5530 4 64 -- 01:00 R 00:00 cn17/0*16+cn108/0*16+cn109/0*16+cn110/0*16 $ ssh 10.2.1.110 diff --git a/docs.it4i/anselm-cluster-documentation/prace.md b/docs.it4i/anselm/prace.md similarity index 68% rename from docs.it4i/anselm-cluster-documentation/prace.md rename to docs.it4i/anselm/prace.md index 74520cdd222c823197295e242c7aa34e942983b8..c4d3bce0f399ff78168f86fc8fb0c5f3e0a95999 100644 --- a/docs.it4i/anselm-cluster-documentation/prace.md +++ b/docs.it4i/anselm/prace.md @@ -1,28 +1,26 @@ -PRACE User Support -================== +# PRACE User Support -Intro ------ -PRACE users coming to Anselm as to TIER-1 system offered through the DECI calls are in general treated as standard users and so most of the general documentation applies to them as well. This section shows the main differences for quicker orientation, but often uses references to the original documentation. PRACE users who don't undergo the full procedure (including signing the IT4I AuP on top of the PRACE AuP) will not have a password and thus access to some services intended for regular users. This can lower their comfort, but otherwise they should be able to use the TIER-1 system as intended. Please see the [Obtaining Login Credentials section](../get-started-with-it4innovations/obtaining-login-credentials/obtaining-login-credentials/), if the same level of access is required. +## Intro + +PRACE users coming to Anselm as to TIER-1 system offered through the DECI calls are in general treated as standard users and so most of the general documentation applies to them as well. This section shows the main differences for quicker orientation, but often uses references to the original documentation. PRACE users who don't undergo the full procedure (including signing the IT4I AuP on top of the PRACE AuP) will not have a password and thus access to some services intended for regular users. This can lower their comfort, but otherwise they should be able to use the TIER-1 system as intended. Please see the [Obtaining Login Credentials section](../general/obtaining-login-credentials/obtaining-login-credentials/), if the same level of access is required. All general [PRACE User Documentation](http://www.prace-ri.eu/user-documentation/) should be read before continuing reading the local documentation here. -Help and Support --------------------- +## Help and Support + If you have any troubles, need information, request support or want to install additional software, please use [PRACE Helpdesk](http://www.prace-ri.eu/helpdesk-guide264/). Information about the local services are provided in the [introduction of general user documentation](introduction/). Please keep in mind, that standard PRACE accounts don't have a password to access the web interface of the local (IT4Innovations) request tracker and thus a new ticket should be created by sending an e-mail to support[at]it4i.cz. 
-Obtaining Login Credentials ---------------------------- +## Obtaining Login Credentials + In general PRACE users already have a PRACE account setup through their HOMESITE (institution from their country) as a result of rewarded PRACE project proposal. This includes signed PRACE AuP, generated and registered certificates, etc. If there's a special need a PRACE user can get a standard (local) account at IT4Innovations. To get an account on the Anselm cluster, the user needs to obtain the login credentials. The procedure is the same as for general users of the cluster, so please see the corresponding section of the general documentation here. -Accessing the cluster ---------------------- +## Accessing the Cluster -### Access with GSI-SSH +### Access With GSI-SSH For all PRACE users the method for interactive access (login) and data transfer based on grid services from Globus Toolkit (GSI SSH and GridFTP) is supported. @@ -30,11 +28,11 @@ The user will need a valid certificate and to be present in the PRACE LDAP (plea Most of the information needed by PRACE users accessing the Anselm TIER-1 system can be found here: -- [General user's FAQ](http://www.prace-ri.eu/Users-General-FAQs) -- [Certificates FAQ](http://www.prace-ri.eu/Certificates-FAQ) -- [Interactive access using GSISSH](http://www.prace-ri.eu/Interactive-Access-Using-gsissh) -- [Data transfer with GridFTP](http://www.prace-ri.eu/Data-Transfer-with-GridFTP-Details) -- [Data transfer with gtransfer](http://www.prace-ri.eu/Data-Transfer-with-gtransfer) +* [General user's FAQ](http://www.prace-ri.eu/Users-General-FAQs) +* [Certificates FAQ](http://www.prace-ri.eu/Certificates-FAQ) +* [Interactive access using GSISSH](http://www.prace-ri.eu/Interactive-Access-Using-gsissh) +* [Data transfer with GridFTP](http://www.prace-ri.eu/Data-Transfer-with-GridFTP-Details) +* [Data transfer with gtransfer](http://www.prace-ri.eu/Data-Transfer-with-gtransfer) Before you start to use any of the services don't forget to create a proxy certificate from your certificate: @@ -50,15 +48,15 @@ To check whether your proxy certificate is still valid (by default it's valid 12 To access Anselm cluster, two login nodes running GSI SSH service are available. The service is available from public Internet as well as from the internal PRACE network (accessible only from other PRACE partners). -**Access from PRACE network:** +#### Access From PRACE Network: It is recommended to use the single DNS name anselm-prace.it4i.cz which is distributed between the two login nodes. If needed, user can login directly to one of the login nodes. The addresses are: -|Login address|Port|Protocol|Login node| -|---|---|---|---| -|anselm-prace.it4i.cz|2222|gsissh|login1 or login2| -|login1-prace.anselm.it4i.cz|2222|gsissh|login1| -|login2-prace.anselm.it4i.cz|2222|gsissh|login2| +| Login address | Port | Protocol | Login node | +| --------------------------- | ---- | -------- | ---------------- | +| anselm-prace.it4i.cz | 2222 | gsissh | login1 or login2 | +| login1-prace.anselm.it4i.cz | 2222 | gsissh | login1 | +| login2-prace.anselm.it4i.cz | 2222 | gsissh | login2 | ```bash $ gsissh -p 2222 anselm-prace.it4i.cz @@ -70,15 +68,15 @@ When logging from other PRACE system, the prace_service script can be used: $ gsissh `prace_service -i -s anselm` ``` -**Access from public Internet:** +#### Access From Public Internet: It is recommended to use the single DNS name anselm.it4i.cz which is distributed between the two login nodes. 
If needed, user can login directly to one of the login nodes. The addresses are: -|Login address|Port|Protocol|Login node| -|---|---|---|---| -|anselm.it4i.cz|2222|gsissh|login1 or login2| -|login1.anselm.it4i.cz|2222|gsissh|login1| -|login2.anselm.it4i.cz|2222|gsissh|login2| +| Login address | Port | Protocol | Login node | +| --------------------- | ---- | -------- | ---------------- | +| anselm.it4i.cz | 2222 | gsissh | login1 or login2 | +| login1.anselm.it4i.cz | 2222 | gsissh | login1 | +| login2.anselm.it4i.cz | 2222 | gsissh | login2 | ```bash $ gsissh -p 2222 anselm.it4i.cz @@ -102,7 +100,7 @@ Although the preferred and recommended file transfer mechanism is [using GridFTP $ gsiscp -P 2222 anselm-prace.it4i.cz:_ANSELM_PATH_TO_YOUR_FILE_ _LOCAL_PATH_TO_YOUR_FILE_ ``` -### Access to X11 applications (VNC) +### Access to X11 Applications (VNC) If the user needs to run X11 based graphical application and does not have a X11 server, the applications can be run using VNC service. If the user is using regular SSH based access, please see the section in general documentation. @@ -112,26 +110,26 @@ If the user uses GSI SSH based access, then the procedure is similar to the SSH $ gsissh -p 2222 anselm.it4i.cz -L 5961:localhost:5961 ``` -### Access with SSH +### Access With SSH After successful obtainment of login credentials for the local IT4Innovations account, the PRACE users can access the cluster as regular users using SSH. For more information please see the section in general documentation. -File transfers ------------------- +## File Transfers + PRACE users can use the same transfer mechanisms as regular users (if they've undergone the full registration procedure). For information about this, please see the section in the general documentation. Apart from the standard mechanisms, for PRACE users to transfer data to/from Anselm cluster, a GridFTP server running Globus Toolkit GridFTP service is available. The service is available from public Internet as well as from the internal PRACE network (accessible only from other PRACE partners). There's one control server and three backend servers for striping and/or backup in case one of them would fail. 
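Since several backend servers are available for striping, large transfers may benefit from parallel data streams. A hedged illustration using the standard globus-url-copy option `-p` (number of parallel streams); the endpoint and paths are the same placeholders used in the examples below:

```bash
$ globus-url-copy -p 4 file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp-prace.anselm.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_
```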
-**Access from PRACE network:** -|Login address|Port|Node role| -|---|---|---| -|gridftp-prace.anselm.it4i.cz|2812|Front end /control server| -|login1-prace.anselm.it4i.cz|2813|Backend / data mover server| -|login2-prace.anselm.it4i.cz|2813|Backend / data mover server| -|dm1-prace.anselm.it4i.cz|2813|Backend / data mover server| +### Access From PRACE Network +| Login address | Port | Node role | +| ---------------------------- | ---- | --------------------------- | +| gridftp-prace.anselm.it4i.cz | 2812 | Front end / control server | +| login1-prace.anselm.it4i.cz | 2813 | Backend / data mover server | +| login2-prace.anselm.it4i.cz | 2813 | Backend / data mover server | +| dm1-prace.anselm.it4i.cz | 2813 | Backend / data mover server | Copy files **to** Anselm by running the following commands on your local machine: @@ -139,7 +137,7 @@ Copy files **to** Anselm by running the following commands on your local machine $ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp-prace.anselm.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ ``` -Or by using prace_service script: +Or by using the prace_service script: ```bash $ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://`prace_service -i -f anselm`/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ ``` Copy files **from** Anselm: @@ -151,20 +149,20 @@ Copy files **from** Anselm: $ globus-url-copy gsiftp://gridftp-prace.anselm.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_ ``` -Or by using prace_service script: +Or by using the prace_service script: ```bash $ globus-url-copy gsiftp://`prace_service -i -f anselm`/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_ ``` -**Access from public Internet:** -|Login address|Port|Node role| -|---|---|---| -|gridftp.anselm.it4i.cz|2812|Front end /control server| -|login1.anselm.it4i.cz|2813|Backend / data mover server| -|login2.anselm.it4i.cz|2813|Backend / data mover server| -|dm1.anselm.it4i.cz|2813|Backend / data mover server| +### Access From Public Internet +| Login address | Port | Node role | +| ---------------------- | ---- | --------------------------- | +| gridftp.anselm.it4i.cz | 2812 | Front end / control server | +| login1.anselm.it4i.cz | 2813 | Backend / data mover server | +| login2.anselm.it4i.cz | 2813 | Backend / data mover server | +| dm1.anselm.it4i.cz | 2813 | Backend / data mover server | Copy files **to** Anselm by running the following commands on your local machine: @@ -172,7 +170,7 @@ Copy files **to** Anselm by running the following commands on your local machine $ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp.anselm.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ ``` -Or by using prace_service script: +Or by using the prace_service script: ```bash $ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://`prace_service -e -f anselm`/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ ``` Copy files **from** Anselm: @@ -184,7 +182,7 @@ Copy files **from** Anselm: $ globus-url-copy gsiftp://gridftp.anselm.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_ ``` -Or by using prace_service script: +Or by using the prace_service script: ```bash $ globus-url-copy gsiftp://`prace_service -e -f anselm`/home/prace/_YOUR_ACCOUNT_ON_ANSELM_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_ @@ -192,16 +190,16 @@ Or by using prace_service script: Generally both shared file systems are available through GridFTP: -|File system mount
point|Filesystem|Comment| -|---|---|---| -|/home|Lustre|Default HOME directories of users in format /home/prace/login/| -|/scratch|Lustre|Shared SCRATCH mounted on the whole cluster| +| File system mount point | Filesystem | Comment | +| ----------------------- | ---------- | -------------------------------------------------------------- | +| /home | Lustre | Default HOME directories of users in format /home/prace/login/ | +| /scratch | Lustre | Shared SCRATCH mounted on the whole cluster | More information about the shared file systems is available [here](storage/). -Usage of the cluster -------------------- - There are some limitations for PRACE user when using the cluster. By default PRACE users aren't allowed to access special queues in the PBS Pro to have high priority or exclusive access to some special equipment like accelerated nodes and high memory (fat) nodes. There may be also restrictions obtaining a working license for the commercial software installed on the cluster, mostly because of the license agreement or because of insufficient amount of licenses. +## Usage of the Cluster + +There are some limitations for PRACE users when using the cluster. By default, PRACE users aren't allowed to access special queues in PBS Pro to have high priority or exclusive access to some special equipment like accelerated nodes and high memory (fat) nodes. There may also be restrictions on obtaining a working license for the commercial software installed on the cluster, mostly because of the license agreement or because of an insufficient number of licenses. For production runs always use scratch file systems, either the global shared or the local ones. The available file systems are described [here](hardware-overview/). @@ -209,7 +207,7 @@ For production runs always use scratch file systems, either the global shared or All system wide installed software on the cluster is made available to the users via the modules. The information about the environment and modules usage is in this [section of general documentation](environment-and-modules/). -PRACE users can use the "prace" module to use the [PRACE Common Production Environment](http://www.prace-ri.eu/PRACE-common-production). +PRACE users can use the "prace" module to use the [PRACE Common Production Environment](http://www.prace-ri.eu/prace-common-production-environment/). ```bash $ module load prace ``` @@ -221,11 +219,11 @@ General information about the resource allocation, job queuing and job execution For PRACE users, the default production run queue is "qprace". PRACE users can also use two other queues "qexp" and "qfree".
-|queue|Active project|Project resources|Nodes|priority|authorization|walltime| -|---|---|---|---|---|---|---| -|**qexp** Express queue|no|none required|2 reserved, 8 total|high|no|1 / 1h| -|**qprace** Production queue|yes|> 0|178 w/o accelerator|medium|no|24 / 48 h| -|**qfree** Free resource queue|yes|none required|178 w/o accelerator|very low|no| 12 / 12 h| +| queue | Active project | Project resources | Nodes | priority | authorization | walltime | +| ----------------------------- | -------------- | ----------------- | ------------------- | -------- | ------------- | --------- | +| **qexp** Express queue | no | none required | 2 reserved, 8 total | high | no | 1 / 1h | +| **qprace** Production queue | yes | > 0 | 178 w/o accelerator | medium | no | 24 / 48 h | +| **qfree** Free resource queue | yes | none required | 178 w/o accelerator | very low | no | 12 / 12 h | **qprace**, the PRACE: This queue is intended for normal production runs. It is required that active project with nonzero remaining resources is specified to enter the qprace. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qprace is 12 hours. If the job needs longer time, it must use checkpoint/restart functionality. @@ -235,10 +233,13 @@ The resources that are currently subject to accounting are the core hours. The c PRACE users should check their project accounting using the [PRACE Accounting Tool (DART)](http://www.prace-ri.eu/accounting-report-tool/). -Users who have undergone the full local registration procedure (including signing the IT4Innovations Acceptable Use Policy) and who have received local password may check at any time, how many core-hours have been consumed by themselves and their projects using the command "it4ifree". Please note that you need to know your user password to use the command and that the displayed core hours are "system core hours" which differ from PRACE "standardized core hours". +Users who have undergone the full local registration procedure (including signing the IT4Innovations Acceptable Use Policy) and who have received a local password may check at any time how many core-hours have been consumed by themselves and their projects using the command "it4ifree". + +!!! note + You need to know your user password to use the command. Displayed core hours are "system core hours" which differ from PRACE "standardized core hours". -!!! Note "Note" - The **it4ifree** command is a part of it4i.portal.clients package, located here: <https://pypi.python.org/pypi/it4i.portal.clients> +!!!
hint + The **it4ifree** command is a part of it4i.portal.clients package, located here: <https://pypi.python.org/pypi/it4i.portal.clients> ```bash $ it4ifree diff --git a/docs.it4i/anselm-cluster-documentation/remote-visualization.md b/docs.it4i/anselm/remote-visualization.md similarity index 54% rename from docs.it4i/anselm-cluster-documentation/remote-visualization.md rename to docs.it4i/anselm/remote-visualization.md index 0de276a0a1f432b62d48844b5de5ec5eced32487..7b0149fce735ac31592baa6f232cf3be2ffc5a54 100644 --- a/docs.it4i/anselm-cluster-documentation/remote-visualization.md +++ b/docs.it4i/anselm/remote-visualization.md @@ -1,34 +1,31 @@ -Remote visualization service ============================ +# Remote Visualization Service + +## Introduction -Introduction ------------- The goal of this service is to provide the users a GPU accelerated use of OpenGL applications, especially for pre- and post- processing work, where not only the GPU performance is needed but also fast access to the shared file systems of the cluster and a reasonable amount of RAM. The service is based on integration of open source tools VirtualGL and TurboVNC together with the cluster's job scheduler PBS Professional. Currently two compute nodes are dedicated for this service with following configuration for each node: -|[**Visualization node configuration**](compute-nodes/)|| -|---|---| -|CPU|2 x Intel Sandy Bridge E5-2670, 2.6 GHz| -|Processor cores|16 (2 x 8 cores)| -|RAM|64 GB, min. 4 GB per core| -|GPU|NVIDIA Quadro 4000, 2 GB RAM| -|Local disk drive|yes - 500 GB| -|Compute network|InfiniBand QDR| +| [**Visualization node configuration**](compute-nodes/) | | +| ------------------------------------------------------ | --------------------------------------- | +| CPU | 2 x Intel Sandy Bridge E5-2670, 2.6 GHz | +| Processor cores | 16 (2 x 8 cores) | +| RAM | 64 GB, min. 4 GB per core | +| GPU | NVIDIA Quadro 4000, 2 GB RAM | +| Local disk drive | yes - 500 GB | +| Compute network | InfiniBand QDR | -Schematic overview ------------------- +## Schematic Overview   -How to use the service ---------------------- +## How to Use the Service -### Setup and start your own TurboVNC server. +### Setup and Start Your Own TurboVNC Server TurboVNC is designed and implemented for cooperation with VirtualGL and available for free for all major platforms. For more information and download, please refer to: <http://sourceforge.net/projects/turbovnc/> @@ -36,15 +33,16 @@ TurboVNC is designed and implemented for cooperation with VirtualGL and availabl The procedure is: -#### 1. Connect to a login node. +#### 1. Connect to a Login Node Please [follow the documentation](shell-and-data-access/). -#### 2. Run your own instance of TurboVNC server. +#### 2. Run Your Own Instance of TurboVNC Server To have the OpenGL acceleration, **24 bit color depth must be used**. Otherwise only the geometry (desktop size) definition is needed. -*At first VNC server run you need to define a password.* +!!! hint + When you run the VNC server for the first time, you need to define a password. This example defines desktop with dimensions 1200x700 pixels and 24 bit color depth. @@ -58,7 +56,7 @@ Starting applications specified in /home/username/.vnc/xstartup.turbovnc Log file is /home/username/.vnc/login2:1.log ``` -#### 3. Remember which display number your VNC server runs (you will need it in the future to stop the server). +#### 3.
Remember Which Display Number Your VNC Server Runs (You Will Need It in the Future to Stop the Server) ```bash $ vncserver -list @@ -71,7 +69,7 @@ X DISPLAY # PROCESS ID In this example the VNC server runs on display **:1**. -#### 4. Remember the exact login node, where your VNC server runs. +#### 4. Remember the Exact Login Node, Where Your VNC Server Runs ```bash $ uname -n @@ -80,7 +78,7 @@ login2 In this example the VNC server runs on **login2**. -#### 5. Remember on which TCP port your own VNC server is running. +#### 5. Remember on Which TCP Port Your Own VNC Server Is Running To get the port you have to look to the log file of your VNC server. @@ -91,22 +89,23 @@ $ grep -E "VNC.*port" /home/username/.vnc/login2:1.log In this example the VNC server listens on TCP port **5901**. -#### 6. Connect to the login node where your VNC server runs with SSH to tunnel your VNC session. +#### 6. Connect to the Login Node Where Your VNC Server Runs With SSH to Tunnel Your VNC Session Tunnel the TCP port on which your VNC server is listening. ```bash $ ssh login2.anselm.it4i.cz -L 5901:localhost:5901 ``` -*If you use Windows and Putty, please refer to port forwarding setup in the documentation:* -[x-window-and-vnc#section-12](../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/) +If you use Windows and Putty, please refer to port forwarding setup in the documentation: +[x-window-and-vnc#section-12](../general/accessing-the-clusters/graphical-user-interface/x-window-system/) -#### 7. If you don't have Turbo VNC installed on your workstation. +#### 7. If You Don't Have Turbo VNC Installed on Your Workstation Get it from: <http://sourceforge.net/projects/turbovnc/> -#### 8. Run TurboVNC Viewer from your workstation. +#### 8. Run TurboVNC Viewer From Your Workstation Mind that you should connect through the SSH tunneled port. In this example it is 5901 on your workstation (localhost). @@ -114,36 +113,39 @@ Mind that you should connect through the SSH tunneled port. In this example it i $ vncviewer localhost:5901 ``` -*If you use Windows version of TurboVNC Viewer, just run the Viewer and use address **localhost:5901**.* +If you use the Windows version of TurboVNC Viewer, just run the Viewer and use the address **localhost:5901**. -#### 9. Proceed to the chapter "Access the visualization node." +#### 9. Proceed to the Chapter "Access the Visualization Node" -*Now you should have working TurboVNC session connected to your workstation.* +Now you should have a working TurboVNC session connected to your workstation. -#### 10. After you end your visualization session. +#### 10. After You End Your Visualization Session -*Don't forget to correctly shutdown your own VNC server on the login node!* +Don't forget to correctly shut down your own VNC server on the login node! ```bash $ vncserver -kill :1 ``` -Access the visualization node ----------------------------- +### Access the Visualization Node + **To access the node use a dedicated PBS Professional scheduler queue qviz**.
The queue has the following properties: - |queue |active project |project resources |nodes|min ncpus|priority|authorization|walltime | - | --- | --- | --- | --- | --- | --- | --- | --- | - |**qviz** Visualization queue |yes |none required |2 |4 |150 |no |1 hour / 8 hours | +| queue | active project | project resources | nodes | min ncpus | priority | authorization | walltime | +| ---------------------------- | -------------- | ----------------- | ----- | --------- | -------- | ------------- | ---------------- | +| **qviz** Visualization queue | yes | none required | 2 | 4 | 150 | no | 1 hour / 8 hours | -Currently when accessing the node, each user gets 4 cores of a CPU allocated, thus approximately 16 GB of RAM and 1/4 of the GPU capacity. *If more GPU power or RAM is required, it is recommended to allocate one whole node per user, so that all 16 cores, whole RAM and whole GPU is exclusive. This is currently also the maximum allowed allocation per one user. One hour of work is allocated by default, the user may ask for 2 hours maximum.* +Currently when accessing the node, each user gets 4 cores of a CPU allocated, thus approximately 16 GB of RAM and 1/4 of the GPU capacity. + +!!! note + If more GPU power or RAM is required, it is recommended to allocate one whole node per user, so that all 16 cores, the whole RAM and the whole GPU are exclusive. This is currently also the maximum allowed allocation per one user. One hour of work is allocated by default, the user may ask for 2 hours maximum. To access the visualization node, follow these steps: -#### 1. In your VNC session, open a terminal and allocate a node using PBSPro qsub command. +#### 1. In Your VNC Session, Open a Terminal and Allocate a Node Using PBSPro qsub Command -*This step is necessary to allow you to proceed with next steps.* +This step is necessary to allow you to proceed with the next steps. ```bash $ qsub -I -q qviz -A PROJECT_ID @@ -155,7 +157,7 @@ In this example the default values for CPU cores and usage time are used. $ qsub -I -q qviz -A PROJECT_ID -l select=1:ncpus=16 -l walltime=02:00:00 ``` -*Substitute **PROJECT_ID** with the assigned project identification string.* +Substitute **PROJECT_ID** with the assigned project identification string. In this example a whole node for 2 hours is requested. @@ -168,7 +170,7 @@ srv8 In this example the visualization session was assigned to node **srv8**. -#### 2. In your VNC session open another terminal (keep the one with interactive PBSPro job open). +#### 2. In Your VNC Session Open Another Terminal (Keep the One With Interactive PBSPro Job Open) Setup the VirtualGL connection to the node, which PBSPro allocated for our job. @@ -178,45 +180,45 @@ $ vglconnect srv8 You will be connected with the created VirtualGL tunnel to the visualization node, where you will have a shell. -#### 3. Load the VirtualGL module. +#### 3. Load the VirtualGL Module ```bash $ module load virtualgl/2.4 ``` -#### 4. Run your desired OpenGL accelerated application using VirtualGL script "vglrun". +#### 4. Run Your Desired OpenGL Accelerated Application Using VirtualGL Script "vglrun" ```bash $ vglrun glxgears ``` -Please note, that if you want to run an OpenGL application which is vailable through modules, you need at first load the respective module. . g. to run the **Mentat** OpenGL application from **MARC** software ackage use: +If you want to run an OpenGL application which is available through modules, you need to first load the respective module. E.g.
to run the **Mentat** OpenGL application from the **MARC** software package use: ```bash $ module load marc/2013.1 $ vglrun mentat ``` -#### 5. After you end your work with the OpenGL application. +#### 5. After You End Your Work With the OpenGL Application Just log out from the visualization node, exit both opened terminals, and end your VNC server session as described above. -Tips and Tricks --------------- +## Tips and Tricks + If you want to increase the responsiveness of the visualization, please adjust your TurboVNC client settings in this way:  To have an idea how the settings affect the resulting picture quality, three levels of "JPEG image quality" are demonstrated: -1. JPEG image quality = 30 +**JPEG image quality = 30**  -2. JPEG image quality = 15 +**JPEG image quality = 15**  -3. JPEG image quality = 10 +**JPEG image quality = 10**  diff --git a/docs.it4i/anselm-cluster-documentation/resource-allocation-and-job-execution.md b/docs.it4i/anselm/resource-allocation-and-job-execution.md similarity index 76% rename from docs.it4i/anselm-cluster-documentation/resource-allocation-and-job-execution.md rename to docs.it4i/anselm/resource-allocation-and-job-execution.md index 4d4d9f50da7853e981136cec182a274bdcae6f9e..b04a95ead56383feaf887c3121495c091d0d380a 100644 --- a/docs.it4i/anselm-cluster-documentation/resource-allocation-and-job-execution.md +++ b/docs.it4i/anselm/resource-allocation-and-job-execution.md @@ -1,36 +1,35 @@ -Resource Allocation and Job Execution ===================================== +# Resource Allocation and Job Execution To run a [job](../introduction/), [computational resources](../introduction/) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which efficiently distributes workloads across the supercomputer. Extensive information about PBS Pro can be found in the [official documentation here](../pbspro-documentation/pbspro/), especially in the PBS Pro User's Guide. -Resources Allocation Policy --------------------------- +## Resources Allocation Policy + The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and resources available to the Project. [The Fair-share](job-priority/) at Anselm ensures that individual users may consume approximately equal amount of resources per week. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. Following queues are available to Anselm users: -- **qexp**, the Express queue -- **qprod**, the Production queue -- **qlong**, the Long queue, regula -- **qnvidia**, **qmic**, **qfat**, the Dedicated queues -- **qfree**, the Free resource utilization queue +* **qexp**, the Express queue +* **qprod**, the Production queue +* **qlong**, the Long queue +* **qnvidia**, **qmic**, **qfat**, the Dedicated queues +* **qfree**, the Free resource utilization queue -!!! Note "Note" - Check the queue status at <https://extranet.it4i.cz/anselm/> +!!! note + Check the queue status at <https://extranet.it4i.cz/anselm/> Read more on the [Resource Allocation Policy](resources-allocation-policy/) page. -Job submission and execution ---------------------------- -!!! Note "Note" - Use the **qsub** command to submit your jobs. +## Job Submission and Execution + +!!! note + Use the **qsub** command to submit your jobs. The qsub submits the job into the queue.
The qsub command creates a request to the PBS Job manager for allocation of specified resources. The **smallest allocation unit is entire node, 16 cores**, with exception of the qexp queue. The resources will be allocated when available, subject to allocation policies and constraints. **After the resources are allocated the jobscript or interactive shell is executed on first of the allocated nodes.** Read more on the [Job submission and execution](job-submission-and-execution/) page. -Capacity computing ------------------ -!!! Note "Note" - Use Job arrays when running huge number of jobs. +## Capacity Computing + +!!! note + Use Job arrays when running a huge number of jobs. Use GNU Parallel and/or Job arrays when running (many) single core jobs. diff --git a/docs.it4i/anselm-cluster-documentation/resources-allocation-policy.md b/docs.it4i/anselm/resources-allocation-policy.md similarity index 75% rename from docs.it4i/anselm-cluster-documentation/resources-allocation-policy.md rename to docs.it4i/anselm/resources-allocation-policy.md index 0576b4e4fed3b62e1a62339f4f961643e6e6428e..16cb7510d63075d413a19a9a9702ebbf23a4fb78 100644 --- a/docs.it4i/anselm-cluster-documentation/resources-allocation-policy.md +++ b/docs.it4i/anselm/resources-allocation-policy.md @@ -1,31 +1,30 @@ -Resources Allocation Policy =========================== +# Resources Allocation Policy + +## Introduction -Resources Allocation Policy ---------------------------- The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and resources available to the Project. The Fair-share at Anselm ensures that individual users may consume approximately equal amount of resources per week. Detailed information in the [Job scheduling](job-priority/) section. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. Following table provides the queue partitioning overview: -!!! Note "Note" - Check the queue status at https://extranet.it4i.cz/anselm/ +!!! note + Check the queue status at <https://extranet.it4i.cz/anselm/> - |queue |active project |project resources |nodes|min ncpus|priority|authorization|walltime | - | --- | --- | --- | --- | --- | --- | --- | --- | - |qexp |no |none required |2 reserved, 31 totalincluding MIC, GPU and FAT nodes |1 |150 |no |1 h | - |qprod |yes |0 |178 nodes w/o accelerator |16 |0 |no |24/48 h | - |qlong |yes |0 |60 nodes w/o accelerator |16 |0 |no |72/144 h | - |qnvidia, qmic, qfat |yes |0 |23 total qnvidia4 total qmic2 total qfat |16 |200 |yes |24/48 h | - |qfree |yes |none required |178 w/o accelerator |16 |-1024 |no |12 h | +| queue | active project | project resources | nodes | min ncpus | priority | authorization | walltime | +| ------------------- | -------------- | ----------------- | ----------------------------------------------------- | --------- | -------- | ------------- | -------- | +| qexp | no | none required | 2 reserved, 31 total including MIC, GPU and FAT nodes | 1 | 150 | no | 1 h | +| qprod | yes | 0 | 178 nodes w/o accelerator | 16 | 0 | no | 24/48 h | +| qlong | yes | 0 | 60 nodes w/o accelerator | 16 | 0 | no | 72/144 h | +| qnvidia, qmic, qfat | yes | 0 | 23 total qnvidia, 4 total qmic, 2 total qfat | 16 | 200 | yes | 24/48 h | +| qfree | yes | none required | 178 w/o accelerator | 16 | -1024 | no | 12 h |
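For illustration, a minimal submission against one of the queues above. This is a sketch only; PROJECT_ID and the jobscript name are placeholders:

```bash
$ qsub -A PROJECT_ID -q qprod -l select=4:ncpus=16 -l walltime=24:00:00 ./myjob
```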
However, it allows for utilization of free resources, once a Project exhausted all its allocated computational resources. This does not apply for Directors Discreation's projects (DD projects) by default. Usage of qfree after exhaustion of DD projects computational resources is allowed after request for this queue. +!!! note + **The qfree queue is not free of charge**. [Normal accounting](#resources-accounting-policy) applies. However, it allows for utilization of free resources, once a Project exhausted all its allocated computational resources. This does not apply for Directors Discreation's projects (DD projects) by default. Usage of qfree after exhaustion of DD projects computational resources is allowed after request for this queue. - **The qexp queue is equipped with the nodes not having the very same CPU clock speed.** Should you need the very same CPU speed, you have to select the proper nodes during the PSB job submission. +**The qexp queue is equipped with the nodes not having the very same CPU clock speed.** Should you need the very same CPU speed, you have to select the proper nodes during the PSB job submission. -- **qexp**, the Express queue: This queue is dedicated for testing and running very small jobs. It is not required to specify a project to enter the qexp. There are 2 nodes always reserved for this queue (w/o accelerator), maximum 8 nodes are available via the qexp for a particular user, from a pool of nodes containing Nvidia accelerated nodes (cn181-203), MIC accelerated nodes (cn204-207) and Fat nodes with 512GB RAM (cn208-209). This enables to test and tune also accelerated code or code with higher RAM requirements. The nodes may be allocated on per core basis. No special authorization is required to use it. The maximum runtime in qexp is 1 hour. -- **qprod**, the Production queue: This queue is intended for normal production runs. It is required that active project with nonzero remaining resources is specified to enter the qprod. All nodes may be accessed via the qprod queue, except the reserved ones. 178 nodes without accelerator are included. Full nodes, 16 cores per node are allocated. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qprod is 48 hours. -- **qlong**, the Long queue: This queue is intended for long production runs. It is required that active project with nonzero remaining resources is specified to enter the qlong. Only 60 nodes without acceleration may be accessed via the qlong queue. Full nodes, 16 cores per node are allocated. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qlong is 144 hours (three times of the standard qprod time - 3 * 48 h). -- **qnvidia**, qmic, qfat, the Dedicated queues: The queue qnvidia is dedicated to access the Nvidia accelerated nodes, the qmic to access MIC nodes and qfat the Fat nodes. It is required that active project with nonzero remaining resources is specified to enter these queues. 23 nvidia, 4 mic and 2 fat nodes are included. Full nodes, 16 cores per node are allocated. The queues run with very high priority, the jobs will be scheduled before the jobs coming from the qexp queue. An PI needs explicitly ask [support](https://support.it4i.cz/rt/) for authorization to enter the dedicated queues for all users associated to her/his Project. 
-- **qfree**, The Free resource queue: The queue qfree is intended for utilization of free resources, after a Project exhausted all its allocated computational resources (Does not apply to DD projects by default. DD projects have to request for persmission on qfree after exhaustion of computational resources.). It is required that active project is specified to enter the queue, however no remaining resources are required. Consumed resources will be accounted to the Project. Only 178 nodes without accelerator may be accessed from this queue. Full nodes, 16 cores per node are allocated. The queue runs with very low priority and no special authorization is required to use it. The maximum runtime in qfree is 12 hours. +* **qexp**, the Express queue: This queue is dedicated for testing and running very small jobs. It is not required to specify a project to enter the qexp. There are 2 nodes always reserved for this queue (w/o accelerator), maximum 8 nodes are available via the qexp for a particular user, from a pool of nodes containing Nvidia accelerated nodes (cn181-203), MIC accelerated nodes (cn204-207) and Fat nodes with 512GB RAM (cn208-209). This makes it possible to also test and tune accelerated code or code with higher RAM requirements. The nodes may be allocated on a per-core basis. No special authorization is required to use it. The maximum runtime in qexp is 1 hour. +* **qprod**, the Production queue: This queue is intended for normal production runs. It is required that active project with nonzero remaining resources is specified to enter the qprod. All nodes may be accessed via the qprod queue, except the reserved ones. 178 nodes without accelerator are included. Full nodes, 16 cores per node are allocated. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qprod is 48 hours. +* **qlong**, the Long queue: This queue is intended for long production runs. It is required that active project with nonzero remaining resources is specified to enter the qlong. Only 60 nodes without acceleration may be accessed via the qlong queue. Full nodes, 16 cores per node are allocated. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qlong is 144 hours (three times the standard qprod time: 3 x 48 h). +* **qnvidia**, qmic, qfat, the Dedicated queues: The queue qnvidia is dedicated to access the Nvidia accelerated nodes, the qmic to access MIC nodes and qfat the Fat nodes. It is required that active project with nonzero remaining resources is specified to enter these queues. 23 nvidia, 4 mic and 2 fat nodes are included. Full nodes, 16 cores per node are allocated. The queues run with very high priority, the jobs will be scheduled before the jobs coming from the qexp queue. A PI needs to explicitly ask [support](https://support.it4i.cz/rt/) for authorization to enter the dedicated queues for all users associated to her/his Project. +* **qfree**, The Free resource queue: The queue qfree is intended for utilization of free resources, after a Project has exhausted all its allocated computational resources (Does not apply to DD projects by default. DD projects have to request permission to use qfree after exhaustion of computational resources.). It is required that an active project is specified to enter the queue, however no remaining resources are required. Consumed resources will be accounted to the Project. Only 178 nodes without accelerator may be accessed from this queue.
Full nodes, 16 cores per node are allocated. The queue runs with very low priority and no special authorization is required to use it. The maximum runtime in qfree is 12 hours. ### Notes @@ -35,9 +34,10 @@ Jobs that exceed the reserved wall clock time (Req'd Time) get killed automatica Anselm users may check current queue configuration at <https://extranet.it4i.cz/anselm/queues>. -### Queue status +### Queue Status ->Check the status of jobs, queues and compute nodes at <https://extranet.it4i.cz/anselm/> +!!! tip + Check the status of jobs, queues and compute nodes at <https://extranet.it4i.cz/anselm/>  @@ -59,9 +59,9 @@ Options: --get-node-ncpu-chart Print chart of allocated ncpus per node --summary Print summary - --get-server-details Print server + --get-server-details Print server --get-queues Print queues - --get-queues-details Print queues details + --get-queues-details Print queues details --get-reservations Print reservations --get-reservations-details Print reservations details @@ -92,7 +92,7 @@ Options: --get-user-ncpus Print number of allocated ncpus per user --get-qlist-nodes Print qlist nodes --get-qlist-nodeset Print qlist nodeset - --get-ibswitch-nodes Print ibswitch nodes + --get-ibswitch-nodes Print ibswitch nodes --get-ibswitch-nodeset Print ibswitch nodeset --state=STATE Only for given job state @@ -105,17 +105,16 @@ Options: --incl-finished Include finished jobs ``` -Resources Accounting Policy -------------------------------- +## Resources Accounting Policy -### The Core-Hour +### Core-Hours The resources that are currently subject to accounting are the core-hours. The core-hours are accounted on the wall clock basis. The accounting runs whenever the computational cores are allocated or blocked via the PBS Pro workload manager (the qsub command), regardless of whether the cores are actually used for any calculation. 1 core-hour is defined as 1 processor core allocated for 1 hour of wall clock time. Allocating a full node (16 cores) for 1 hour accounts to 16 core-hours. See example in the [Job submission and execution](job-submission-and-execution/) section. -### Check consumed resources +### Check Consumed Resources -!!! Note "Note" - The **it4ifree** command is a part of it4i.portal.clients package, located here: <https://pypi.python.org/pypi/it4i.portal.clients> +!!! note + The **it4ifree** command is a part of it4i.portal.clients package, located here: <https://pypi.python.org/pypi/it4i.portal.clients> User may check at any time, how many core-hours have been consumed by himself/herself and his/her projects. The command is available on clusters' login nodes. diff --git a/docs.it4i/anselm-cluster-documentation/shell-and-data-access.md b/docs.it4i/anselm/shell-and-data-access.md similarity index 61% rename from docs.it4i/anselm-cluster-documentation/shell-and-data-access.md rename to docs.it4i/anselm/shell-and-data-access.md index 912e49a165705ad5ec1d234b8618b0f6fec50051..260945ed1b896b1740a98f5de44a5e2caa9910e3 100644 --- a/docs.it4i/anselm-cluster-documentation/shell-and-data-access.md +++ b/docs.it4i/anselm/shell-and-data-access.md @@ -1,23 +1,22 @@ -Accessing the Cluster -============================== +# Accessing the Cluster + +## Shell Access -Shell Access ------------------ The Anselm cluster is accessed by SSH protocol via login nodes login1 and login2 at address anselm.it4i.cz. The login nodes may be addressed specifically, by prepending the login node name to the address. 
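For example, to reach a specific login node with a standard OpenSSH client (username is a placeholder):

```bash
$ ssh username@login1.anselm.it4i.cz
```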
-|Login address|Port|Protocol|Login node| -|---|---|---|---| -|anselm.it4i.cz|22|ssh|round-robin DNS record for login1 and login2| -|login1.anselm.it4i.cz|22|ssh|login1| -|login2.anselm.it4i.cz|22|ssh|login2| +| Login address | Port | Protocol | Login node | +| --------------------- | ---- | -------- | -------------------------------------------- | +| anselm.it4i.cz | 22 | ssh | round-robin DNS record for login1 and login2 | +| login1.anselm.it4i.cz | 22 | ssh | login1 | +| login2.anselm.it4i.cz | 22 | ssh | login2 | -The authentication is by the [private key](../get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) +The authentication is by the [private key](../general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) -!!! Note "Note" - Please verify SSH fingerprints during the first logon. They are identical on all login nodes: +!!! note + Please verify SSH fingerprints during the first logon. They are identical on all login nodes: - 29:b3:f4:64:b0:73:f5:6f:a7:85:0f:e0:0d:be:76:bf (DSA) - d4:6f:5c:18:f4:3f:70:ef:bc:fc:cc:2b:fd:13:36:b7 (RSA) + 29:b3:f4:64:b0:73:f5:6f:a7:85:0f:e0:0d:be:76:bf (DSA) + d4:6f:5c:18:f4:3f:70:ef:bc:fc:cc:2b:fd:13:36:b7 (RSA) Private key authentication: @@ -33,7 +32,7 @@ If you see warning message "UNPROTECTED PRIVATE KEY FILE!", use this command to local $ chmod 600 /path/to/id_rsa ``` -On **Windows**, use [PuTTY ssh client](../get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/putty.md). +On **Windows**, use [PuTTY ssh client](../general/accessing-the-clusters/shell-access-and-data-transfer/putty.md). After logging in, you will see the command prompt: @@ -48,37 +47,37 @@ After logging in, you will see the command prompt: http://www.it4i.cz/?lang=en -Last login: Tue Jul 9 15:57:38 2013 from your-host.example.com +Last login: Tue Jul 9 15:57:38 2013 from your-host.example.com [username@login2.anselm ~]$ ``` Example to the cluster login: -!!! Note "Note" - The environment is **not** shared between login nodes, except for [shared filesystems](storage/#shared-filesystems). +!!! note + The environment is **not** shared between login nodes, except for [shared filesystems](storage/#shared-filesystems). + +## Data Transfer -Data Transfer -------------- Data in and out of the system may be transferred by the [scp](http://en.wikipedia.org/wiki/Secure_copy) and sftp protocols. (Not available yet.) In case large volumes of data are transferred, use dedicated data mover node dm1.anselm.it4i.cz for increased performance. -|Address|Port|Protocol| -|---|---|---| -|anselm.it4i.cz|22|scp, sftp| -|login1.anselm.it4i.cz|22|scp, sftp| -|login2.anselm.it4i.cz|22|scp, sftp| -|dm1.anselm.it4i.cz|22|scp, sftp| +| Address | Port | Protocol | +| --------------------- | ---- | --------- | +| anselm.it4i.cz | 22 | scp, sftp | +| login1.anselm.it4i.cz | 22 | scp, sftp | +| login2.anselm.it4i.cz | 22 | scp, sftp | +| dm1.anselm.it4i.cz | 22 | scp, sftp | -The authentication is by the [private key](../get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) +The authentication is by the [private key](../general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) -!!! Note "Note" - Data transfer rates up to **160MB/s** can be achieved with scp or sftp. +!!! note + Data transfer rates up to **160MB/s** can be achieved with scp or sftp. 1TB may be transferred in 1:50h. 
To achieve 160MB/s transfer rates, the end user must be connected by 10G line all the way to IT4Innovations and use computer with fast processor for the transfer. Using Gigabit ethernet connection, up to 110MB/s may be expected. Fast cipher (aes128-ctr) should be used. -!!! Note "Note" - If you experience degraded data transfer performance, consult your local network provider. +!!! note + If you experience degraded data transfer performance, consult your local network provider. On linux or Mac, use scp or sftp client to transfer the data to Anselm: @@ -116,30 +115,28 @@ On Windows, use [WinSCP client](http://winscp.net/eng/download.php) to transfer More information about the shared file systems is available [here](storage/). +## Connection Restrictions -Connection restrictions ----------------------- Outgoing connections, from Anselm Cluster login nodes to the outside world, are restricted to the following ports: -|Port|Protocol| -|---|---| -|22|ssh| -|80|http| -|443|https| -|9418|git| +| Port | Protocol | +| ---- | -------- | +| 22 | ssh | +| 80 | http | +| 443 | https | +| 9418 | git | -!!! Note "Note" - Please use **ssh port forwarding** and proxy servers to connect from Anselm to all other remote ports. +!!! note + Please use **ssh port forwarding** and proxy servers to connect from Anselm to all other remote ports. Outgoing connections, from Anselm Cluster compute nodes are restricted to the internal network. Direct connections from compute nodes to the outside world are cut. -Port forwarding --------------- +## Port Forwarding -### Port forwarding from login nodes +### Port Forwarding From Login Nodes -!!! Note "Note" - Port forwarding allows an application running on Anselm to connect to arbitrary remote host and port. +!!! note + Port forwarding allows an application running on Anselm to connect to arbitrary remote host and port. It works by tunneling the connection from Anselm back to users workstation and forwarding from the workstation to the remote host. @@ -151,7 +148,7 @@ local $ ssh -R 6000:remote.host.com:1234 anselm.it4i.cz In this example, we establish port forwarding between port 6000 on Anselm and port 1234 on the remote.host.com. By accessing localhost:6000 on Anselm, an application will see response of remote.host.com:1234. The traffic will run via users local workstation. -Port forwarding may be done **using PuTTY** as well. On the PuTTY Configuration screen, load your Anselm configuration first. Then go to Connection->SSH->Tunnels to set up the port forwarding. Click Remote radio button. Insert 6000 to Source port textbox. Insert remote.host.com:1234. Click Add button, then Open. +Port forwarding may be done **using PuTTY** as well. On the PuTTY Configuration screen, load your Anselm configuration first. Then go to Connection->SSH->Tunnels to set up the port forwarding. Click Remote radio button. Insert 6000 to Source port textbox. Insert remote.host.com:1234. Click Add button, then Open. Port forwarding may be established directly to the remote host. However, this requires that user has ssh access to remote.host.com @@ -159,9 +156,10 @@ Port forwarding may be established directly to the remote host. However, this re $ ssh -L 6000:localhost:1234 remote.host.com ``` -Note: Port number 6000 is chosen as an example only. Pick any free port. +!!! note + Port number 6000 is chosen as an example only. Pick any free port.
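To pick a free port, one can first check which ports are already in use on the machine. A quick check, assuming the standard ss utility is available:

```bash
$ ss -tln | grep ':6000 ' || echo "port 6000 appears to be free"
```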
-### Port forwarding from compute nodes +### Port Forwarding From Compute Nodes Remote port forwarding from compute nodes allows applications running on the compute nodes to access hosts outside Anselm Cluster. @@ -175,12 +173,12 @@ $ ssh -TN -f -L 6000:localhost:6000 login1 In this example, we assume that port forwarding from login1:6000 to remote.host.com:1234 has been established beforehand. By accessing localhost:6000, an application running on a compute node will see response of remote.host.com:1234 -### Using proxy servers +### Using Proxy Servers Port forwarding is static; each single port is mapped to a particular port on a remote host. Connection to another remote host requires a new forward. -!!! Note "Note" - Applications with inbuilt proxy support, experience unlimited access to remote hosts, via single proxy server. +!!! note + Applications with inbuilt proxy support, experience unlimited access to remote hosts, via single proxy server. To establish local proxy server on your workstation, install and run SOCKS proxy server software. On Linux, the sshd daemon provides the functionality. To establish SOCKS proxy server listening on port 1080 run: @@ -196,15 +194,13 @@ Once the proxy server is running, establish ssh port forwarding from Anselm to t local $ ssh -R 6000:localhost:1080 anselm.it4i.cz ``` -Now, configure the applications proxy settings to **localhost:6000**. Use port forwarding to access the [proxy server from compute nodes](#port-forwarding-from-compute-nodes) as well. +Now, configure the application's proxy settings to **localhost:6000**. Use port forwarding to access the [proxy server from compute nodes](#port-forwarding-from-compute-nodes) as well. -Graphical User Interface ----------------------- +## Graphical User Interface -- The [X Window system](../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/) is a principal way to get GUI access to the clusters. -- The [Virtual Network Computing](../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/vnc/) is a graphical [desktop sharing](http://en.wikipedia.org/wiki/Desktop_sharing) system that uses the [Remote Frame Buffer protocol](http://en.wikipedia.org/wiki/RFB_protocol) to remotely control another [computer](http://en.wikipedia.org/wiki/Computer). +* The [X Window system](../general/accessing-the-clusters/graphical-user-interface/x-window-system/) is a principal way to get GUI access to the clusters. +* The [Virtual Network Computing](../general/accessing-the-clusters/graphical-user-interface/vnc/) is a graphical [desktop sharing](http://en.wikipedia.org/wiki/Desktop_sharing) system that uses the [Remote Frame Buffer protocol](http://en.wikipedia.org/wiki/RFB_protocol) to remotely control another [computer](http://en.wikipedia.org/wiki/Computer). -VPN Access --------- +## VPN Access -- Access to IT4Innovations internal resources via [VPN](../get-started-with-it4innovations/vpn-access/). +* Access to IT4Innovations internal resources via [VPN](../general/accessing-the-clusters/vpn-access/).
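As a usage sketch for the SOCKS proxy setup described above: an application without configurable proxy settings can often still be pointed at the proxy explicitly, e.g. with curl (the --socks5 option is standard curl; the URL is a placeholder):

```bash
$ curl --socks5 localhost:6000 http://remote.host.com/
```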
diff --git a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-cfx.md b/docs.it4i/anselm/software/ansys/ansys-cfx.md similarity index 59% rename from docs.it4i/anselm-cluster-documentation/software/ansys/ansys-cfx.md rename to docs.it4i/anselm/software/ansys/ansys-cfx.md index 1693bc723792cad5ae5cabbc979a5b2a2a525b4b..b816f026430e7573ddffb81d5ced15770994435b 100644 --- a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-cfx.md +++ b/docs.it4i/anselm/software/ansys/ansys-cfx.md @@ -1,7 +1,6 @@ -ANSYS CFX ========= +# ANSYS CFX -[ANSYS CFX](http://www.ansys.com/Products/Simulation+Technology/Fluid+Dynamics/Fluid+Dynamics+Products/ANSYS+CFX) software is a high-performance, general purpose fluid dynamics program that has been applied to solve wide-ranging fluid flow problems for over 20 years. At the heart of ANSYS CFX is its advanced solver technology, the key to achieving reliable and accurate solutions quickly and robustly. The modern, highly parallelized solver is the foundation for an abundant choice of physical models to capture virtually any type of phenomena related to fluid flow. The solver and its many physical models are wrapped in a modern, intuitive, and flexible GUI and user environment, with extensive capabilities for customization and automation using session files, scripting and a powerful expression language. +[ANSYS CFX](http://www.ansys.com/products/fluids/ansys-cfx) software is a high-performance, general purpose fluid dynamics program that has been applied to solve wide-ranging fluid flow problems for over 20 years. At the heart of ANSYS CFX is its advanced solver technology, the key to achieving reliable and accurate solutions quickly and robustly. The modern, highly parallelized solver is the foundation for an abundant choice of physical models to capture virtually any type of phenomena related to fluid flow. The solver and its many physical models are wrapped in a modern, intuitive, and flexible GUI and user environment, with extensive capabilities for customization and automation using session files, scripting and a powerful expression language. To run ANSYS CFX in batch mode you can utilize/modify the default cfx.pbs script and execute it via the qsub command. @@ -35,7 +34,7 @@ procs_per_host=1 hl="" for host in `cat $PBS_NODEFILE` do - if [ "$hl" = "" ] + if [ "$hl" = "" ] then hl="$host:$procs_per_host" else hl="${hl}:$host:$procs_per_host" fi @@ -48,7 +47,7 @@ echo Machines: $hl /ansys_inc/v145/CFX/bin/cfx5solve -def input.def -size 4 -size-ni 4x -part-large -start-method "Platform MPI Distributed Parallel" -par-dist $hl -P aa_r ``` -Header of the PBS file (above) is common and description can be find on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution/). SVS FEM recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. +The header of the PBS file (above) is common and a description can be found on [this site](../../job-submission-and-execution/). SVS FEM recommends utilizing resources via the keywords nodes and ppn. These keywords allow you to directly address the number of nodes (computers) and cores (ppn) which will be utilized in the job. The rest of the code also assumes such a structure of allocated resources. Working directory has to be created before sending PBS job into the queue.
Input file should be in working directory or full path to input file has to be specified. >Input file has to be defined by common CFX def file which is attached to the cfx solver via parameter -def diff --git a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-fluent.md b/docs.it4i/anselm/software/ansys/ansys-fluent.md similarity index 81% rename from docs.it4i/anselm-cluster-documentation/software/ansys/ansys-fluent.md rename to docs.it4i/anselm/software/ansys/ansys-fluent.md index fc7f718e87403f193b9491c43ee293169bf8bfbd..ff1f7cdd21a26283fd7522fc2cc286f00bde73a7 100644 --- a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-fluent.md +++ b/docs.it4i/anselm/software/ansys/ansys-fluent.md @@ -1,11 +1,10 @@ -ANSYS Fluent -============ +# ANSYS Fluent -[ANSYS Fluent](http://www.ansys.com/Products/Simulation+Technology/Fluid+Dynamics/Fluid+Dynamics+Products/ANSYS+Fluent) +[ANSYS Fluent](http://www.ansys.com/products/fluids/ansys-fluent) software contains the broad physical modeling capabilities needed to model flow, turbulence, heat transfer, and reactions for industrial applications ranging from air flow over an aircraft wing to combustion in a furnace, from bubble columns to oil platforms, from blood flow to semiconductor manufacturing, and from clean room design to wastewater treatment plants. Special models that give the software the ability to model in-cylinder combustion, aeroacoustics, turbomachinery, and multiphase systems have served to broaden its reach. -1. Common way to run Fluent over PBS file ------------------------------------------ +## Common Way to Run Fluent Over PBS File + To run ANSYS Fluent in batch mode you can utilize/modify the default fluent.pbs script and execute it via the qsub command. ```bash @@ -57,18 +56,17 @@ Journal file with definition of the input geometry and boundary conditions and d The appropriate dimension of the problem has to be set by parameter (2d/3d). -2. Fast way to run Fluent from command line --------------------------------------------------------- +## Fast Way to Run Fluent From Command Line ```bash fluent solver_version [FLUENT_options] -i journal_file -pbs ``` -This syntax will start the ANSYS FLUENT job under PBS Professional using the qsub command in a batch manner. When resources are available, PBS Professional will start the job and return a job ID, usually in the form of *job_ID.hostname*. This job ID can then be used to query, control, or stop the job using standard PBS Professional commands, such as qstat or qdel. The job will be run out of the current working directory, and all output will be written to the file fluent.o *job_ID*. +This syntax will start the ANSYS FLUENT job under PBS Professional using the qsub command in a batch manner. When resources are available, PBS Professional will start the job and return a job ID, usually in the form of _job_ID.hostname_. This job ID can then be used to query, control, or stop the job using standard PBS Professional commands, such as qstat or qdel. The job will be run out of the current working directory, and all output will be written to the file fluent.o _job_ID_. + +## Running Fluent via User's Config File -3. Running Fluent via user's config file ----------------------------------------- -The sample script uses a configuration file called pbs_fluent.conf if no command line arguments are present. This configuration file should be present in the directory from which the jobs are submitted (which is also the directory in which the jobs are executed). 
The following is an example of what the content of pbs_fluent.conf can be: +The sample script uses a configuration file called pbs_fluent.conf if no command line arguments are present. This configuration file should be present in the directory from which the jobs are submitted (which is also the directory in which the jobs are executed). The following is an example of what the content of pbs_fluent.conf can be: ```bash input="example_small.flin" @@ -102,7 +100,7 @@ To run ANSYS Fluent in batch mode with user's config file you can utilize/modify cd $PBS_O_WORKDIR #We assume that if they didn’t specify arguments then they should use the - #config file if [ "xx${input}${case}${mpp}${fluent_args}zz" = "xxzz" ]; then + #config file if ["xx${input}${case}${mpp}${fluent_args}zz" = "xxzz" ]; then if [ -f pbs_fluent.conf ]; then . pbs_fluent.conf else @@ -143,8 +141,8 @@ To run ANSYS Fluent in batch mode with user's config file you can utilize/modify It runs the jobs out of the directory from which they are submitted (PBS_O_WORKDIR). -4. Running Fluent in parralel ------------------------------ +## Running Fluent in Parralel + Fluent could be run in parallel only under Academic Research license. To do so this ANSYS Academic Research license must be placed before ANSYS CFD license in user preferences. To make this change anslic_admin utility should be run ```bash diff --git a/docs.it4i/anselm/software/ansys/ansys-ls-dyna.md b/docs.it4i/anselm/software/ansys/ansys-ls-dyna.md new file mode 100644 index 0000000000000000000000000000000000000000..af46af93a30600c440e4e52cb5fdbd1edb677660 --- /dev/null +++ b/docs.it4i/anselm/software/ansys/ansys-ls-dyna.md @@ -0,0 +1,55 @@ +# ANSYS LS-DYNA + +**[ANSYSLS-DYNA](http://www.ansys.com/products/structures/ansys-ls-dyna)** software provides convenient and easy-to-use access to the technology-rich, time-tested explicit solver without the need to contend with the complex input requirements of this sophisticated program. Introduced in 1996, ANSYS LS-DYNA capabilities have helped customers in numerous industries to resolve highly intricate design issues. ANSYS Mechanical users have been able take advantage of complex explicit solutions for a long time utilizing the traditional ANSYS Parametric Design Language (APDL) environment. These explicit capabilities are available to ANSYS Workbench users as well. The Workbench platform is a powerful, comprehensive, easy-to-use environment for engineering simulation. CAD import from all sources, geometry cleanup, automatic meshing, solution, parametric optimization, result visualization and comprehensive report generation are all available within a single fully interactive modern graphical user environment. + +To run ANSYS LS-DYNA in batch mode you can utilize/modify the default ansysdyna.pbs script and execute it via the qsub command. + +```bash +#!/bin/bash +#PBS -l nodes=2:ppn=16 +#PBS -q qprod +#PBS -N $USER-DYNA-Project +#PBS -A XX-YY-ZZ + +#! Mail to user when job terminate or abort +#PBS -m ae + +#!change the working directory (default is home directory) +#cd <working directory> +WORK_DIR="/scratch/$USER/work" +cd $WORK_DIR + +echo Running on host `hostname` +echo Time is `date` +echo Directory is `pwd` +echo This jobs runs on the following processors: +echo `cat $PBS_NODEFILE` + +#! 
Counts the number of processors +NPROCS=`wc -l < $PBS_NODEFILE` + +echo This job has allocated $NPROCS nodes + +module load ansys + +#### Set number of processors per host listing +#### (set to 1 as $PBS_NODEFILE lists each node twice if :ppn=2) +procs_per_host=1 +#### Create host list +hl="" +for host in `cat $PBS_NODEFILE` +do + if ["$hl" = "" ] + then hl="$host:$procs_per_host" + else hl="${hl}:$host:$procs_per_host" + fi +done + +echo Machines: $hl + +/ansys_inc/v145/ansys/bin/ansys145 -dis -lsdynampp i=input.k -machines $hl +``` + +Header of the PBS file (above) is common and description can be find on [this site](../../job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. + +Working directory has to be created before sending PBS job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common LS-DYNA .**k** file which is attached to the ANSYS solver via parameter i= diff --git a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-mechanical-apdl.md b/docs.it4i/anselm/software/ansys/ansys-mechanical-apdl.md similarity index 94% rename from docs.it4i/anselm-cluster-documentation/software/ansys/ansys-mechanical-apdl.md rename to docs.it4i/anselm/software/ansys/ansys-mechanical-apdl.md index 69d920f50c3db8e8b0ad7e4382d86b8a92ed1797..cdaac19ff664acbcd79c8c234ff30ff54cf06cad 100644 --- a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-mechanical-apdl.md +++ b/docs.it4i/anselm/software/ansys/ansys-mechanical-apdl.md @@ -1,7 +1,6 @@ -ANSYS MAPDL -=========== +# ANSYS MAPDL -**[ANSYS Multiphysics](http://www.ansys.com/Products/Simulation+Technology/Structural+Mechanics/ANSYS+Multiphysics)** +**[ANSYS Multiphysics](http://www.ansys.com/products/multiphysics)** software offers a comprehensive product solution for both multiphysics and single-physics analysis. The product includes structural, thermal, fluid and both high- and low-frequency electromagnetic analysis. The product also contains solutions for both direct and sequentially coupled physics problems including direct coupled-field elements and the ANSYS multi-field solver. To run ANSYS MAPDL in batch mode you can utilize/modify the default mapdl.pbs script and execute it via the qsub command. 
@@ -36,7 +35,7 @@ procs_per_host=1 hl="" for host in `cat $PBS_NODEFILE` do - if [ "$hl" = "" ] + if ["$hl" = "" ] then hl="$host:$procs_per_host" else hl="${hl}:$host:$procs_per_host" fi diff --git a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys.md b/docs.it4i/anselm/software/ansys/ansys.md similarity index 87% rename from docs.it4i/anselm-cluster-documentation/software/ansys/ansys.md rename to docs.it4i/anselm/software/ansys/ansys.md index 8d0a501e6f6ccc91ad6e32cbb1e69b19416540c1..16be5639d93fc6d14baaff251a5b09a1d0e31b62 100644 --- a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys.md +++ b/docs.it4i/anselm/software/ansys/ansys.md @@ -1,9 +1,8 @@ -Overview of ANSYS Products -========================== +# Overview of ANSYS Products **[SVS FEM](http://www.svsfem.cz/)** as **[ANSYS Channel partner](http://www.ansys.com/)** for Czech Republic provided all ANSYS licenses for ANSELM cluster and supports of all ANSYS Products (Multiphysics, Mechanical, MAPDL, CFX, Fluent, Maxwell, LS-DYNA...) to IT staff and ANSYS users. If you are challenging to problem of ANSYS functionality contact please [hotline@svsfem.cz](mailto:hotline@svsfem.cz?subject=Ostrava%20-%20ANSELM) -Anselm provides commercial as well as academic variants. Academic variants are distinguished by "**Academic...**" word in the name of license or by two letter preposition "**aa_**" in the license feature name. Change of license is realized on command line respectively directly in user's PBS file (see individual products). [ More about licensing here](ansys/licensing/) +Anselm provides commercial as well as academic variants. Academic variants are distinguished by "**Academic...**" word in the name of license or by two letter preposition "**aa\_**" in the license feature name. Change of license is realized on command line respectively directly in user's PBS file (see individual products). [More about licensing here](ansys/licensing/) To load the latest version of any ANSYS product (Mechanical, Fluent, CFX, MAPDL,...) load the module: @@ -14,4 +13,3 @@ To load the latest version of any ANSYS product (Mechanical, Fluent, CFX, MAPDL, ANSYS supports interactive regime, but due to assumed solution of extremely difficult tasks it is not recommended. If user needs to work in interactive regime we recommend to configure the RSM service on the client machine which allows to forward the solution to the Anselm directly from the client's Workbench project (see ANSYS RSM service). - diff --git a/docs.it4i/anselm-cluster-documentation/software/ansys/ls-dyna.md b/docs.it4i/anselm/software/ansys/ls-dyna.md similarity index 83% rename from docs.it4i/anselm-cluster-documentation/software/ansys/ls-dyna.md rename to docs.it4i/anselm/software/ansys/ls-dyna.md index dc9ca58c1dd83e44411ec3af34d63dc5cfb06636..063bcf245e7b74781c953eebb309adfad5c0e48d 100644 --- a/docs.it4i/anselm-cluster-documentation/software/ansys/ls-dyna.md +++ b/docs.it4i/anselm/software/ansys/ls-dyna.md @@ -1,5 +1,4 @@ -LS-DYNA -======= +# LS-DYNA [LS-DYNA](http://www.lstc.com/) is a multi-purpose, explicit and implicit finite element program used to analyze the nonlinear dynamic response of structures. Its fully automated contact analysis capability, a wide range of constitutive models to simulate a whole range of engineering materials (steels, composites, foams, concrete, etc.), error-checking features and the high scalability have enabled users worldwide to solve successfully many complex problems. 
Additionally LS-DYNA is extensively used to simulate impacts on structures from drop tests, underwater shock, explosions or high-velocity impacts. Explosive forming, process engineering, accident reconstruction, vehicle dynamics, thermal brake disc analysis or nuclear safety are further areas in the broad range of possible applications. In leading-edge research LS-DYNA is used to investigate the behavior of materials like composites, ceramics, concrete, or wood. Moreover, it is used in biomechanics, human modeling, molecular structures, casting, forging, or virtual testing. @@ -31,6 +30,6 @@ module load lsdyna /apps/engineering/lsdyna/lsdyna700s i=input.k ``` -Header of the PBS file (above) is common and description can be find on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution.html). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. +Header of the PBS file (above) is common and description can be find on [this site](../../job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. Working directory has to be created before sending PBS job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common LS-DYNA **.k** file which is attached to the LS-DYNA solver via parameter i= diff --git a/docs.it4i/anselm-cluster-documentation/software/chemistry/molpro.md b/docs.it4i/anselm/software/chemistry/molpro.md similarity index 73% rename from docs.it4i/anselm-cluster-documentation/software/chemistry/molpro.md rename to docs.it4i/anselm/software/chemistry/molpro.md index f345e50536c19178295df237e80f0b035a09a524..9b08cb6ec8d2137e936f391eae4af97789d4f229 100644 --- a/docs.it4i/anselm-cluster-documentation/software/chemistry/molpro.md +++ b/docs.it4i/anselm/software/chemistry/molpro.md @@ -1,40 +1,39 @@ -Molpro -====== +# Molpro Molpro is a complete system of ab initio programs for molecular electronic structure calculations. -About Molpro ------------- +## About Molpro + Molpro is a software package used for accurate ab-initio quantum chemistry calculations. More information can be found at the [official webpage](http://www.molpro.net/). -License -------- +## License + Molpro software package is available only to users that have a valid license. Please contact support to enable access to Molpro if you have a valid license appropriate for running on our cluster (eg. academic research group licence, parallel execution). To run Molpro, you need to have a valid license token present in " $HOME/.molpro/token". You can download the token from [Molpro website](https://www.molpro.net/licensee/?portal=licensee). -Installed version ------------------ +## Installed Version + Currently on Anselm is installed version 2010.1, patch level 45, parallel version compiled with Intel compilers and Intel MPI. 
Compilation parameters are default: -|Parameter|Value| -|---|---| -|max number of atoms|200| -|max number of valence orbitals|300| -|max number of basis functions|4095| -|max number of states per symmmetry|20| -|max number of state symmetries|16| -|max number of records|200| -|max number of primitives|maxbfn x [2]| - -Running ------- +| Parameter | Value | +| ---------------------------------- | ------------ | +| max number of atoms | 200 | +| max number of valence orbitals | 300 | +| max number of basis functions | 4095 | +| max number of states per symmmetry | 20 | +| max number of state symmetries | 16 | +| max number of records | 200 | +| max number of primitives | maxbfn x [2] | + +## Running + Molpro is compiled for parallel execution using MPI and OpenMP. By default, Molpro reads the number of allocated nodes from PBS and launches a data server on one node. On the remaining allocated nodes, compute processes are launched, one process per node, each with 16 threads. You can modify this behavior by using -n, -t and helper-server options. Please refer to the [Molpro documentation](http://www.molpro.net/info/2010.1/doc/manual/node9.html) for more details. -!!! Note "Note" - The OpenMP parallelization in Molpro is limited and has been observed to produce limited scaling. We therefore recommend to use MPI parallelization only. This can be achieved by passing option mpiprocs=16:ompthreads=1 to PBS. +!!! note + The OpenMP parallelization in Molpro is limited and has been observed to produce limited scaling. We therefore recommend to use MPI parallelization only. This can be achieved by passing option mpiprocs=16:ompthreads=1 to PBS. You are advised to use the -d option to point to a directory in [SCRATCH file system](../../storage/storage/). Molpro can produce a large amount of temporary data during its run, and it is important that these are placed in the fast scratch file system. diff --git a/docs.it4i/anselm/software/chemistry/nwchem.md b/docs.it4i/anselm/software/chemistry/nwchem.md new file mode 100644 index 0000000000000000000000000000000000000000..9f09fe794a121ddc173d3a037fe0e6e3e7101163 --- /dev/null +++ b/docs.it4i/anselm/software/chemistry/nwchem.md @@ -0,0 +1,42 @@ +# NWChem + +## Introduction + +NWChem aims to provide its users with computational chemistry tools that are scalable both in their ability to treat large scientific computational chemistry problems efficiently, and in their use of available parallel computing resources from high-performance parallel supercomputers to conventional workstation clusters. + +[Homepage](http://www.nwchem-sw.org/index.php/Main_Page) + +## Installed Versions + +The following versions are currently installed: + +* 6.1.1, not recommended, problems have been observed with this version +* 6.3-rev2-patch1, current release with QMD patch applied. Compiled with Intel compilers, MKL and Intel MPI +* 6.3-rev2-patch1-openmpi, same as above, but compiled with OpenMPI and NWChem provided BLAS instead of MKL. This version is expected to be slower +* 6.3-rev2-patch1-venus, this version contains only libraries for VENUS interface linking. Does not provide standalone NWChem executable + +For a current list of installed versions, execute: + +```bash + module avail nwchem +``` + +## Running + +NWChem is compiled for parallel MPI execution. Normal procedure for MPI jobs applies. 
Sample jobscript: + +```bash + #PBS -A IT4I-0-0 + #PBS -q qprod + #PBS -l select=1:ncpus=16 + + module add nwchem/6.3-rev2-patch1 + mpirun -np 16 nwchem h2o.nw +``` + +## Options + +Please refer to [the documentation](http://www.nwchem-sw.org/index.php/Release62:Top-level) and in the input file set the following directives : + +* MEMORY : controls the amount of memory NWChem will use +* SCRATCH_DIR : set this to a directory in [SCRATCH file system](../../storage/storage/#scratch) (or run the calculation completely in a scratch directory). For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, e.g.. "scf direct" diff --git a/docs.it4i/anselm-cluster-documentation/software/compilers.md b/docs.it4i/anselm/software/compilers.md similarity index 78% rename from docs.it4i/anselm-cluster-documentation/software/compilers.md rename to docs.it4i/anselm/software/compilers.md index ab8935abfffc8d3c973975bf007cf7f697d7e340..d1e59f29fd5c7862e8ad28780c1355ba837f8da1 100644 --- a/docs.it4i/anselm-cluster-documentation/software/compilers.md +++ b/docs.it4i/anselm/software/compilers.md @@ -1,25 +1,24 @@ -Compilers -========= +# Compilers -##Available compilers, including GNU, INTEL and UPC compilers +## Available Compilers, Including GNU, INTEL, and UPC Compilers Currently there are several compilers for different programming languages available on the Anselm cluster: -- C/C++ -- Fortran 77/90/95 -- Unified Parallel C -- Java -- NVIDIA CUDA +* C/C++ +* Fortran 77/90/95 +* Unified Parallel C +* Java +* NVIDIA CUDA The C/C++ and Fortran compilers are divided into two main groups GNU and Intel. -Intel Compilers ---------------- +## Intel Compilers + For information about the usage of Intel Compilers and other Intel products, please read the [Intel Parallel studio](intel-suite/) page. -GNU C/C++ and Fortran Compilers -------------------------------- -For compatibility reasons there are still available the original (old 4.4.6-4) versions of GNU compilers as part of the OS. These are accessible in the search path by default. +## GNU C/C++ and Fortran Compilers + +For compatibility reasons there are still available the original (old 4.4.6-4) versions of GNU compilers as part of the OS. These are accessible in the search path by default. It is strongly recommended to use the up to date version (4.8.1) which comes with the module gcc: @@ -40,14 +39,14 @@ With the module loaded two environment variables are predefined. One for maximum -O0 -g ``` -For more informations about the possibilities of the compilers, please see the man pages. +For more information about the possibilities of the compilers, please see the man pages. + +## Unified Parallel C -Unified Parallel C ------------------- UPC is supported by two compiler/runtime implementations: -- GNU - SMP/multi-threading support only -- Berkley - multi-node support as well as SMP/multi-threading support +* GNU - SMP/multi-threading support only +* Berkley - multi-node support as well as SMP/multi-threading support ### GNU UPC Compiler @@ -90,7 +89,7 @@ To run the example with 5 threads issue $ ./count.upc.x -fupc-threads-5 ``` -For more informations see the man pages. +For more information see the man pages. ### Berkley UPC Compiler @@ -103,7 +102,10 @@ To use the Berkley UPC compiler and runtime environment to run the binaries use As default UPC network the "smp" is used. This is very quick and easy way for testing/debugging, but limited to one node only. 
-For production runs, it is recommended to use the native Infiband implementation of UPC network "ibv". For testing/debugging using multiple nodes, the "mpi" UPC network is recommended. Please note, that **the selection of the network is done at the compile time** and not at runtime (as expected)! +For production runs, it is recommended to use the native Infiband implementation of UPC network "ibv". For testing/debugging using multiple nodes, the "mpi" UPC network is recommended. + +!!! warning + Selection of the network is done at the compile time and not at runtime (as expected)! Example UPC code: @@ -144,12 +146,12 @@ To run the example on two compute nodes using all 32 cores, with 32 threads, iss $ upcrun -n 32 ./hello.upc.x ``` -For more informations see the man pages. +For more information see the man pages. + +## Java -Java ----- For information how to use Java (runtime and/or compiler), please read the [Java page](java/). -NVIDIA CUDA ------------ +## NVIDIA CUDA + For information on how to work with NVIDIA CUDA, please read the [NVIDIA CUDA page](nvidia-cuda/). diff --git a/docs.it4i/anselm-cluster-documentation/software/comsol-multiphysics.md b/docs.it4i/anselm/software/comsol-multiphysics.md similarity index 79% rename from docs.it4i/anselm-cluster-documentation/software/comsol-multiphysics.md rename to docs.it4i/anselm/software/comsol-multiphysics.md index ee61c219bafd9c04cad860204863c68a1cb34c54..457c1aa8fc5d34a4429d5684f977da70d7683b4a 100644 --- a/docs.it4i/anselm-cluster-documentation/software/comsol-multiphysics.md +++ b/docs.it4i/anselm/software/comsol-multiphysics.md @@ -1,37 +1,36 @@ -COMSOL Multiphysics® -==================== +# COMSOL Multiphysics + +## Introduction -Introduction -------------------------- [COMSOL](http://www.comsol.com) is a powerful environment for modelling and solving various engineering and scientific problems based on partial differential equations. COMSOL is designed to solve coupled or multiphysics phenomena. For many standard engineering problems COMSOL provides add-on products such as electrical, mechanical, fluid flow, and chemical applications. -- [Structural Mechanics Module](http://www.comsol.com/structural-mechanics-module), -- [Heat Transfer Module](http://www.comsol.com/heat-transfer-module), -- [CFD Module](http://www.comsol.com/cfd-module), -- [Acoustics Module](http://www.comsol.com/acoustics-module), -- and [many others](http://www.comsol.com/products) +* [Structural Mechanics Module](http://www.comsol.com/structural-mechanics-module), +* [Heat Transfer Module](http://www.comsol.com/heat-transfer-module), +* [CFD Module](http://www.comsol.com/cfd-module), +* [Acoustics Module](http://www.comsol.com/acoustics-module), +* and [many others](http://www.comsol.com/products) COMSOL also allows an interface support for equation-based modelling of partial differential equations. -Execution ----------------------- +## Execution + On the Anselm cluster COMSOL is available in the latest stable version. There are two variants of the release: -- **Non commercial** or so called **EDU variant**, which can be used for research and educational purposes. -- **Commercial** or so called **COM variant**, which can used also for commercial activities. **COM variant** has only subset of features compared to the **EDU variant** available. More about licensing will be posted here soon. +* **Non commercial** or so called **EDU variant**, which can be used for research and educational purposes. 
+* **Commercial** or so called **COM variant**, which can used also for commercial activities. **COM variant** has only subset of features compared to the **EDU variant** available. More about licensing will be posted here soon. To load the of COMSOL load the module ```bash - $ module load comsol + $ module load comsol ``` By default the **EDU variant** will be loaded. If user needs other version or variant, load the particular version. To obtain the list of available versions use ```bash - $ module avail comsol + $ module avail comsol ``` If user needs to prepare COMSOL jobs in the interactive mode it is recommend to use COMSOL on the compute nodes via PBS Pro scheduler. In order run the COMSOL Desktop GUI on Windows is recommended to use the Virtual Network Computing (VNC). @@ -50,7 +49,7 @@ To run COMSOL in batch mode, without the COMSOL Desktop GUI environment, user ca #PBS -l select=3:ncpus=16 #PBS -q qprod #PBS -N JOB_NAME -#PBS -A PROJECT_ID +#PBS -A PROJECT_ID cd /scratch/$USER/ || exit @@ -72,9 +71,9 @@ comsol -nn ${ntask} batch -configuration /tmp –mpiarg –rmk –mpiarg pbs -tm Working directory has to be created before sending the (comsol.pbs) job script into the queue. Input file (name_input_f.mph) has to be in working directory or full path to input file has to be specified. The appropriate path to the temp directory of the job has to be set by command option (-tmpdir). -LiveLink™* *for MATLAB®^ -------------------------- -COMSOL is the software package for the numerical solution of the partial differential equations. LiveLink for MATLAB allows connection to the COMSOL**®** API (Application Programming Interface) with the benefits of the programming language and computing environment of the MATLAB. +## LiveLink for MATLAB + +COMSOL is the software package for the numerical solution of the partial differential equations. LiveLink for MATLAB allows connection to the COMSOL API (Application Programming Interface) with the benefits of the programming language and computing environment of the MATLAB. LiveLink for MATLAB is available in both **EDU** and **COM** **variant** of the COMSOL release. On Anselm 1 commercial (**COM**) license and the 5 educational (**EDU**) licenses of LiveLink for MATLAB (please see the [ISV Licenses](../isv_licenses/)) are available. Following example shows how to start COMSOL model from MATLAB via LiveLink in the interactive mode. @@ -96,7 +95,7 @@ To run LiveLink for MATLAB in batch mode with (comsol_matlab.pbs) job script you #PBS -l select=3:ncpus=16 #PBS -q qprod #PBS -N JOB_NAME -#PBS -A PROJECT_ID +#PBS -A PROJECT_ID cd /scratch/$USER || exit diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-ddt.md b/docs.it4i/anselm/software/debuggers/allinea-ddt.md similarity index 75% rename from docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-ddt.md rename to docs.it4i/anselm/software/debuggers/allinea-ddt.md index e09cd64a3f7724af581315d1e3a936b84faa381f..6c1c664fb22163d3f9eadd023486494870f2a0a9 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-ddt.md +++ b/docs.it4i/anselm/software/debuggers/allinea-ddt.md @@ -1,5 +1,4 @@ -Allinea Forge (DDT,MAP) -======================= +# Allinea Forge (DDT,MAP) Allinea Forge consist of two tools - debugger DDT and profiler MAP. @@ -7,20 +6,19 @@ Allinea DDT, is a commercial debugger primarily for debugging parallel MPI or Op Allinea MAP is a profiler for C/C++/Fortran HPC codes. 
It is designed for profiling parallel code, which uses pthreads, OpenMP or MPI. -License and Limitations for Anselm Users ----------------------------------------- +## License and Limitations for Anselm Users + On Anselm users can debug OpenMP or MPI code that runs up to 64 parallel processes. In case of debugging GPU or Xeon Phi accelerated codes the limit is 8 accelerators. These limitation means that: -- 1 user can debug up 64 processes, or -- 32 users can debug 2 processes, etc. +* 1 user can debug up 64 processes, or +* 32 users can debug 2 processes, etc. In case of debugging on accelerators: -- 1 user can debug on up to 8 accelerators, or -- 8 users can debug on single accelerator. +* 1 user can debug on up to 8 accelerators, or +* 8 users can debug on single accelerator. -Compiling Code to run with DDT ------------------------------- +## Compiling Code to Run With DDT ### Modules @@ -45,24 +43,23 @@ $ mpicc -g -O0 -o test_debug test.c $ mpif90 -g -O0 -o test_debug test.f ``` -### Compiler flags +### Compiler Flags Before debugging, you need to compile your code with theses flags: -!!! Note "Note" - - **g** : Generates extra debugging information usable by GDB. -g3 includes even more debugging information. This option is available for GNU and INTEL C/C++ and Fortran compilers. +!!! note + \* **g** : Generates extra debugging information usable by GDB. -g3 includes even more debugging information. This option is available for GNU and INTEL C/C++ and Fortran compilers. + \* **O0** : Suppress all optimizations. - - **O0** : Suppress all optimizations. +## Starting a Job With DDT -Starting a Job with DDT ------------------------ -Be sure to log in with an X window forwarding enabled. This could mean using the -X in the ssh: +Be sure to log in with an X window forwarding enabled. This could mean using the -X in the ssh: ```bash $ ssh -X username@anselm.it4i.cz ``` -Other options is to access login node using VNC. Please see the detailed information on how to [use graphic user interface on Anselm](https://docs.it4i.cz/anselm-cluster-documentation/software/debuggers/resolveuid/11e53ad0d2fd4c5187537f4baeedff33) +Other options is to access login node using VNC. 
Please see the detailed information on how to [use graphic user interface on Anselm](/general/accessing-the-clusters/graphical-user-interface/x-window-system/) From the login node an interactive session **with X windows forwarding** (-X option) can be started by following command: @@ -86,8 +83,8 @@ To start the debugging directly without the submission window, user can specify ddt -start -np 4 ./hello_debug_impi ``` -Documentation -------------- +## Documentation + Users can find original User Guide after loading the DDT module: ```bash diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-performance-reports.md b/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md similarity index 70% rename from docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-performance-reports.md rename to docs.it4i/anselm/software/debuggers/allinea-performance-reports.md index a563ec561798a03620183c58d3b647527a1316ab..614e6277ba5fcb8401b9a68668626709aa143ede 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-performance-reports.md +++ b/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md @@ -1,18 +1,15 @@ -Allinea Performance Reports -=========================== +# Allinea Performance Reports -##quick application profiling +## Introduction -Introduction ------------- Allinea Performance Reports characterize the performance of HPC application runs. After executing your application through the tool, a synthetic HTML report is generated automatically, containing information about several metrics along with clear behavior statements and hints to help you improve the efficiency of your runs. The Allinea Performance Reports is most useful in profiling MPI programs. Our license is limited to 64 MPI processes. -Modules -------- +## Modules + Allinea Performance Reports version 6.0 is available ```bash @@ -21,10 +18,10 @@ Allinea Performance Reports version 6.0 is available The module sets up environment variables, required for using the Allinea Performance Reports. This particular command loads the default module, which is performance reports version 4.2. -Usage ------ -!!! Note "Note" - Use the the perf-report wrapper on your (MPI) program. +## Usage + +!!! note + Use the the perf-report wrapper on your (MPI) program. Instead of [running your MPI program the usual way](../mpi/), use the the perf report wrapper: @@ -32,10 +29,10 @@ Instead of [running your MPI program the usual way](../mpi/), use the the perf r $ perf-report mpirun ./mympiprog.x ``` -The mpi program will run as usual. The perf-report creates two additional files, in *.txt and *.html format, containing the performance report. Note that [demanding MPI codes should be run within the queue system](../../resource-allocation-and-job-execution/job-submission-and-execution/). +The mpi program will run as usual. The perf-report creates two additional files, in \*.txt and \*.html format, containing the performance report. Note that [demanding MPI codes should be run within the queue system](../../job-submission-and-execution/). + +## Example -Example -------- In this example, we will be profiling the mympiprog.x MPI program, using Allinea performance reports. 
Assume that the code is compiled with Intel compilers and linked against Intel MPI library: First, we allocate some nodes via the express queue: @@ -59,4 +56,4 @@ Now lets profile the code: $ perf-report mpirun ./mympiprog.x ``` -Performance report files [mympiprog_32p*.txt](mympiprog_32p_2014-10-15_16-56.txt) and [mympiprog_32p*.html](mympiprog_32p_2014-10-15_16-56.html) were created. We can see that the code is very efficient on MPI and is CPU bounded. +Performance report files [mympiprog_32p\*.txt](../../../src/mympiprog_32p_2014-10-15_16-56.txt) and [mympiprog_32p\*.html](../../../src/mympiprog_32p_2014-10-15_16-56.html) were created. We can see that the code is very efficient on MPI and is CPU bounded. diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/cube.md b/docs.it4i/anselm/software/debuggers/cube.md similarity index 53% rename from docs.it4i/anselm-cluster-documentation/software/debuggers/cube.md rename to docs.it4i/anselm/software/debuggers/cube.md index a416deab5cd4873a6e9a19877d6b0545c7ea5dfc..a7f88955e78159f5800a37e603f91fa09e3ccdbe 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/cube.md +++ b/docs.it4i/anselm/software/debuggers/cube.md @@ -1,38 +1,36 @@ -CUBE -==== +# CUBE + +## Introduction -Introduction ------------- CUBE is a graphical performance report explorer for displaying data from Score-P and Scalasca (and other compatible tools). The name comes from the fact that it displays performance data in a three-dimensions : -- **performance metric**, where a number of metrics are available, such as communication time or cache misses, -- **call path**, which contains the call tree of your program -- s**ystem resource**, which contains system's nodes, processes and threads, depending on the parallel programming model. +* **performance metric**, where a number of metrics are available, such as communication time or cache misses, +* **call path**, which contains the call tree of your program +* **system resource**, which contains system's nodes, processes and threads, depending on the parallel programming model. Each dimension is organized in a tree, for example the time performance metric is divided into Execution time and Overhead time, call path dimension is organized by files and routines in your source code etc.  -*Figure 1. Screenshot of CUBE displaying data from Scalasca.* +\*Figure 1. Screenshot of CUBE displaying data from Scalasca.\* Each node in the tree is colored by severity (the color scheme is displayed at the bottom of the window, ranging from the least severe blue to the most severe being red). For example in Figure 1, we can see that most of the point-to-point MPI communication happens in routine exch_qbc, colored red. -Installed versions ------------------- +## Installed Versions + Currently, there are two versions of CUBE 4.2.3 available as [modules](../../environment-and-modules/): -- cube/4.2.3-gcc, compiled with GCC -- cube/4.2.3-icc, compiled with Intel compiler +* cube/4.2.3-gcc, compiled with GCC +* cube/4.2.3-icc, compiled with Intel compiler + +## Usage -Usage ------ CUBE is a graphical application. Refer to Graphical User Interface documentation for a list of methods to launch graphical applications on Anselm. -!!! Note "Note" - Analyzing large data sets can consume large amount of CPU and RAM. Do not perform large analysis on login nodes. +!!! note + Analyzing large data sets can consume large amount of CPU and RAM. Do not perform large analysis on login nodes. 
-After loading the appropriate module, simply launch cube command, or alternatively you can use scalasca -examine command to launch the GUI. Note that for Scalasca datasets, if you do not analyze the data with scalasca -examine before to opening them with CUBE, not all performance data will be available. +After loading the appropriate module, simply launch cube command, or alternatively you can use scalasca -examine command to launch the GUI. Note that for Scalasca datasets, if you do not analyze the data with scalasca -examine before to opening them with CUBE, not all performance data will be available. References -1. <http://www.scalasca.org/software/cube-4.x/download.html> - +1\. <http://www.scalasca.org/software/cube-4.x/download.html> diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/debuggers.md b/docs.it4i/anselm/software/debuggers/debuggers.md similarity index 85% rename from docs.it4i/anselm-cluster-documentation/software/debuggers/debuggers.md rename to docs.it4i/anselm/software/debuggers/debuggers.md index b36213ce422ff4e93328be866b62cd86c0ef74bc..dd2bc60d833d9fa269c1df98d895fb969a601cd7 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/debuggers.md +++ b/docs.it4i/anselm/software/debuggers/debuggers.md @@ -1,12 +1,11 @@ -Debuggers and profilers summary -=============================== +# Debuggers and profilers summary + +## Introduction -Introduction ------------- We provide state of the art programms and tools to develop, profile and debug HPC codes at IT4Innovations. On these pages, we provide an overview of the profiling and debugging tools available on Anslem at IT4I. -Intel debugger --------------- +## Intel Debugger + The intel debugger version 13.0 is available, via module intel. The debugger works for applications compiled with C and C++ compiler and the ifort fortran 77/90/95 compiler. The debugger provides java GUI environment. Use X display for running the GUI. ```bash @@ -16,8 +15,8 @@ The intel debugger version 13.0 is available, via module intel. The debugger wor Read more at the [Intel Debugger](intel-suite/intel-debugger/) page. -Allinea Forge (DDT/MAP) ------------------------ +## Allinea Forge (DDT/MAP) + Allinea DDT, is a commercial debugger primarily for debugging parallel MPI or OpenMP programs. It also has a support for GPU (CUDA) and Intel Xeon Phi accelerators. DDT provides all the standard debugging features (stack trace, breakpoints, watches, view variables, threads etc.) for every thread running as part of your program, or for every process even if these processes are distributed across a cluster using an MPI implementation. ```bash @@ -27,8 +26,8 @@ Allinea DDT, is a commercial debugger primarily for debugging parallel MPI or Op Read more at the [Allinea DDT](debuggers/allinea-ddt/) page. -Allinea Performance Reports ---------------------------- +## Allinea Performance Reports + Allinea Performance Reports characterize the performance of HPC application runs. After executing your application through the tool, a synthetic HTML report is generated automatically, containing information about several metrics along with clear behavior statements and hints to help you improve the efficiency of your runs. Our license is limited to 64 MPI processes. ```bash @@ -38,8 +37,8 @@ Allinea Performance Reports characterize the performance of HPC application runs Read more at the [Allinea Performance Reports](debuggers/allinea-performance-reports/) page. 
-RougeWave Totalview -------------------- +## RougeWave Totalview + TotalView is a source- and machine-level debugger for multi-process, multi-threaded programs. Its wide range of tools provides ways to analyze, organize, and test programs, making it easy to isolate and identify problems in individual threads and processes in programs of great complexity. ```bash @@ -49,8 +48,8 @@ TotalView is a source- and machine-level debugger for multi-process, multi-threa Read more at the [Totalview](debuggers/total-view/) page. -Vampir trace analyzer ---------------------- +## Vampir Trace Analyzer + Vampir is a GUI trace analyzer for traces in OTF format. ```bash @@ -58,4 +57,4 @@ Vampir is a GUI trace analyzer for traces in OTF format. $ vampir ``` -Read more at the [Vampir](../../salomon/software/debuggers/vampir/) page. +Read more at the [Vampir](vampir/) page. diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/intel-performance-counter-monitor.md b/docs.it4i/anselm/software/debuggers/intel-performance-counter-monitor.md similarity index 92% rename from docs.it4i/anselm-cluster-documentation/software/debuggers/intel-performance-counter-monitor.md rename to docs.it4i/anselm/software/debuggers/intel-performance-counter-monitor.md index 5ff0b98220f8253d40e360c981943793f9d742e7..f9e8e88dcaf2186ea59519f7a7b31305fd1287d6 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/intel-performance-counter-monitor.md +++ b/docs.it4i/anselm/software/debuggers/intel-performance-counter-monitor.md @@ -1,23 +1,22 @@ -Intel Performance Counter Monitor -================================= +# Intel Performance Counter Monitor + +## Introduction -Introduction ------------- Intel PCM (Performance Counter Monitor) is a tool to monitor performance hardware counters on Intel>® processors, similar to [PAPI](papi/). The difference between PCM and PAPI is that PCM supports only Intel hardware, but PCM can monitor also uncore metrics, like memory controllers and >QuickPath Interconnect links. -Installed version ------------------------------- +## Installed Version + Currently installed version 2.6. To load the [module](../../environment-and-modules/), issue: ```bash $ module load intelpcm ``` -Command line tools ------------------- +## Command Line Tools + PCM provides a set of tools to monitor system/or application. -### pcm-memory +### Pcm-Memory Measures memory bandwidth of your application or the whole system. Usage: @@ -53,28 +52,28 @@ Sample output: -- System Read Throughput(MB/s): 4.93 -- -- System Write Throughput(MB/s): 3.43 -- -- System Memory Throughput(MB/s): 8.35 -- - ---------------------------------------||--------------------------------------- + ---------------------------------------||--------------------------------------- ``` -### pcm-msr +### Pcm-Msr -Command pcm-msr.x can be used to read/write model specific registers of the CPU. +Command pcm-msr.x can be used to read/write model specific registers of the CPU. -### pcm-numa +### Pcm-Numa NUMA monitoring utility does not work on Anselm. -### pcm-pcie +### Pcm-Pcie -Can be used to monitor PCI Express bandwith. Usage: pcm-pcie.x <delay> +Can be used to monitor PCI Express bandwith. Usage: pcm-pcie.x <delay> -### pcm-power +### Pcm-Power -Displays energy usage and thermal headroom for CPU and DRAM sockets. Usage: pcm-power.x <delay> | <external program> +Displays energy usage and thermal headroom for CPU and DRAM sockets. 
Usage: `pcm-power.x <delay> | <external program>` -### pcm +### Pcm -This command provides an overview of performance counters and memory usage. Usage: pcm.x <delay> | <external program> +This command provides an overview of performance counters and memory usage. Usage: `pcm.x <delay> | <external program>` Sample output : @@ -185,16 +184,16 @@ Sample output : Cleaning up ``` -### pcm-sensor +### Pcm-Sensor Can be used as a sensor for ksysguard GUI, which is currently not installed on Anselm. -API ---- +## API + In a similar fashion to PAPI, PCM provides a C++ API to access the performance counter from within your application. Refer to the [Doxygen documentation](http://intel-pcm-api-documentation.github.io/classPCM.html) for details of the API. -!!! Note "Note" - Due to security limitations, using PCM API to monitor your applications is currently not possible on Anselm. (The application must be run as root user) +!!! note + Due to security limitations, using PCM API to monitor your applications is currently not possible on Anselm. (The application must be run as root user) Sample program using the API : @@ -275,8 +274,8 @@ Sample output: Bytes read:12513408 ``` -References ----------- -1. <https://software.intel.com/en-us/articles/intel-performance-counter-monitor-a-better-way-to-measure-cpu-utilization> -2. <https://software.intel.com/sites/default/files/m/3/2/2/xeon-e5-2600-uncore-guide.pdf> Intel® Xeon® Processor E5-2600 Product Family Uncore Performance Monitoring Guide. -3. <http://intel-pcm-api-documentation.github.io/classPCM.html> API Documentation +## References + +1. <https://software.intel.com/en-us/articles/intel-performance-counter-monitor-a-better-way-to-measure-cpu-utilization> +1. <https://software.intel.com/sites/default/files/m/3/2/2/xeon-e5-2600-uncore-guide.pdf> Intel® Xeon® Processor E5-2600 Product Family Uncore Performance Monitoring Guide. +1. <http://intel-pcm-api-documentation.github.io/classPCM.html> API Documentation diff --git a/docs.it4i/anselm/software/debuggers/intel-vtune-amplifier.md b/docs.it4i/anselm/software/debuggers/intel-vtune-amplifier.md new file mode 100644 index 0000000000000000000000000000000000000000..e9921046dd13f4b3b3b345f2666b426f2bd5ca9c --- /dev/null +++ b/docs.it4i/anselm/software/debuggers/intel-vtune-amplifier.md @@ -0,0 +1,73 @@ +# Intel VTune Amplifier + +## Introduction + +Intel VTune Amplifier, part of Intel Parallel studio, is a GUI profiling tool designed for Intel processors. It offers a graphical performance analysis of single core and multithreaded applications. A highlight of the features: + +* Hotspot analysis +* Locks and waits analysis +* Low level specific counters, such as branch analysis and memory + bandwidth +* Power usage analysis - frequency and sleep states. + + + +## Usage + +To launch the GUI, first load the module: + +```bash + $ module add VTune/2016_update1 +``` + +and launch the GUI : + +```bash + $ amplxe-gui +``` + +!!! note + To profile an application with VTune Amplifier, special kernel modules need to be loaded. The modules are not loaded on Anselm login nodes, thus direct profiling on login nodes is not possible. Use VTune on compute nodes and refer to the documentation on using GUI applications. + +The GUI will open in new window. Click on "_New Project..._" to create a new project. After clicking _OK_, a new window with project properties will appear. At "_Application:_", select the bath to your binary you want to profile (the binary should be compiled with -g flag). 
Some additional options such as command line arguments can be selected. At "_Managed code profiling mode:_" select "_Native_" (unless you want to profile managed mode .NET/Mono applications). After clicking _OK_, your project is created. + +To run a new analysis, click "_New analysis..._". You will see a list of possible analysis. Some of them will not be possible on the current CPU (e.g. Intel Atom analysis is not possible on Sandy Bridge CPU), the GUI will show an error box if you select the wrong analysis. For example, select "_Advanced Hotspots_". Clicking on _Start _will start profiling of the application. + +## Remote Analysis + +VTune Amplifier also allows a form of remote analysis. In this mode, data for analysis is collected from the command line without GUI, and the results are then loaded to GUI on another machine. This allows profiling without interactive graphical jobs. To perform a remote analysis, launch a GUI somewhere, open the new analysis window and then click the button "_Command line_" in bottom right corner. It will show the command line needed to perform the selected analysis. + +The command line will look like this: + +```bash + /apps/all/VTune/2016_update1/vtune_amplifier_xe_2016.1.1.434111/bin64/amplxe-cl -collect advanced-hotspots -knob collection-detail=stack-and-callcount -mrte-mode=native -target-duration-type=veryshort -app-working-dir /home/sta545/test -- /home/sta545/test_pgsesv +``` + +Copy the line to clipboard and then you can paste it in your jobscript or in command line. After the collection is run, open the GUI once again, click the menu button in the upper right corner, and select "_Open > Result..._". The GUI will load the results from the run. + +## Xeon Phi + +!!! note + This section is outdated. It will be updated with new information soon. + +It is possible to analyze both native and offload Xeon Phi applications. For offload mode, just specify the path to the binary. For native mode, you need to specify in project properties: + +Application: ssh + +Application parameters: mic0 source ~/.profile && /path/to/your/bin + +Note that we include source ~/.profile in the command to setup environment paths [as described here](../intel-xeon-phi/). + +!!! note + If the analysis is interrupted or aborted, further analysis on the card might be impossible and you will get errors like "ERROR connecting to MIC card". In this case please contact our support to reboot the MIC card. + +You may also use remote analysis to collect data from the MIC and then analyze it in the GUI later : + +```bash + $ amplxe-cl -collect knc-hotspots -no-auto-finalize -- ssh mic0 + "export LD_LIBRARY_PATH=/apps/intel/composer_xe_2015.2.164/compiler/lib/mic/:/apps/intel/composer_xe_2015.2.164/mkl/lib/mic/; export KMP_AFFINITY=compact; /tmp/app.mic" +``` + +## References + +1. 
<https://www.rcac.purdue.edu/tutorials/phi/PerformanceTuningXeonPhi-Tullos.pdf> Performance Tuning for Intel® Xeon Phi™ Coprocessors diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/papi.md b/docs.it4i/anselm/software/debuggers/papi.md similarity index 79% rename from docs.it4i/anselm-cluster-documentation/software/debuggers/papi.md rename to docs.it4i/anselm/software/debuggers/papi.md index 3bc686243940268ae0f58a52ea06ac6904a156f4..bc36923e83e2d464b40e41b3b43ce4316289c3f4 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/papi.md +++ b/docs.it4i/anselm/software/debuggers/papi.md @@ -1,16 +1,15 @@ -PAPI -==== +# PAPI + +## Introduction -Introduction ------------- Performance Application Programming Interface (PAPI) is a portable interface to access hardware performance counters (such as instruction counts and cache misses) found in most modern architectures. With the new component framework, PAPI is not limited only to CPU counters, but offers also components for CUDA, network, Infiniband etc. PAPI provides two levels of interface - a simpler, high level interface and more detailed low level interface. PAPI can be used with parallel as well as serial programs. -Usage ------ +## Usage + To use PAPI, load [module](../../environment-and-modules/) papi: ```bash @@ -19,11 +18,11 @@ To use PAPI, load [module](../../environment-and-modules/) papi: This will load the default version. Execute module avail papi for a list of installed versions. -Utilities --------- -The bin directory of PAPI (which is automatically added to $PATH upon loading the module) contains various utilites. +## Utilities + +The bin directory of PAPI (which is automatically added to $PATH upon loading the module) contains various utilites. -### papi_avail +### Papi_avail Prints which preset events are available on the current CPU. The third column indicated whether the preset event is available on the current CPU. @@ -61,52 +60,51 @@ Prints which preset events are available on the current CPU. The third column in .... ``` -### papi_native_avail +### Papi_native_avail Prints which native events are available on the current CPU. -### papi_cost +### Papi_cost Measures the cost (in cycles) of basic PAPI operations. -###papi_mem_info +### Papi_mem_info Prints information about the memory architecture of the current CPU. -PAPI API --------- +## PAPI API + PAPI provides two kinds of events: -- **Preset events** is a set of predefined common CPU events, standardized across platforms. -- **Native events **is a set of all events supported by the current hardware. This is a larger set of features than preset. For other components than CPU, only native events are usually available. +* **Preset events** is a set of predefined common CPU events, standardized across platforms. +* **Native events **is a set of all events supported by the current hardware. This is a larger set of features than preset. For other components than CPU, only native events are usually available. To use PAPI in your application, you need to link the appropriate include file. -- papi.h for C -- f77papi.h for Fortran 77 -- f90papi.h for Fortran 90 -- fpapi.h for Fortran with preprocessor +* papi.h for C +* f77papi.h for Fortran 77 +* f90papi.h for Fortran 90 +* fpapi.h for Fortran with preprocessor The include path is automatically added by papi module to $INCLUDE. -### High level API +### High Level API -Please refer to <http://icl.cs.utk.edu/projects/papi/wiki/PAPIC:High_Level> for a description of the High level API. 
+Please refer to [this description of the High level API](http://icl.cs.utk.edu/projects/papi/wiki/PAPIC:High_Level). -### Low level API +### Low Level API -Please refer to <http://icl.cs.utk.edu/projects/papi/wiki/PAPIC:Low_Level> for a description of the Low level API. +Please refer to [this description of the Low level API](http://icl.cs.utk.edu/projects/papi/wiki/PAPIC:Low_Level). ### Timers -PAPI provides the most accurate timers the platform can support. See <http://icl.cs.utk.edu/projects/papi/wiki/PAPIC:Timers> +PAPI provides the most accurate timers the platform can support. [See](http://icl.cs.utk.edu/projects/papi/wiki/PAPIC:Timers). -### System information +### System Information -PAPI can be used to query some system infromation, such as CPU name and MHz. See <http://icl.cs.utk.edu/projects/papi/wiki/PAPIC:System_Information> +PAPI can be used to query some system infromation, such as CPU name and MHz. [See](http://icl.cs.utk.edu/projects/papi/wiki/PAPIC:System_Information). -Example -------- +## Example The following example prints MFLOPS rate of a naive matrix-matrix multiplication: @@ -126,7 +124,7 @@ The following example prints MFLOPS rate of a naive matrix-matrix multiplication /* Initialize the Matrix arrays */ for ( i=0; i<SIZE*SIZE; i++ ){ mresult[0][i] = 0.0; - matrixa[0][i] = matrixb[0][i] = rand()*(float)1.1; + matrixa[0][i] = matrixb[0][i] = rand()*(float)1.1; } /* Setup PAPI library and begin collecting data from the counters */ @@ -192,8 +190,8 @@ Now the compiler won't remove the multiplication loop. (However it is still not ### Intel Xeon Phi -!!! Note "Note" - PAPI currently supports only a subset of counters on the Intel Xeon Phi processor compared to Intel Xeon, for example the floating point operations counter is missing. +!!! note + PAPI currently supports only a subset of counters on the Intel Xeon Phi processor compared to Intel Xeon, for example the floating point operations counter is missing. To use PAPI in [Intel Xeon Phi](../intel-xeon-phi/) native applications, you need to load module with " -mic" suffix, for example " papi/5.3.2-mic" : @@ -232,8 +230,8 @@ To use PAPI in offload mode, you need to provide both host and MIC versions of P $ icc matrix-offload.c -o matrix-offload -offload-option,mic,compiler,"-L$PAPI_HOME-mic/lib -lpapi" -lpapi ``` -References ----------- -1. <http://icl.cs.utk.edu/papi/> Main project page -2. <http://icl.cs.utk.edu/projects/papi/wiki/Main_Page> Wiki -3. <http://icl.cs.utk.edu/papi/docs/> API Documentation +## References + +1. [Main project page](http://icl.cs.utk.edu/papi/) +1. [Wiki](http://icl.cs.utk.edu/projects/papi/wiki/Main_Page) +1. [API Documentation](http://icl.cs.utk.edu/papi/docs/) diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/scalasca.md b/docs.it4i/anselm/software/debuggers/scalasca.md similarity index 55% rename from docs.it4i/anselm-cluster-documentation/software/debuggers/scalasca.md rename to docs.it4i/anselm/software/debuggers/scalasca.md index 76e227f196f4e834457f4becc4648b917e6cf2a8..19daec04e24247f40721c8ef61632d17290daa80 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/scalasca.md +++ b/docs.it4i/anselm/software/debuggers/scalasca.md @@ -1,36 +1,35 @@ -Scalasca -======== +# Scalasca + +## Introduction -Introduction -------------------------- [Scalasca](http://www.scalasca.org/) is a software tool that supports the performance optimization of parallel programs by measuring and analyzing their runtime behavior. 
The analysis identifies potential performance bottlenecks – in particular those concerning communication and synchronization – and offers guidance in exploring their causes. Scalasca supports profiling of MPI, OpenMP and hybrid MPI+OpenMP applications. -Installed versions ------------------- +## Installed Versions + There are currently two versions of Scalasca 2.0 [modules](../../environment-and-modules/) installed on Anselm: -- scalasca2/2.0-gcc-openmpi, for usage with [GNU Compiler](../compilers/) and [OpenMPI](../mpi/Running_OpenMPI/), -- scalasca2/2.0-icc-impi, for usage with [Intel Compiler](../compilers.html) and [Intel MPI](../mpi/running-mpich2/). +* scalasca2/2.0-gcc-openmpi, for usage with [GNU Compiler](../compilers/) and [OpenMPI](../mpi/Running_OpenMPI/), +* scalasca2/2.0-icc-impi, for usage with [Intel Compiler](../compilers/) and [Intel MPI](../mpi/running-mpich2/). + +## Usage -Usage ------ Profiling a parallel application with Scalasca consists of three steps: -1. Instrumentation, compiling the application such way, that the profiling data can be generated. -2. Runtime measurement, running the application with the Scalasca profiler to collect performance data. -3. Analysis of reports +1. Instrumentation, compiling the application such way, that the profiling data can be generated. +1. Runtime measurement, running the application with the Scalasca profiler to collect performance data. +1. Analysis of reports ### Instrumentation Instrumentation via " scalasca -instrument" is discouraged. Use [Score-P instrumentation](score-p/). -### Runtime measurement +### Runtime Measurement -After the application is instrumented, runtime measurement can be performed with the " scalasca -analyze" command. The syntax is: +After the application is instrumented, runtime measurement can be performed with the `scalasca -analyze` command. The syntax is: -scalasca -analyze [scalasca options] [launcher] [launcher options] [program] [program options] +`scalasca -analyze [scalasca options][launcher] [launcher options][program] [program options]` An example : @@ -40,13 +39,13 @@ An example : Some notable Scalasca options are: -**-t Enable trace data collection. By default, only summary data are collected.** -**-e <directory> Specify a directory to save the collected data to. By default, Scalasca saves the data to a directory with prefix scorep_, followed by name of the executable and launch configuration.** +* **-t Enable trace data collection. By default, only summary data are collected.** +* **-e <directory> Specify a directory to save the collected data to. By default, Scalasca saves the data to a directory with prefix scorep\_, followed by name of the executable and launch configuration.** -!!! Note "Note" - Scalasca can generate a huge amount of data, especially if tracing is enabled. Please consider saving the data to a [scratch directory](../../storage/storage/). +!!! note + Scalasca can generate a huge amount of data, especially if tracing is enabled. Please consider saving the data to a [scratch directory](../../storage/storage/). -### Analysis of reports +### Analysis of Reports For the analysis, you must have [Score-P](score-p/) and [CUBE](cube/) modules loaded. The analysis is done in two steps, first, the data is preprocessed and then CUBE GUI tool is launched. @@ -66,6 +65,6 @@ Alternatively you can open CUBE and load the data directly from here. Keep in mi Refer to [CUBE documentation](cube/) on usage of the GUI viewer. -References ----------- -1. 
<http://www.scalasca.org/> +## References + +1. <http://www.scalasca.org/> diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/score-p.md b/docs.it4i/anselm/software/debuggers/score-p.md similarity index 75% rename from docs.it4i/anselm-cluster-documentation/software/debuggers/score-p.md rename to docs.it4i/anselm/software/debuggers/score-p.md index c51794fb87d2943cdd7dffb06d23aa154d7c0f6a..929d971faa2a8b465754c5563b09fa32f554eef2 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/score-p.md +++ b/docs.it4i/anselm/software/debuggers/score-p.md @@ -1,28 +1,27 @@ -Score-P -======= +# Score-P + +## Introduction -Introduction ------------ The [Score-P measurement infrastructure](http://www.vi-hps.org/projects/score-p/) is a highly scalable and easy-to-use tool suite for profiling, event tracing, and online analysis of HPC applications. Score-P can be used as an instrumentation tool for [Scalasca](scalasca/). -Installed versions ------------------ +## Installed Versions + There are currently two versions of Score-P version 1.2.6 [modules](../../environment-and-modules/) installed on Anselm : -- scorep/1.2.3-gcc-openmpi, for usage with [GNU Compiler](../compilers/) and [OpenMPI](../mpi/Running_OpenMPI/) -- scorep/1.2.3-icc-impi, for usage with [Intel Compiler](../compilers.html)> and [Intel MPI](../mpi/running-mpich2/)>. +* scorep/1.2.3-gcc-openmpi, for usage with [GNU Compiler](../compilers/) and [OpenMPI](../mpi/Running_OpenMPI/) +* scorep/1.2.3-icc-impi, for usage with [Intel Compiler](../compilers/) and [Intel MPI](../mpi/running-mpich2/). + +## Instrumentation -Instrumentation --------------- There are three ways to instrument your parallel applications in order to enable performance data collection: -1. Automated instrumentation using compiler -2. Manual instrumentation using API calls -3. Manual instrumentation using directives +1. Automated instrumentation using compiler +1. Manual instrumentation using API calls +1. Manual instrumentation using directives -### Automated instrumentation +### Automated Instrumentation This is the easiest method. Score-P will automatically add instrumentation to every routine entry and exit using compiler hooks, and will intercept MPI calls and OpenMP regions. This method might, however, produce a large amount of data. If you want to focus profiling on specific regions of your code, consider using the manual instrumentation methods. To use automated instrumentation, simply prepend scorep to your compilation command. For example, replace: @@ -35,16 +34,16 @@ $ mpif90 -o myapp foo.o bar.o with: ```bash -$ scorep mpif90 -c foo.f90 -$ scorep mpif90 -c bar.f90 -$ scorep mpif90 -o myapp foo.o bar.o +$ scorep mpif90 -c foo.f90 +$ scorep mpif90 -c bar.f90 +$ scorep mpif90 -o myapp foo.o bar.o ``` -Usually your program is compiled using a Makefile or similar script, so it advisable to add the scorep command to your definition of variables CC, CXX, FCC etc. +Usually your program is compiled using a Makefile or similar script, so it is advisable to add the scorep command to your definition of variables CC, CXX, FCC etc. -It is important that scorep is prepended also to the linking command, in order to link with Score-P instrumentation libraries. +It is important that scorep is also prepended to the linking command, in order to link with the Score-P instrumentation libraries. -###Manual instrumentation using API calls +### Manual Instrumentation Using API Calls To use this kind of instrumentation, use scorep with switch --user. 
You will then mark regions to be instrumented by inserting API calls. @@ -77,9 +76,9 @@ An example in C/C++ : Please refer to the [documentation for description of the API](https://silc.zih.tu-dresden.de/scorep-current/pdf/scorep.pdf). -###Manual instrumentation using directives +### Manual Instrumentation Using Directives -This method uses POMP2 directives to mark regions to be instrumented. To use this method, use command scorep --pomp. +This method uses POMP2 directives to mark regions to be instrumented. To use this method, use the command scorep --pomp. Example directives in C/C++ : diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/total-view.md b/docs.it4i/anselm/software/debuggers/total-view.md similarity index 78% rename from docs.it4i/anselm-cluster-documentation/software/debuggers/total-view.md rename to docs.it4i/anselm/software/debuggers/total-view.md index 6df6ba9b95bfe15c151de6700b17a7e0e8d5f4c6..b4f710675111efe35ea5779625ac53046bc2722b 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/total-view.md +++ b/docs.it4i/anselm/software/debuggers/total-view.md @@ -1,10 +1,9 @@ -Total View -========== +# Total View -##TotalView is a GUI-based source code multi-process, multi-thread debugger. +TotalView is a GUI-based source code multi-process, multi-thread debugger. + +## License and Limitations for Anselm Users -License and Limitations for Anselm Users ---------------------------------------- On Anselm users can debug OpenMP or MPI code that runs up to 64 parallel processes. These limitations mean that: ```bash @@ -21,15 +20,15 @@ You can check the status of the licenses here: # totalview # ------------------------------------------------- - # FEATURE TOTAL USED AVAIL + # FEATURE TOTAL USED AVAIL # ------------------------------------------------- TotalView_Team 64 0 64 Replay 64 0 64 CUDA 64 0 64 ``` -Compiling Code to run with TotalView ------------------------------------ +## Compiling Code to Run With TotalView + ### Modules Load all necessary modules to compile the code. For example: @@ -54,17 +53,16 @@ Compile the code: mpif90 -g -O0 -o test_debug test.f ``` -### Compiler flags +### Compiler Flags Before debugging, you need to compile your code with these flags: -!!! Note "Note" - **-g** : Generates extra debugging information usable by GDB. **-g3** includes even more debugging information. This option is available for GNU and INTEL C/C++ and Fortran compilers. +!!! note + \* **-g** : Generates extra debugging information usable by GDB. **-g3** includes even more debugging information. This option is available for GNU and INTEL C/C++ and Fortran compilers. + \* **-O0** : Suppress all optimizations. - **-O0** : Suppress all optimizations. +## Starting a Job With TotalView -Starting a Job with TotalView ----------------------------- Be sure to log in with X window forwarding enabled. This could mean using the -X option in ssh: ```bash @@ -81,7 +79,7 @@ From the login node an interactive session with X windows forwarding (-X option) Then launch the debugger with the totalview command followed by the name of the executable to debug. -### Debugging a serial code +### Debugging a Serial Code To debug a serial code use: @@ -89,12 +87,12 @@ To debug a serial code use: totalview test_debug ``` -### Debugging a parallel code - option 1 +### Debugging a Parallel Code - Option 1 To debug a parallel code compiled with **OpenMPI** you need to set up your TotalView environment: -!!! 
Note "Note" - **Please note:** To be able to run parallel debugging procedure from the command line without stopping the debugger in the mpiexec source code you have to add the following function to your **~/.tvdrc** file: +!!! hint + To be able to run parallel debugging procedure from the command line without stopping the debugger in the mpiexec source code you have to add the following function to your `~/.tvdrc` file: ```bash proc mpi_auto_run_starter {loaded_id} { @@ -115,14 +113,15 @@ To debug a parallel code compiled with **OpenMPI** you need to setup your TotalV dlappend TV::image_load_callbacks mpi_auto_run_starter ``` + The source code of this function can be also found in ```bash /apps/mpi/openmpi/intel/1.6.5/etc/openmpi-totalview.tcl ``` -!!! Note "Note" - You can also add only following line to you ~/.tvdrc file instead of the entire function: +!!! note + You can also add only following line to you ~/.tvdrc file instead of the entire function: **source /apps/mpi/openmpi/intel/1.6.5/etc/openmpi-totalview.tcl** You need to do this step only once. @@ -141,7 +140,7 @@ At this point the main TotalView GUI window will appear and you can insert the b  -### Debugging a parallel code - option 2 +### Debugging a Parallel Code - Option 2 Other option to start new parallel debugging session from a command line is to let TotalView to execute mpirun by itself. In this case user has to specify a MPI implementation used to compile the source code. @@ -157,6 +156,6 @@ After running previous command you will see the same window as shown in the scre More information regarding the command line parameters of the TotalView can be found TotalView Reference Guide, Chapter 7: TotalView Command Syntax. -Documentation -------------- +## Documentation + [1] The [TotalView documentation](http://www.roguewave.com/support/product-documentation/totalview-family.aspx#totalview) web page is a good resource for learning more about some of the advanced TotalView features. diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/valgrind.md b/docs.it4i/anselm/software/debuggers/valgrind.md similarity index 90% rename from docs.it4i/anselm-cluster-documentation/software/debuggers/valgrind.md rename to docs.it4i/anselm/software/debuggers/valgrind.md index 1b0919431ac1fcb01425837d0087374d6ac6685e..2602fdbf24c9bdf16503740541ed81c536628b5a 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/valgrind.md +++ b/docs.it4i/anselm/software/debuggers/valgrind.md @@ -1,32 +1,31 @@ -Valgrind -======== +# Valgrind Valgrind is a tool for memory debugging and profiling. -About Valgrind --------------- +## About Valgrind + Valgrind is an open-source tool, used mainly for debuggig memory-related problems, such as memory leaks, use of uninitalized memory etc. in C/C++ applications. The toolchain was however extended over time with more functionality, such as debugging of threaded applications, cache profiling, not limited only to C/C++. Valgind is an extremely useful tool for debugging memory errors such as [off-by-one](http://en.wikipedia.org/wiki/Off-by-one_error). Valgrind uses a virtual machine and dynamic recompilation of binary code, because of that, you can expect that programs being debugged by Valgrind run 5-100 times slower. The main tools available in Valgrind are : -- **Memcheck**, the original, must used and default tool. Verifies memory access in you program and can detect use of unitialized memory, out of bounds memory access, memory leaks, double free, etc. 
-- **Massif**, a heap profiler. -- **Hellgrind** and **DRD** can detect race conditions in multi-threaded applications. -- **Cachegrind**, a cache profiler. -- **Callgrind**, a callgraph analyzer. -- For a full list and detailed documentation, please refer to the [official Valgrind documentation](http://valgrind.org/docs/). +* **Memcheck**, the original, most used and default tool. Verifies memory access in your program and can detect use of uninitialized memory, out-of-bounds memory access, memory leaks, double free, etc. +* **Massif**, a heap profiler. +* **Helgrind** and **DRD** can detect race conditions in multi-threaded applications. +* **Cachegrind**, a cache profiler. +* **Callgrind**, a callgraph analyzer. +* For a full list and detailed documentation, please refer to the [official Valgrind documentation](http://valgrind.org/docs/). + +## Installed Versions -Installed versions ------------------ There are two versions of Valgrind available on Anselm. -- Version 3.6.0, installed by operating system vendor in /usr/bin/valgrind. This version is available by default, without the need to load any module. This version however does not provide additional MPI support. -- Version 3.9.0 with support for Intel MPI, available in [module](../../environment-and-modules/) valgrind/3.9.0-impi. After loading the module, this version replaces the default valgrind. +* Version 3.6.0, installed by the operating system vendor in /usr/bin/valgrind. This version is available by default, without the need to load any module. This version, however, does not provide additional MPI support. +* Version 3.9.0 with support for Intel MPI, available in [module](../../environment-and-modules/) valgrind/3.9.0-impi. After loading the module, this version replaces the default valgrind. + +## Usage -Usage ----- Compile the application which you want to debug as usual. It is advisable to add compilation flags -g (to add debugging information to the binary so that you will see original source code lines in the output) and -O0 (to disable compiler optimizations). For example, let's look at this C code, which has two problems : @@ -56,7 +55,7 @@ Now, compile it with Intel compiler : Now, let's run it with Valgrind. The syntax is : - *valgrind [valgrind options] <your program binary> [your program options]* +`valgrind [valgrind options] <your program binary> [your program options]` If no Valgrind options are specified, Valgrind defaults to running the Memcheck tool. Please refer to the Valgrind documentation for a full description of command line options. @@ -132,8 +131,8 @@ In the output we can see that Valgrind has detected both errors - the off-by-one Now we can see that the memory leak is due to the malloc() at line 6. -Usage with MPI --------------------------- +## Usage With MPI + Although Valgrind is not primarily a parallel debugger, it can be used to debug parallel applications as well. When launching your parallel applications, prepend the valgrind command. For example : ```bash @@ -157,7 +156,7 @@ The default version without MPI support will however report a large number of fa ==30166== by 0x4008BD: main (valgrind-example-mpi.c:18) ``` -so it is better to use the MPI-enabled valgrind from module. The MPI version requires library /apps/tools/valgrind/3.9.0/impi/lib/valgrind/libmpiwrap-amd64-linux.so, which must be included in the LD_PRELOAD environment variable. +so it is better to use the MPI-enabled valgrind from the module. 
The MPI version requires library /apps/tools/valgrind/3.9.0/impi/lib/valgrind/libmpiwrap-amd64-linux.so, which must be included in the LD_PRELOAD environment variable. Let's look at this MPI example : diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/vampir.md b/docs.it4i/anselm/software/debuggers/vampir.md similarity index 70% rename from docs.it4i/anselm-cluster-documentation/software/debuggers/vampir.md rename to docs.it4i/anselm/software/debuggers/vampir.md index 129eb41ddbf9443e606f8254bb654846fe570877..1c3009c8a4fe820473b812ec0067a83e3d1922d7 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/vampir.md +++ b/docs.it4i/anselm/software/debuggers/vampir.md @@ -1,12 +1,11 @@ -hVampir -====== +# Vampir -Vampir is a commercial trace analysis and visualization tool. It can work with traces in OTF and OTF2 formats. It does not have the functionality to collect traces, you need to use a trace collection tool (such as [Score-P](../../../salomon/software/debuggers/score-p/)) first to collect the traces. +Vampir is a commercial trace analysis and visualization tool. It can work with traces in OTF and OTF2 formats. It does not have the functionality to collect traces; you need to use a trace collection tool (such as [Score-P](score-p/)) first to collect the traces.  -Installed versions ------------------ +## Installed Versions + Version 8.5.0 is currently installed as module Vampir/8.5.0 : ```bash @@ -14,10 +13,10 @@ Version 8.5.0 is currently installed as module Vampir/8.5.0 : $ vampir & ``` -User manual ----------- +## User Manual + You can find the detailed user manual in PDF format in $EBROOTVAMPIR/doc/vampir-manual.pdf -References ---------- +## References + [1]. <https://www.vampir.eu> diff --git a/docs.it4i/anselm-cluster-documentation/software/gpi2.md b/docs.it4i/anselm/software/gpi2.md similarity index 77% rename from docs.it4i/anselm-cluster-documentation/software/gpi2.md rename to docs.it4i/anselm/software/gpi2.md index d61fbed6f984945d0751ae4abf6e2e241ddffc1b..ec96e2653a3bfeb9614be13b969ff3273b3ee255 100644 --- a/docs.it4i/anselm-cluster-documentation/software/gpi2.md +++ b/docs.it4i/anselm/software/gpi2.md @@ -1,16 +1,13 @@ -GPI-2 -===== +# GPI-2 -##A library that implements the GASPI specification +## Introduction -Introduction ------------ Programming Next Generation Supercomputers: GPI-2 is an API library for asynchronous interprocess, cross-node communication. It provides a flexible, scalable and fault tolerant interface for parallel applications. The GPI-2 library ([www.gpi-site.com/gpi2/](http://www.gpi-site.com/gpi2/)) implements the GASPI specification (Global Address Space Programming Interface, [www.gaspi.de](http://www.gaspi.de/en/project.html)). GASPI is a Partitioned Global Address Space (PGAS) API. It aims at scalable, flexible and failure tolerant computing in massively parallel environments. -Modules ------- +## Modules + The GPI-2, version 1.0.2 is available on Anselm via module gpi2: ```bash @@ -19,14 +16,14 @@ The GPI-2, version 1.0.2 is available on Anselm via module gpi2: The module sets up environment variables, required for linking and running GPI-2 enabled applications. This particular command loads the default module, which is gpi2/1.0.2. -Linking ------- -!!! Note "Note" - Link with -lGPI2 -libverbs +## Linking + +!!! note + Link with -lGPI2 -libverbs Load the gpi2 module. Link using **-lGPI2** and **-libverbs** switches to link your code against GPI-2. 
The GPI-2 requires the OFED InfiniBand communication library ibverbs. -### Compiling and linking with Intel compilers +### Compiling and Linking With Intel Compilers ```bash $ module load intel $ icc myprog.c -o myprog.x -Wl,-rpath=$LIBRARY_PATH -lGPI2 -libverbs ``` -### Compiling and linking with GNU compilers +### Compiling and Linking With GNU Compilers ```bash $ module load gcc $ gcc myprog.c -o myprog.x -Wl,-rpath=$LIBRARY_PATH -lGPI2 -libverbs ``` -Running the GPI-2 codes ----------------------- +## Running the GPI-2 Codes -!!! Note "Note" - gaspi_run starts the GPI-2 application +!!! note + gaspi_run starts the GPI-2 application The gaspi_run utility is used to start and run GPI-2 applications: @@ -54,7 +50,7 @@ The gaspi_run utility is used to start and run GPI-2 applications: $ gaspi_run -m machinefile ./myprog.x ``` -A machine file (**machinefile**) with the hostnames of nodes where the application will run, must be provided. The machinefile lists all nodes on which to run, one entry per node per process. This file may be hand created or obtained from standard $PBS_NODEFILE: +A machine file (**machinefile**) with the hostnames of nodes where the application will run must be provided. The machinefile lists all nodes on which to run, one entry per node per process. This file may be hand created or obtained from standard $PBS_NODEFILE: ```bash $ cut -f1 -d"." $PBS_NODEFILE > machinefile @@ -80,8 +76,8 @@ machinefle: This machinefile will run 4 GPI-2 processes, 2 on node cn79 and 2 on node cn80. -!!! Note "Note" - Use the **mpiprocs** to control how many GPI-2 processes will run per node +!!! note + Use the **mpiprocs** to control how many GPI-2 processes will run per node Example: @@ -91,15 +87,14 @@ Example: This example will produce $PBS_NODEFILE with 16 entries per node. -### gaspi_logger +### Gaspi_logger -!!! Note "Note" - gaspi_logger views the output form GPI-2 application ranks +!!! note + gaspi_logger views the output from GPI-2 application ranks The gaspi_logger utility is used to view the output from all nodes except the master node (rank 0). The gaspi_logger is started, on another session, on the master node - the node where the gaspi_run is executed. The output of the application, when called with gaspi_printf(), will be redirected to the gaspi_logger. Other I/O routines (e.g. printf) will not. -Example ------- +## Example Following is an example GPI-2 enabled code: @@ -169,4 +164,4 @@ At the same time, in another session, you may start the gaspi logger: [cn80:0] Hello from rank 1 of 2 ``` -In this example, we compile the helloworld_gpi.c code using the **gnu compiler** (gcc) and link it to the GPI-2 and ibverbs library. The library search path is compiled in. For execution, we use the qexp queue, 2 nodes 1 core each. The GPI module must be loaded on the master compute node (in this example the cn79), gaspi_logger is used from different session to view the output of the second process. +In this example, we compile the helloworld_gpi.c code using the **gnu compiler** (gcc) and link it to the GPI-2 and ibverbs library. The library search path is compiled in. For execution, we use the qexp queue, 2 nodes 1 core each. 
The GPI module must be loaded on the master compute node (in this example cn79); gaspi_logger is used from a different session to view the output of the second process. diff --git a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-compilers.md b/docs.it4i/anselm/software/intel-suite/intel-compilers.md similarity index 72% rename from docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-compilers.md rename to docs.it4i/anselm/software/intel-suite/intel-compilers.md index 75ea441489d47ed7d5ea7f4e575e54ccffeba6c6..66de3b77a06d7333464336ada10d68cd3a899aa8 100644 --- a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-compilers.md +++ b/docs.it4i/anselm/software/intel-suite/intel-compilers.md @@ -1,5 +1,4 @@ -Intel Compilers -=============== +# Intel Compilers The Intel compilers version 13.1.1 are available, via module intel. The compilers include the icc C and C++ compiler and the ifort fortran 77/90/95 compiler. @@ -29,9 +28,9 @@ The compiler recognizes the omp, simd, vector and ivdep pragmas for OpenMP paral Read more at <http://software.intel.com/sites/products/documentation/doclib/stdxe/2013/composerxe/compiler/cpp-lin/index.htm> -Sandy Bridge/Haswell binary compatibility ----------------------------------------- +## Sandy Bridge/Haswell Binary Compatibility + Anselm nodes are currently equipped with Sandy Bridge CPUs, while Salomon will use Haswell architecture. >The new processors are backward compatible with the Sandy Bridge nodes, so all programs that ran on the Sandy Bridge processors, should also run on the new Haswell nodes. >To get optimal performance out of the Haswell processors a program should make use of the special AVX2 instructions for this processor. One can do this by recompiling codes with the compiler flags >designated to invoke these instructions. For the Intel compiler suite, there are two ways of doing this: -- Using compiler flag (both for Fortran and C): -xCORE-AVX2. This will create a binary with AVX2 instructions, specifically for the Haswell processors. Note that the executable will not run on Sandy Bridge nodes. -- Using compiler flags (both for Fortran and C): -xAVX -axCORE-AVX2. This will generate multiple, feature specific auto-dispatch code paths for Intel® processors, if there is a performance benefit. So this binary will run both on Sandy Bridge and Haswell processors. During runtime it will be decided which path to follow, dependent on which processor you are running on. In general this will result in larger binaries. +* Using compiler flag (both for Fortran and C): -xCORE-AVX2. This will create a binary with AVX2 instructions, specifically for the Haswell processors. Note that the executable will not run on Sandy Bridge nodes. +* Using compiler flags (both for Fortran and C): -xAVX -axCORE-AVX2. This will generate multiple, feature specific auto-dispatch code paths for Intel® processors, if there is a performance benefit. So this binary will run both on Sandy Bridge and Haswell processors. During runtime it will be decided which path to follow, dependent on which processor you are running on. In general this will result in larger binaries. 
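+ +For illustration, a minimal sketch of the two approaches (assuming a hypothetical source file myprog.c as a placeholder; the flags are those listed above): + +```bash +$ module load intel +# Haswell-only binary; it will not run on the Sandy Bridge nodes: +$ icc -xCORE-AVX2 -O2 -o myprog.x myprog.c +# Multi-path binary; runs on both Sandy Bridge and Haswell: +$ icc -xAVX -axCORE-AVX2 -O2 -o myprog.x myprog.c +```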
diff --git a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-debugger.md b/docs.it4i/anselm/software/intel-suite/intel-debugger.md similarity index 90% rename from docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-debugger.md rename to docs.it4i/anselm/software/intel-suite/intel-debugger.md index 92e19f9c03e985fc8660d19a4a5df5a09942f4be..f13086df7431676a95a75b5258a10667a3464c57 100644 --- a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-debugger.md +++ b/docs.it4i/anselm/software/intel-suite/intel-debugger.md @@ -1,8 +1,7 @@ -Intel Debugger -============== +# Intel Debugger + +## Debugging Serial Applications -Debugging serial applications ----------------------------- The intel debugger version 13.0 is available, via module intel. The debugger works for applications compiled with C and C++ compiler and the ifort fortran 77/90/95 compiler. The debugger provides a Java GUI environment. Use X display for running the GUI. ```bash @@ -33,11 +32,11 @@ Example: In this example, we allocate 1 full compute node, compile program myprog.c with debugging options -O0 -g and run the idb debugger interactively on the myprog.x executable. The GUI access is via X11 port forwarding provided by the PBS workload manager. -Debugging parallel applications ------------------------------- +## Debugging Parallel Applications + Intel debugger is capable of debugging multithreaded and MPI parallel programs as well. -### Small number of MPI ranks +### Small Number of MPI Ranks For debugging a small number of MPI ranks, you may execute and debug each rank in a separate xterm terminal (do not forget the X display). Using Intel MPI, this may be done in the following way: @@ -52,7 +51,7 @@ For debugging small number of MPI ranks, you may execute and debug each rank in In this example, we allocate 2 full compute nodes, run xterm on each node and start the idb debugger in command line mode, debugging two ranks of the mympiprog.x application. The xterm will pop up for each rank, with the idb prompt ready. The example is not limited to the use of Intel MPI -### Large number of MPI ranks +### Large Number of MPI Ranks Run the idb debugger from within the MPI debug option. This will cause the debugger to bind to all ranks and provide aggregated outputs across the ranks, pausing execution automatically just after startup. You may then set break points and step the execution manually. Using Intel MPI: @@ -65,11 +64,10 @@ Run the idb debugger from within the MPI debug option. This will cause the debug $ mpirun -n 32 -idb ./mympiprog.x ``` -### Debugging multithreaded application +### Debugging Multithreaded Application Run the idb debugger in GUI mode. The menu Parallel contains a number of tools for debugging multiple threads. One of the most useful tools is the **Serialize Execution** tool, which serializes execution of concurrent threads for easy orientation and identification of concurrency related bugs. 
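+ +A minimal sketch of such a session (assuming a hypothetical OpenMP program mytprog.c and an interactive qexp allocation with X forwarding; the -g -O0 flags follow the examples above): + +```bash +$ qsub -I -X -A NONE-0-0 -q qexp -l select=1:ncpus=16 +$ module load intel +$ icc -g -O0 -openmp -o mytprog.x mytprog.c +$ idb ./mytprog.x +```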
-Further information ------------------- -Exhaustive manual on idb features and usage is published at [Intel website](http://software.intel.com/sites/products/documentation/doclib/stdxe/2013/composerxe/debugger/user_guide/index.htm) +## Further Information +An exhaustive manual on idb features and usage is published at the [Intel website](http://software.intel.com/sites/products/documentation/doclib/stdxe/2013/composerxe/debugger/user_guide/index.htm) diff --git a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-integrated-performance-primitives.md b/docs.it4i/anselm/software/intel-suite/intel-integrated-performance-primitives.md similarity index 90% rename from docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-integrated-performance-primitives.md rename to docs.it4i/anselm/software/intel-suite/intel-integrated-performance-primitives.md index 08067b718f70cd6ad18bd7d62ceb3997c5799223..b92f8d05f62d9305f9624e592d388cf2744b5081 100644 --- a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-integrated-performance-primitives.md +++ b/docs.it4i/anselm/software/intel-suite/intel-integrated-performance-primitives.md @@ -1,12 +1,11 @@ -Intel IPP -========= +# Intel IPP + +## Intel Integrated Performance Primitives -Intel Integrated Performance Primitives --------------------------------------- Intel Integrated Performance Primitives, version 7.1.1, compiled for AVX vector instructions is available, via module ipp. The IPP is a very rich library of highly optimized algorithmic building blocks for media and data applications. This includes signal, image and frame processing algorithms, such as FFT, FIR, Convolution, Optical Flow, Hough transform, Sum, MinMax, as well as cryptographic functions, linear algebra functions and many more. -!!! Note "Note" - Check out IPP before implementing own math functions for data processing, it is likely already there. +!!! note + Check out IPP before implementing your own math functions for data processing; it is likely already there. ```bash $ module load ipp ``` The module sets up environment variables, required for linking and running ipp enabled applications. -IPP example ----------- +## IPP Example ```cpp #include "ipp.h" @@ -76,8 +74,8 @@ You will need the ipp module loaded to run the ipp enabled executable. This may $ icc testipp.c -o testipp.x -Wl,-rpath=$LIBRARY_PATH -lippi -lipps -lippcore ``` -Code samples and documentation ------------------------------ +## Code Samples and Documentation + Intel provides a number of [Code Samples for IPP](https://software.intel.com/en-us/articles/code-samples-for-intel-integrated-performance-primitives-library), illustrating the use of IPP. 
Read full documentation on IPP [on Intel website,](http://software.intel.com/sites/products/search/search.php?q=&x=15&y=6&product=ipp&version=7.1&docos=lin) in particular the [IPP Reference manual.](http://software.intel.com/sites/products/documentation/doclib/ipp_sa/71/ipp_manual/index.htm) diff --git a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-mkl.md b/docs.it4i/anselm/software/intel-suite/intel-mkl.md similarity index 65% rename from docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-mkl.md rename to docs.it4i/anselm/software/intel-suite/intel-mkl.md index 62bb0fae83c406ddec06615a41e91a071806962d..aed92ae69da6f721f676fa5e4180945711fe5fba 100644 --- a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-mkl.md +++ b/docs.it4i/anselm/software/intel-suite/intel-mkl.md @@ -1,18 +1,17 @@ -Intel MKL -========= +# Intel MKL + +## Intel Math Kernel Library -Intel Math Kernel Library -------------------------- Intel Math Kernel Library (Intel MKL) is a library of math kernel subroutines, extensively threaded and optimized for maximum performance. Intel MKL provides these basic math kernels: -- BLAS (level 1, 2, and 3) and LAPACK linear algebra routines, offering vector, vector-matrix, and matrix-matrix operations. -- The PARDISO direct sparse solver, an iterative sparse solver, and supporting sparse BLAS (level 1, 2, and 3) routines for solving sparse systems of equations. -- ScaLAPACK distributed processing linear algebra routines for Linux* and Windows* operating systems, as well as the Basic Linear Algebra Communications Subprograms (BLACS) and the Parallel Basic Linear Algebra Subprograms (PBLAS). -- Fast Fourier transform (FFT) functions in one, two, or three dimensions with support for mixed radices (not limited to sizes that are powers of 2), as well as distributed versions of these functions. -- Vector Math Library (VML) routines for optimized mathematical operations on vectors. -- Vector Statistical Library (VSL) routines, which offer high-performance vectorized random number generators (RNG) for several probability distributions, convolution and correlation routines, and summary statistics functions. -- Data Fitting Library, which provides capabilities for spline-based approximation of functions, derivatives and integrals of functions, and search. -- Extended Eigensolver, a shared memory version of an eigensolver based on the Feast Eigenvalue Solver. +* BLAS (level 1, 2, and 3) and LAPACK linear algebra routines, offering vector, vector-matrix, and matrix-matrix operations. +* The PARDISO direct sparse solver, an iterative sparse solver, and supporting sparse BLAS (level 1, 2, and 3) routines for solving sparse systems of equations. +* ScaLAPACK distributed processing linear algebra routines for Linux and Windows operating systems, as well as the Basic Linear Algebra Communications Subprograms (BLACS) and the Parallel Basic Linear Algebra Subprograms (PBLAS). +* Fast Fourier transform (FFT) functions in one, two, or three dimensions with support for mixed radices (not limited to sizes that are powers of 2), as well as distributed versions of these functions. +* Vector Math Library (VML) routines for optimized mathematical operations on vectors. +* Vector Statistical Library (VSL) routines, which offer high-performance vectorized random number generators (RNG) for several probability distributions, convolution and correlation routines, and summary statistics functions. 
+* Data Fitting Library, which provides capabilities for spline-based approximation of functions, derivatives and integrals of functions, and search. +* Extended Eigensolver, a shared memory version of an eigensolver based on the Feast Eigenvalue Solver. For details see the [Intel MKL Reference Manual](http://software.intel.com/sites/products/documentation/doclib/mkl_sa/11/mklman/index.htm). @@ -24,23 +23,23 @@ Intel MKL version 13.5.192 is available on Anselm The module sets up environment variables, required for linking and running mkl enabled applications. The most important variables are the $MKLROOT, $MKL_INC_DIR, $MKL_LIB_DIR and $MKL_EXAMPLES -!!! Note "Note" - The MKL library may be linked using any compiler. With intel compiler use -mkl option to link default threaded MKL. +!!! note + The MKL library may be linked using any compiler. With the Intel compiler, use the -mkl option to link the default threaded MKL. ### Interfaces The MKL library provides a number of interfaces. The fundamental ones are LP64 and ILP64. The Intel MKL ILP64 libraries use the 64-bit integer type (necessary for indexing large arrays, with more than 2^31-1 elements), whereas the LP64 libraries index arrays with the 32-bit integer type. -|Interface|Integer type| -|---|---| -|LP64|32-bit, int, integer(kind=4), MPI_INT| -|ILP64|64-bit, long int, integer(kind=8), MPI_INT64| +| Interface | Integer type | +| --------- | -------------------------------------------- | +| LP64 | 32-bit, int, integer(kind=4), MPI_INT | +| ILP64 | 64-bit, long int, integer(kind=8), MPI_INT64 | ### Linking Linking MKL libraries may be complex. The Intel [mkl link line advisor](http://software.intel.com/en-us/articles/intel-mkl-link-line-advisor) helps. See also the [examples](intel-mkl/#examples) below. -You will need the mkl module loaded to run the mkl enabled executable. This may be avoided, by compiling library search paths into the executable. Include rpath on the compile line: +You will need the mkl module loaded to run the mkl enabled executable. This may be avoided by compiling library search paths into the executable. Include rpath on the compile line: ```bash $ icc .... -Wl,-rpath=$LIBRARY_PATH ... ``` ### Threading -!!! Note "Note" - Advantage in using the MKL library is that it brings threaded parallelization to applications that are otherwise not parallel. +!!! note + An advantage of using the MKL library is that it brings threaded parallelization to applications that are otherwise not parallel. For this to work, the application must link the threaded MKL library (default). The number and behaviour of MKL threads may be controlled via the OpenMP environment variables, such as OMP_NUM_THREADS and KMP_AFFINITY. MKL_NUM_THREADS takes precedence over OMP_NUM_THREADS @@ -60,11 +59,11 @@ For this to work, the application must link the threaded MKL library (default). The application will run with 16 threads with affinity optimized for fine grain parallelization. -Examples ------------ +## Examples + A number of examples demonstrating the use of the MKL library and its linking are available on Anselm, in the $MKL_EXAMPLES directory. In the examples below, we demonstrate linking MKL to Intel and GNU compiled programs for multi-threaded matrix multiplication. 
-### Working with examples +### Working With Examples ```bash $ module load intel $ module load mkl $ cp -a $MKL_EXAMPLES/cblas /tmp/ $ cd /tmp/cblas $ make sointel64 function=cblas_dgemm ``` -In this example, we compile, link and run the cblas_dgemm example, demonstrating use of MKL example suite installed on Anselm. +In this example, we compile, link and run the cblas_dgemm example, demonstrating the use of the MKL example suite installed on Anselm. -### Example: MKL and Intel compiler +### Example: MKL and Intel Compiler ```bash $ module load intel $ module load mkl $ cp -a $MKL_EXAMPLES/cblas /tmp/ $ cd /tmp/cblas $ make sointel64 function=cblas_dgemm $ ./cblas_dgemmx.x data/cblas_dgemmx.d ``` -In this example, we compile, link and run the cblas_dgemm example, demonstrating use of MKL with icc -mkl option. Using the -mkl option is equivalent to: +In this example, we compile, link and run the cblas_dgemm example, demonstrating the use of MKL with the icc -mkl option. Using the -mkl option is equivalent to: ```bash $ icc -w source/cblas_dgemmx.c source/common_func.c -o cblas_dgemmx.x -I$MKL_INC_DIR -L$MKL_LIB_DIR -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 ``` -In this example, we compile and link the cblas_dgemm example, using LP64 interface to threaded MKL and Intel OMP threads implementation. +In this example, we compile and link the cblas_dgemm example, using the LP64 interface to the threaded MKL and the Intel OMP threads implementation. -### Example: MKL and GNU compiler +### Example: MKL and GNU Compiler ```bash $ module load gcc $ module load mkl $ cp -a $MKL_EXAMPLES/cblas /tmp/ $ cd /tmp/cblas $ make sognu $ ./cblas_dgemmx.x data/cblas_dgemmx.d ``` -In this example, we compile, link and run the cblas_dgemm example, using LP64 interface to threaded MKL and gnu OMP threads implementation. +In this example, we compile, link and run the cblas_dgemm example, using the LP64 interface to the threaded MKL and the GNU OMP threads implementation. + +## MKL and MIC Accelerators -MKL and MIC accelerators ------------------------ The MKL is capable of automatically offloading the computations to the MIC accelerator. See section [Intel XeonPhi](../intel-xeon-phi/) for details. -Further reading --------------- +## Further Reading + Read more on the [Intel website](http://software.intel.com/en-us/intel-mkl), in particular the [MKL users guide](https://software.intel.com/en-us/intel-mkl/documentation/linux). diff --git a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-tbb.md b/docs.it4i/anselm/software/intel-suite/intel-tbb.md similarity index 88% rename from docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-tbb.md rename to docs.it4i/anselm/software/intel-suite/intel-tbb.md index ccf79fef061da0c6078779f0d29f417a266216e6..3c2495ba8c0592df6556ab7c41c078dd3cedf5af 100644 --- a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-tbb.md +++ b/docs.it4i/anselm/software/intel-suite/intel-tbb.md @@ -1,8 +1,7 @@ -Intel TBB -========= +# Intel TBB + +## Intel Threading Building Blocks -Intel Threading Building Blocks -------------------------------- Intel Threading Building Blocks (Intel TBB) is a library that supports scalable parallel programming using standard ISO C++ code. It does not require special languages or compilers. To use the library, you specify tasks, not threads, and let the library map tasks onto threads in an efficient manner. 
The tasks are executed by a runtime scheduler and may be offloaded to the [MIC accelerator](../intel-xeon-phi/). @@ -14,11 +13,11 @@ Intel TBB version 4.1 is available on Anselm The module sets up environment variables, required for linking and running tbb enabled applications. -!!! Note "Note" - Link the tbb library, using -ltbb +!!! note + Link the tbb library using -ltbb + +## Examples -Examples -------- A number of examples demonstrating the use of TBB and its built-in scheduler are available on Anselm, in the $TBB_EXAMPLES directory. ```bash @@ -38,7 +37,6 @@ You will need the tbb module loaded to run the tbb enabled executable. This may $ icc -O2 -o primes.x main.cpp primes.cpp -Wl,-rpath=$LIBRARY_PATH -ltbb ``` -Further reading ---------------- -Read more on Intel website, <http://software.intel.com/sites/products/documentation/doclib/tbb_sa/help/index.htm> +## Further Reading +Read more on the Intel website: <http://software.intel.com/sites/products/documentation/doclib/tbb_sa/help/index.htm> diff --git a/docs.it4i/anselm-cluster-documentation/software/intel-suite/introduction.md b/docs.it4i/anselm/software/intel-suite/introduction.md similarity index 80% rename from docs.it4i/anselm-cluster-documentation/software/intel-suite/introduction.md rename to docs.it4i/anselm/software/intel-suite/introduction.md index 206c70444ca5dcb00613409a3ef27a17f9730ceb..f9f6f4093a1ed659c7cd4ed63bea944b4dd40ffe 100644 --- a/docs.it4i/anselm-cluster-documentation/software/intel-suite/introduction.md +++ b/docs.it4i/anselm/software/intel-suite/introduction.md @@ -1,18 +1,15 @@ -Intel Parallel Studio -===================== +# Intel Parallel Studio The Anselm cluster provides the following elements of the Intel Parallel Studio XE: -|Intel Parallel Studio XE| -|-------------------------------------------------| -|Intel Compilers| -|Intel Debugger| -|Intel MKL Library| -|Intel Integrated Performance Primitives Library| -|Intel Threading Building Blocks Library| +* Intel Compilers +* Intel Debugger +* Intel MKL Library +* Intel Integrated Performance Primitives Library +* Intel Threading Building Blocks Library + +## Intel Compilers -Intel compilers ---------------- The Intel compilers version 13.1.3 are available, via module intel. The compilers include the icc C and C++ compiler and the ifort fortran 77/90/95 compiler. ```bash @@ -23,8 +20,8 @@ The Intel compilers version 13.1.3 are available, via module intel. The compiler Read more at the [Intel Compilers](intel-compilers/) page. -Intel debugger -------------- +## Intel Debugger + The intel debugger version 13.0 is available, via module intel. The debugger works for applications compiled with C and C++ compiler and the ifort fortran 77/90/95 compiler. The debugger provides a Java GUI environment. Use X display for running the GUI. ```bash @@ -34,8 +31,8 @@ The intel debugger version 13.0 is available, via module intel. The debugger wor Read more at the [Intel Debugger](intel-debugger/) page. -Intel Math Kernel Library ------------------------- +## Intel Math Kernel Library + Intel Math Kernel Library (Intel MKL) is a library of math kernel subroutines, extensively threaded and optimized for maximum performance. Intel MKL unites and provides these basic components: BLAS, LAPACK, ScaLapack, PARDISO, FFT, VML, VSL, Data fitting, Feast Eigensolver and many more. ```bash @@ -44,8 +41,8 @@ Intel Math Kernel Library (Intel MKL) is a library of math kernel subroutines, e Read more at the [Intel MKL](intel-mkl/) page. 
-Intel Integrated Performance Primitives --------------------------------------- +## Intel Integrated Performance Primitives + Intel Integrated Performance Primitives, version 7.1.1, compiled for AVX is available, via module ipp. The IPP is a library of highly optimized algorithmic building blocks for media and data applications. This includes signal, image and frame processing algorithms, such as FFT, FIR, Convolution, Optical Flow, Hough transform, Sum, MinMax and many more. ```bash @@ -54,8 +51,8 @@ Intel Integrated Performance Primitives, version 7.1.1, compiled for AVX is avai Read more at the [Intel IPP](intel-integrated-performance-primitives/) page. -Intel Threading Building Blocks ------------------------------- +## Intel Threading Building Blocks + Intel Threading Building Blocks (Intel TBB) is a library that supports scalable parallel programming using standard ISO C++ code. It does not require special languages or compilers. It is designed to promote scalable data parallel programming. Additionally, it fully supports nested parallelism, so you can build larger parallel components from smaller parallel components. To use the library, you specify tasks, not threads, and let the library map tasks onto threads in an efficient manner. ```bash diff --git a/docs.it4i/anselm-cluster-documentation/software/intel-xeon-phi.md b/docs.it4i/anselm/software/intel-xeon-phi.md similarity index 85% rename from docs.it4i/anselm-cluster-documentation/software/intel-xeon-phi.md rename to docs.it4i/anselm/software/intel-xeon-phi.md index 80e76efecfacbe0a7158a13c0ca4658a695053ff..6937a4453e27aa9cc8b10b5594d7d2c48a72b03a 100644 --- a/docs.it4i/anselm-cluster-documentation/software/intel-xeon-phi.md +++ b/docs.it4i/anselm/software/intel-xeon-phi.md @@ -1,28 +1,27 @@ -Intel Xeon Phi -============== +# Intel Xeon Phi -##A guide to Intel Xeon Phi usage +## Guide to Intel Xeon Phi Usage Intel Xeon Phi can be programmed in several modes. The default mode on Anselm is offload mode, but all modes described in this document are supported. -Intel Utilities for Xeon Phi ---------------------------- +## Intel Utilities for Xeon Phi + To get access to a compute node with an Intel Xeon Phi accelerator, use the PBS interactive session ```bash - $ qsub -I -q qmic -A NONE-0-0 +$ qsub -I -q qmic -A NONE-0-0 ``` To set up the environment, module "Intel" has to be loaded ```bash - $ module load intel/13.5.192 +$ module load intel/13.5.192 ``` Information about the hardware can be obtained by running the micinfo program on the host. ```bash - $ /usr/bin/micinfo +$ /usr/bin/micinfo ``` The output of the "micinfo" utility executed on one of the Anselm nodes is as follows. (note: to get PCIe related details the command has to be run with root privileges) @@ -89,22 +88,25 @@ The output of the "micinfo" utility executed on one of the Anselm node is as fol GDDR Voltage : 1501000 uV ``` -Offload Mode ------------- +## Offload Mode + To compile a code for the Intel Xeon Phi, an MPSS stack has to be installed on the machine where compilation is executed. Currently the MPSS stack is only installed on compute nodes equipped with accelerators. ```bash - $ qsub -I -q qmic -A NONE-0-0 - $ module load intel/13.5.192 +$ qsub -I -q qmic -A NONE-0-0 +$ module load intel/13.5.192 ``` For debugging purposes it is also recommended to set the environment variable "OFFLOAD_REPORT". The value can be set from 0 to 3, where a higher number means more debugging information. 
```bash - export OFFLOAD_REPORT=3 +export OFFLOAD_REPORT=3 ``` -A very basic example of code that employs offload programming technique is shown in the next listing. Please note that this code is sequential and utilizes only single core of the accelerator. +A very basic example of code that employs the offload programming technique is shown in the next listing. + +!!! note + This code is sequential and utilizes only a single core of the accelerator. ```bash $ vim source-offload.cpp @@ -230,20 +232,20 @@ During the compilation Intel compiler shows which loops have been vectorized in Some interesting compiler flags useful not only for code debugging are: -!!! Note "Note" - Debugging +!!! note + Debugging openmp_report[0|1|2] - controls the OpenMP parallelizer diagnostic level vec-report[0|1|2] - controls the compiler based vectorization diagnostic level Performance optimization - xhost - FOR HOST ONLY - to generate AVX (Advanced Vector Extensions) instructions. + xhost - FOR HOST ONLY - to generate AVX (Advanced Vector Extensions) instructions. + +## Automatic Offload Using Intel MKL Library -Automatic Offload using Intel MKL Library ----------------------------------------- Intel MKL includes an Automatic Offload (AO) feature that enables computationally intensive MKL functions called in user code to benefit from attached Intel Xeon Phi coprocessors automatically and transparently. -Behavioral of automatic offload mode is controlled by functions called within the program or by environmental variables. Complete list of controls is listed [ here](http://software.intel.com/sites/products/documentation/doclib/mkl_sa/11/mkl_userguide_lnx/GUID-3DC4FC7D-A1E4-423D-9C0C-06AB265FFA86.htm). +The behavior of the automatic offload mode is controlled by functions called within the program or by environment variables. The complete list of controls is available [here](http://software.intel.com/sites/products/documentation/doclib/mkl_sa/11/mkl_userguide_lnx/GUID-3DC4FC7D-A1E4-423D-9C0C-06AB265FFA86.htm). The Automatic Offload may be enabled by either an MKL function call within the code: @@ -257,9 +259,9 @@ or by setting environment variable $ export MKL_MIC_ENABLE=1 ``` -To get more information about automatic offload please refer to "[Using Intel® MKL Automatic Offload on Intel ® Xeon Phi™ Coprocessors](http://software.intel.com/sites/default/files/11MIC42_How_to_Use_MKL_Automatic_Offload_0.pdf)" white paper or [ Intel MKL documentation](https://software.intel.com/en-us/articles/intel-math-kernel-library-documentation). +To get more information about automatic offload, please refer to the "[Using Intel® MKL Automatic Offload on Intel ® Xeon Phi™ Coprocessors](http://software.intel.com/sites/default/files/11MIC42_How_to_Use_MKL_Automatic_Offload_0.pdf)" white paper or the [Intel MKL documentation](https://software.intel.com/en-us/articles/intel-math-kernel-library-documentation). -### Automatic offload example +### Automatic Offload Example First, get an interactive PBS session on a node with a MIC accelerator and load the "intel" module, which automatically loads the "mkl" module as well. @@ -327,8 +329,8 @@ Following example show how to automatically offload an SGEMM (single precision - } ``` -!!! Note "Note" - Please note: This example is simplified version of an example from MKL. The expanded version can be found here: **$MKL_EXAMPLES/mic_ao/blasc/source/sgemm.c** +!!! note + This example is a simplified version of an example from MKL. The expanded version can be found here: `$MKL_EXAMPLES/mic_ao/blasc/source/sgemm.c`. 
To compile a code using the Intel compiler use: @@ -358,8 +360,8 @@ The output of a code should look similar to following listing, where lines start Done ``` -Native Mode ----------- +## Native Mode + In the native mode a program is executed directly on the Intel Xeon Phi without involvement of the host machine. Similarly to offload mode, the code is compiled on the host computer with Intel compilers. To compile a code, the user has to be connected to a compute node with a MIC and load the Intel compilers module. To get an interactive session on a compute node with an Intel Xeon Phi and load the module use the following commands: @@ -370,8 +372,8 @@ To compile a code user has to be connected to a compute with MIC and load Intel $ module load intel/13.5.192 ``` -!!! Note "Note" - Please note that particular version of the Intel module is specified. This information is used later to specify the correct library paths. +!!! note + A particular version of the Intel module is specified. This information is used later to specify the correct library paths. To produce a binary compatible with the Intel Xeon Phi architecture, the user has to specify the "-mmic" compiler flag. Two compilation examples are shown below. The first example shows how to compile OpenMP parallel code "vect-add.c" for host only: @@ -413,20 +415,19 @@ If the code is parallelized using OpenMP a set of additional libraries is requir mic0 $ export LD_LIBRARY_PATH=/apps/intel/composer_xe_2013.5.192/compiler/lib/mic:$LD_LIBRARY_PATH ``` -!!! Note "Note" - Please note that the path exported in the previous example contains path to a specific compiler (here the version is 5.192). This version number has to match with the version number of the Intel compiler module that was used to compile the code on the host computer. +!!! note + The path exported in the previous example contains the path to a specific compiler (here the version is 5.192). This version number has to match the version number of the Intel compiler module that was used to compile the code on the host computer. For your information the list of libraries and their location required for execution of an OpenMP parallel code on Intel Xeon Phi is: -!!! Note "Note" - /apps/intel/composer_xe_2013.5.192/compiler/lib/mic + +!!! note + /apps/intel/composer_xe_2013.5.192/compiler/lib/mic - libiomp5.so - libimf.so - - libsvml.so - - libirng.so - - libintlc.so.5 - + - libsvml.so + - libirng.so + - libintlc.so.5 Finally, to run the compiled code use: @@ ... $ ~/path_to_binary/vect-add-mic ``` -OpenCL ------------------- +## OpenCL + OpenCL (Open Computing Language) is an open standard for general-purpose parallel programming for diverse mix of multi-core CPUs, GPU coprocessors, and other parallel processors. OpenCL provides a flexible execution model and uniform programming environment for software developers to write portable code for systems running on both the CPU and graphics processors or accelerators like the Intel® Xeon Phi. On Anselm OpenCL is installed only on compute nodes with MIC accelerator, therefore OpenCL code can be compiled only on these nodes. @@ -500,8 +501,8 @@ After executing the complied binary file, following output should be displayed. ... ``` -!!! 
note + More information about this example can be found on the Intel website: <http://software.intel.com/en-us/vcsource/samples/caps-basic/> The second example that can be found in "/apps/intel/opencl-examples" directory is General Matrix Multiply. You can follow the same procedure to download the example to your directory and compile it. @@ -540,13 +541,12 @@ To see the performance of Intel Xeon Phi performing the DGEMM run the example as ... ``` -!!! Note "Note" - Please note: GNU compiler is used to compile the OpenCL codes for Intel MIC. You do not need to load Intel compiler module. +!!! warning + The GNU compiler is used to compile the OpenCL codes for the Intel MIC. You do not need to load the Intel compiler module. -MPI ----------------- +## MPI -### Environment setup and compilation +### Environment Setup and Compilation Again an MPI code for Intel Xeon Phi has to be compiled on a compute node with an accelerator and the MPSS software stack installed. To get to a compute node with an accelerator use: @@ -562,15 +562,15 @@ The only supported implementation of MPI standard for Intel Xeon Phi is Intel MP To compile an MPI code for host use: -```bash - $ mpiicc -xhost -o mpi-test mpi-test.c -```bash +```bash + $ mpiicc -xhost -o mpi-test mpi-test.c +``` -To compile the same code for Intel Xeon Phi architecture use: +To compile the same code for Intel Xeon Phi architecture use: -```bash - $ mpiicc -mmic -o mpi-test-mic mpi-test.c -``` +```bash + $ mpiicc -mmic -o mpi-test-mic mpi-test.c +``` An example of a basic MPI "hello-world" program in C, which can be executed on both the host and the Xeon Phi, is shown below (it can be directly copied and pasted to a .c file): @@ -599,18 +599,18 @@ An example of basic MPI version of "hello-world" example in C language, that can } ``` -### MPI programming models +### MPI Programming Models Intel MPI for the Xeon Phi coprocessors offers different MPI programming models: -!!! Note "Note" - **Host-only model** - all MPI ranks reside on the host. The coprocessors can be used by using offload pragmas. (Using MPI calls inside offloaded code is not supported.) +!!! note + **Host-only model** - all MPI ranks reside on the host. The coprocessors can be used by using offload pragmas. (Using MPI calls inside offloaded code is not supported.) - **Coprocessor-only model** - all MPI ranks reside only on the coprocessors. + **Coprocessor-only model** - all MPI ranks reside only on the coprocessors. - **Symmetric model** - the MPI ranks reside on both the host and the coprocessor. Most general MPI case. + **Symmetric model** - the MPI ranks reside on both the host and the coprocessor. Most general MPI case. -###Host-only model +### Host-Only Model In this case all environment variables are set by modules, so to execute the compiled MPI program on a single node, use: @@ ... The output should be similar to: Hello world from process 0 of 4 on host cn207 ``` -### Coprocessor-only model +### Coprocessor-Only Model There are two ways to execute an MPI code on a single coprocessor: 1.) launch the program using "**mpirun**" from the coprocessor; or 2.) launch the task using "**mpiexec.hydra**" from a host. -**Execution on coprocessor** +#### Execution on Coprocessor Similarly to execution of OpenMP programs in native mode, since the environment modules are not supported on the MIC, the user has to set up paths to Intel MPI libraries and binaries manually. A one-time setup can be done by creating a "**.profile**" file in the user's home directory. 
This file sets up the environment on the MIC automatically once user access to the accelerator through the SSH. @@ -650,10 +650,9 @@ Similarly to execution of OpenMP programs in native mode, since the environmenta export PATH=/apps/intel/impi/4.1.1.036/mic/bin/:$PATH ``` -!!! Note "Note" - Please note: - - this file sets up both environmental variable for both MPI and OpenMP libraries. - - this file sets up the paths to a particular version of Intel MPI library and particular version of an Intel compiler. These versions have to match with loaded modules. +!!! note + \* this file sets up both environmental variable for both MPI and OpenMP libraries. + \* this file sets up the paths to a particular version of Intel MPI library and particular version of an Intel compiler. These versions have to match with loaded modules. To access a MIC accelerator located on a node that user is currently connected to, use: @@ -682,7 +681,7 @@ The output should be similar to: Hello world from process 0 of 4 on host cn207-mic0 ``` -**Execution on host** +#### Execution on Host If the MPI program is launched from host instead of the coprocessor, the environmental variables are not set using the ".profile" file. Therefore user has to specify library paths from the command line when calling "mpiexec". @@ -704,10 +703,9 @@ or using mpirun $ mpirun -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/ -host mic0 -n 4 ~/mpi-test-mic ``` -!!! Note "Note" - Please note: - - the full path to the binary has to specified (here: "**>~/mpi-test-mic**") - - the LD_LIBRARY_PATH has to match with Intel MPI module used to compile the MPI code +!!! note + \* the full path to the binary has to specified (here: `>~/mpi-test-mic`) + \* the `LD_LIBRARY_PATH` has to match with Intel MPI module used to compile the MPI code The output should be again similar to: @@ -718,15 +716,17 @@ The output should be again similar to: Hello world from process 0 of 4 on host cn207-mic0 ``` -!!! Note "Note" - Please note that the **"mpiexec.hydra"** requires a file the MIC filesystem. If the file is missing please contact the system administrators. A simple test to see if the file is present is to execute: +!!! note + `mpiexec.hydra` requires a file the MIC filesystem. If the file is missing please contact the system administrators. + +A simple test to see if the file is present is to execute: ```bash $ ssh mic0 ls /bin/pmi_proxy /bin/pmi_proxy ``` -**Execution on host - MPI processes distributed over multiple accelerators on multiple nodes** +#### Execution on Host - MPI Processes Distributed Over Multiple Accelerators on Multiple Nodes** To get access to multiple nodes with MIC accelerator, user has to use PBS to allocate the resources. To start interactive session, that allocates 2 compute nodes = 2 MIC accelerators run qsub command with following parameters: @@ -751,12 +751,11 @@ For example: This output means that the PBS allocated nodes cn204 and cn205, which means that user has direct access to "**cn204-mic0**" and "**cn-205-mic0**" accelerators. -!!! Note "Note" - Please note: At this point user can connect to any of the allocated nodes or any of the allocated MIC accelerators using ssh: - - - to connect to the second node : ** $ ssh cn205** - - to connect to the accelerator on the first node from the first node: **$ ssh cn204-mic0** or **$ ssh mic0** - - to connect to the accelerator on the second node from the first node: **$ ssh cn205-mic0** +!!! 
note + At this point user can connect to any of the allocated nodes or any of the allocated MIC accelerators using ssh: + - to connect to the second node : `$ ssh cn205` + - to connect to the accelerator on the first node from the first node: `$ ssh cn204-mic0` or `$ ssh mic0` + - to connect to the accelerator on the second node from the first node: `$ ssh cn205-mic0` At this point we expect that correct modules are loaded and binary is compiled. For parallel execution the mpiexec.hydra is used. Again the first step is to tell mpiexec that the MPI can be executed on MIC accelerators by setting up the environmental variable "I_MPI_MIC" @@ -774,6 +773,7 @@ The launch the MPI program use: -host cn204-mic0 -n 4 ~/mpi-test-mic : -host cn205-mic0 -n 6 ~/mpi-test-mic ``` + or using mpirun: ```bash @@ -811,7 +811,7 @@ The same way MPI program can be executed on multiple hosts: : -host cn205 -n 6 ~/mpi-test ``` -###Symmetric model +### Symmetric Model In a symmetric mode MPI programs are executed on both host computer(s) and MIC accelerator(s). Since MIC has a different architecture and requires different binary file produced by the Intel compiler two different files has to be compiled before MPI program is executed. @@ -873,7 +873,7 @@ To run the MPI code using mpirun and the machine file "hosts_file_mix" use: A possible output of the MPI "hello-world" example executed on two hosts and two accelerators is: ```bash - Hello world from process 0 of 8 on host cn204 + Hello world from process 0 of 8 on host cn204 Hello world from process 1 of 8 on host cn204 Hello world from process 2 of 8 on host cn204-mic0 Hello world from process 3 of 8 on host cn204-mic0 @@ -883,22 +883,22 @@ A possible output of the MPI "hello-world" example executed on two hosts and two Hello world from process 7 of 8 on host cn205-mic0 ``` -!!! Note "Note" - Please note: At this point the MPI communication between MIC accelerators on different nodes uses 1Gb Ethernet only. +!!! note + At this point the MPI communication between MIC accelerators on different nodes uses 1Gb Ethernet only. -**Using the PBS automatically generated node-files** +### Using the PBS Automatically Generated Node-Files PBS also generates a set of node-files that can be used instead of manually creating a new one every time. Three node-files are genereated: -!!! Note "Note" - **Host only node-file:** +!!! note + **Host only node-file:** - /lscratch/${PBS_JOBID}/nodefile-cn MIC only node-file: - /lscratch/${PBS_JOBID}/nodefile-mic Host and MIC node-file: - /lscratch/${PBS_JOBID}/nodefile-mix -Please note each host or accelerator is listed only per files. User has to specify how many jobs should be executed per node using "-n" parameter of the mpirun command. +Each host or accelerator is listed only per files. User has to specify how many jobs should be executed per node using `-n` parameter of the mpirun command. 
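+A hedged sketch of what such a run might look like (the binary name and process count are placeholders; the node-file path comes from the list above):
+
+```bash
+# 4 processes spread over the accelerators listed in the MIC-only node-file
+$ export I_MPI_MIC=1
+$ mpirun -machinefile /lscratch/${PBS_JOBID}/nodefile-mic -n 4 ~/mpi-test-mic
+```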
+
+## Optimization

-Optimization
-------------
-For more details about optimization techniques please read Intel document [Optimization and Performance Tuning for Intel® Xeon Phi™ Coprocessors](http://software.intel.com/en-us/articles/optimization-and-performance-tuning-for-intel-xeon-phi-coprocessors-part-1-optimization "http://software.intel.com/en-us/articles/optimization-and-performance-tuning-for-intel-xeon-phi-coprocessors-part-1-optimization")
+For more details about optimization techniques please read the Intel document [Optimization and Performance Tuning for Intel® Xeon Phi™ Coprocessors](http://software.intel.com/en-us/articles/optimization-and-performance-tuning-for-intel-xeon-phi-coprocessors-part-1-optimization "http://software.intel.com/en-us/articles/optimization-and-performance-tuning-for-intel-xeon-phi-coprocessors-part-1-optimization")
diff --git a/docs.it4i/anselm-cluster-documentation/software/isv_licenses.md b/docs.it4i/anselm/software/isv_licenses.md
similarity index 54%
rename from docs.it4i/anselm-cluster-documentation/software/isv_licenses.md
rename to docs.it4i/anselm/software/isv_licenses.md
index 2303a969bfbf19d0ae5c1ca4b05ef955a3bd86b2..56270b51feca30fe2ec4f297da6cb0d6ee62d6e7 100644
--- a/docs.it4i/anselm-cluster-documentation/software/isv_licenses.md
+++ b/docs.it4i/anselm/software/isv_licenses.md
@@ -1,7 +1,6 @@
-ISV Licenses
-============
+# ISV Licenses

-##A guide to managing Independent Software Vendor licenses
+## Guide to Managing Independent Software Vendor Licenses

 On the Anselm cluster there are also installed commercial software applications, also known as ISV (Independent Software Vendor) software, which are subject to licensing. The licenses are limited and their usage may be restricted only to some users or user groups.

@@ -9,27 +8,27 @@ Currently Flex License Manager based licensing is supported on the cluster for p
 If an ISV application was purchased for educational (research) purposes and also for commercial purposes, then there are always two separate versions maintained and the suffix "edu" is used in the name of the non-commercial version.

-Overview of the licenses usage
-------------------------------
-!!! Note "Note"
-    The overview is generated every minute and is accessible from web or command line interface.
+## Overview of the Licenses Usage

-### Web interface
+!!! note
+    The overview is generated every minute and is accessible from web or command line interface.
+
+### Web Interface

 For each license there is a table, which provides the information about the name, number of available (purchased/licensed), number of used and number of free license features <https://extranet.it4i.cz/anselm/licenses>

-### Text interface
+### Text Interface

 For each license there is a unique text file, which provides the information about the name, number of available (purchased/licensed), number of used and number of free license features. The text files are accessible from the Anselm command prompt.

-|Product|File with license state|Note|
-|---|---|
-|ansys|/apps/user/licenses/ansys_features_state.txt|Commercial|
-|comsol|/apps/user/licenses/comsol_features_state.txt|Commercial|
-|comsol-edu|/apps/user/licenses/comsol-edu_features_state.txt|Non-commercial only|
-|matlab|/apps/user/licenses/matlab_features_state.txt|Commercial|
-|matlab-edu|/apps/user/licenses/matlab-edu_features_state.txt|Non-commercial only|
+| Product    | File with license state                           | Note                |
+| ---------- | ------------------------------------------------- | ------------------- |
+| ansys      | /apps/user/licenses/ansys_features_state.txt      | Commercial          |
+| comsol     | /apps/user/licenses/comsol_features_state.txt     | Commercial          |
+| comsol-edu | /apps/user/licenses/comsol-edu_features_state.txt | Non-commercial only |
+| matlab     | /apps/user/licenses/matlab_features_state.txt     | Commercial          |
+| matlab-edu | /apps/user/licenses/matlab-edu_features_state.txt | Non-commercial only |

 The file has a header which serves as a legend. All the info in the legend starts with a hash (#) so it can be easily filtered when parsing the file via a script.

 Example of the Commercial Matlab license state:

@@ -39,7 +38,7 @@ Example of the Commercial Matlab license state:
     $ cat /apps/user/licenses/matlab_features_state.txt
     # matlab
     # -------------------------------------------------
-    # FEATURE                       TOTAL   USED   AVAIL
+    # FEATURE                       TOTAL   USED   AVAIL
     # -------------------------------------------------
     MATLAB                          1       1      0
     SIMULINK                        1       0      1

@@ -54,49 +53,49 @@ Example of the Commercial Matlab license state:
     Statistics_Toolbox              1       0      1
 ```

-License tracking in PBS Pro scheduler and users usage
------------------------------------------------------
-Each feature of each license is accounted and checked by the scheduler of PBS Pro. If you ask for certain licenses, the scheduler won't start the job until the asked licenses are free (available). This prevents to crash batch jobs, just because of unavailability of the needed licenses.
+## License Tracking in PBS Pro Scheduler and User Usage

-The general format of the name is:
+Each feature of each license is accounted and checked by the scheduler of PBS Pro. If you ask for certain licenses, the scheduler won't start the job until the requested licenses are free (available). This prevents batch jobs from crashing due to unavailability of the needed licenses.

-**feature__APP__FEATURE**
+The general format of the name is `feature__APP__FEATURE`.
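+For example, a job requesting one MATLAB license feature might be submitted like this (an illustrative sketch only; the project ID, queue, and jobscript are placeholders, and the feature name follows the format above):
+
+```bash
+# PROJECT_ID, the queue and ./jobscript are placeholders
+$ qsub -A PROJECT_ID -q qprod -l select=1:ncpus=16 -l feature__matlab__MATLAB=1 ./jobscript
+```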
 Names of applications (APP):

-- ansys
-- comsol
-- comsol-edu
-- matlab
-- matlab-edu
+* ansys
+* comsol
+* comsol-edu
+* matlab
+* matlab-edu

 To get the FEATUREs of a license take a look into the corresponding state file ([see above](isv_licenses/#Licence)), or use:

-**Application and List of provided features**
-- **ansys** $ grep -v "#" /apps/user/licenses/ansys_features_state.txt | cut -f1 -d' '
-- **comsol** $ grep -v "#" /apps/user/licenses/comsol_features_state.txt | cut -f1 -d' '
-- **comsol-ed** $ grep -v "#" /apps/user/licenses/comsol-edu_features_state.txt | cut -f1 -d' '
-- **matlab** $ grep -v "#" /apps/user/licenses/matlab_features_state.txt | cut -f1 -d' '
-- **matlab-edu** $ grep -v "#" /apps/user/licenses/matlab-edu_features_state.txt | cut -f1 -d' '
+### Application and List of Provided Features
+
+* **ansys** $ grep -v "#" /apps/user/licenses/ansys_features_state.txt | cut -f1 -d' '
+* **comsol** $ grep -v "#" /apps/user/licenses/comsol_features_state.txt | cut -f1 -d' '
+* **comsol-edu** $ grep -v "#" /apps/user/licenses/comsol-edu_features_state.txt | cut -f1 -d' '
+* **matlab** $ grep -v "#" /apps/user/licenses/matlab_features_state.txt | cut -f1 -d' '
+* **matlab-edu** $ grep -v "#" /apps/user/licenses/matlab-edu_features_state.txt | cut -f1 -d' '

 Example of PBS Pro resource name, based on APP and FEATURE name:

-|Application |Feature |PBS Pro resource name |
-| --- | --- |
-|ansys |acfd |feature_ansys_acfd |
-|ansys |aa_r |feature_ansys_aa_r |
-|comsol |COMSOL |feature_comsol_COMSOL |
-|comsol |HEATTRANSFER |feature_comsol_HEATTRANSFER |
-|comsol-edu |COMSOLBATCH |feature_comsol-edu_COMSOLBATCH |
-|comsol-edu |STRUCTURALMECHANICS |feature_comsol-edu_STRUCTURALMECHANICS |
-|matlab |MATLAB |feature_matlab_MATLAB |
-|matlab |Image_Toolbox |feature_matlab_Image_Toolbox |
-|matlab-edu |MATLAB_Distrib_Comp_Engine |feature_matlab-edu_MATLAB_Distrib_Comp_Engine |
-|matlab-edu |Image_Acquisition_Toolbox |feature_matlab-edu_Image_Acquisition_Toolbox\ |
-
-**Be aware, that the resource names in PBS Pro are CASE SENSITIVE!**
-
-### Example of qsub statement
+| Application | Feature                    | PBS Pro resource name                           |
+| ----------- | -------------------------- | ----------------------------------------------- |
+| ansys       | acfd                       | feature__ansys__acfd                            |
+| ansys       | aa_r                       | feature__ansys__aa_r                            |
+| comsol      | COMSOL                     | feature__comsol__COMSOL                         |
+| comsol      | HEATTRANSFER               | feature__comsol__HEATTRANSFER                   |
+| comsol-edu  | COMSOLBATCH                | feature__comsol-edu__COMSOLBATCH                |
+| comsol-edu  | STRUCTURALMECHANICS        | feature__comsol-edu__STRUCTURALMECHANICS        |
+| matlab      | MATLAB                     | feature__matlab__MATLAB                         |
+| matlab      | Image_Toolbox              | feature__matlab__Image_Toolbox                  |
+| matlab-edu  | MATLAB_Distrib_Comp_Engine | feature__matlab-edu__MATLAB_Distrib_Comp_Engine |
+| matlab-edu  | Image_Acquisition_Toolbox  | feature__matlab-edu__Image_Acquisition_Toolbox  |
+
+!!! warning
+    Resource names in PBS Pro are case sensitive.
+
+### Example of qsub Statement

 Run an interactive PBS job with 1 Matlab EDU license, 1 Distributed Computing Toolbox and 32 Distributed Computing Engines (running on 32 cores):
diff --git a/docs.it4i/anselm-cluster-documentation/software/java.md b/docs.it4i/anselm/software/java.md
similarity index 96%
rename from docs.it4i/anselm-cluster-documentation/software/java.md
rename to docs.it4i/anselm/software/java.md
index 7159f790b9f39ed904caeeae78893bba3ebf05b5..ddf032eb4eef469e8c68de98f16965696b153c72 100644
--- a/docs.it4i/anselm-cluster-documentation/software/java.md
+++ b/docs.it4i/anselm/software/java.md
@@ -1,7 +1,6 @@
-Java
-====
+# Java

-##Java on ANSELM
+## Java on ANSELM

 Java is available on the Anselm cluster. Activate java by loading the java module

@@ -26,4 +25,3 @@ With the module loaded, not only the runtime environment (JRE), but also the dev
 ```

 Java applications may use MPI for inter-process communication, in conjunction with OpenMPI. Read more on <http://www.open-mpi.org/faq/?category=java>. This functionality is currently not supported on the Anselm cluster. In case you require the java interface to MPI, please contact [Anselm support](https://support.it4i.cz/rt/).
-
diff --git a/docs.it4i/anselm-cluster-documentation/software/mpi/Running_OpenMPI.md b/docs.it4i/anselm/software/mpi/Running_OpenMPI.md
similarity index 83%
rename from docs.it4i/anselm-cluster-documentation/software/mpi/Running_OpenMPI.md
rename to docs.it4i/anselm/software/mpi/Running_OpenMPI.md
index 4f0168872215e74c6114899674bd1b15bec2c730..8e11a3c163bcac6a711e18c4232a98a6acb5a16f 100644
--- a/docs.it4i/anselm-cluster-documentation/software/mpi/Running_OpenMPI.md
+++ b/docs.it4i/anselm/software/mpi/Running_OpenMPI.md
@@ -1,14 +1,13 @@
-Running OpenMPI
-==============
+# Running OpenMPI
+
+## OpenMPI Program Execution

-OpenMPI program execution
--------------------------
 The OpenMPI programs may be executed only via the PBS Workload manager, by entering an appropriate queue. On Anselm, the **bullxmpi-1.2.4.1** and **OpenMPI 1.6.5** are OpenMPI based MPI implementations.

-### Basic usage
+### Basic Usage

-!!! Note "Note"
-    Use the mpiexec to run the OpenMPI code.
+!!! note
+    Use the mpiexec to run the OpenMPI code.

 Example:

@@ -28,8 +27,8 @@ Example:
     Hello world! from rank 3 of 4 on host cn110
 ```

-!!! Note "Note"
-    Please be aware, that in this example, the directive **-pernode** is used to run only **one task per node**, which is normally an unwanted behaviour (unless you want to run hybrid code with just one MPI and 16 OpenMP tasks per node). In normal MPI programs **omit the -pernode directive** to run up to 16 MPI tasks per each node.
+!!! note
+    Please be aware that in this example, the directive **-pernode** is used to run only **one task per node**, which is normally an unwanted behaviour (unless you want to run hybrid code with just one MPI and 16 OpenMP tasks per node). In normal MPI programs **omit the -pernode directive** to run up to 16 MPI tasks on each node.

 In this example, we allocate 4 nodes via the express queue interactively. We set up the openmpi environment and interactively run the helloworld_mpi.x program. Note that the executable helloworld_mpi.x must be available within the same path on all nodes. This is automatically fulfilled on the /home and /scratch filesystem.

@@ -49,12 +48,12 @@ You need to preload the executable, if running on the local scratch /lscratch fi
 In this example, we assume the executable helloworld_mpi.x is present on compute node cn17 on local scratch. We call the mpiexec with the **--preload-binary** argument (valid for openmpi). The mpiexec will copy the executable from cn17 to the /lscratch/15210.srv11 directory on cn108, cn109 and cn110 and execute the program.

-!!! Note "Note"
-    MPI process mapping may be controlled by PBS parameters.
+!!! note
+    MPI process mapping may be controlled by PBS parameters.

 The mpiprocs and ompthreads parameters allow for selection of number of running MPI processes per node as well as number of OpenMP threads per MPI process.

-### One MPI process per node
+### One MPI Process Per Node

 Follow this example to run one MPI process per node, 16 threads per process.

@@ -68,7 +67,7 @@ Follow this example to run one MPI process per node, 16 threads per process.

 In this example, we demonstrate the recommended way to run an MPI application, using 1 MPI process per node and 16 threads per socket, on 4 nodes.

-### Two MPI processes per node
+### Two MPI Processes Per Node

 Follow this example to run two MPI processes per node, 8 threads per process. Note the options to mpiexec.

@@ -82,7 +81,7 @@ Follow this example to run two MPI processes per node, 8 threads per process. No

 In this example, we demonstrate the recommended way to run an MPI application, using 2 MPI processes per node and 8 threads per socket, each process and its threads bound to a separate processor socket of the node, on 4 nodes.

-### 16 MPI processes per node
+### 16 MPI Processes Per Node

 Follow this example to run 16 MPI processes per node, 1 thread per process. Note the options to mpiexec.

@@ -96,10 +95,10 @@ Follow this example to run 16 MPI processes per node, 1 thread per process. Note

 In this example, we demonstrate the recommended way to run an MPI application, using 16 MPI processes per node, single threaded. Each process is bound to a separate processor core, on 4 nodes.

-### OpenMP thread affinity
+### OpenMP Thread Affinity

-!!! Note "Note"
-    Important! Bind every OpenMP thread to a core!
+!!! note
+    Important! Bind every OpenMP thread to a core!

 In the previous two examples with one or two MPI processes per node, the operating system might still migrate OpenMP threads between cores. You might want to avoid this by setting these environment variables for GCC OpenMP:

@@ -110,18 +109,18 @@ In the previous two examples with one or two MPI processes per node, the operati
 or this one for Intel OpenMP:

 ```bash
-   $ export KMP_AFFINITY=granularity=fine,compact,1,0
-``
+$ export KMP_AFFINITY=granularity=fine,compact,1,0
+```

 As of OpenMP 4.0 (supported by GCC 4.9 and later and Intel 14.0 and later) the following variables may be used for Intel or GCC:

 ```bash
-   $ export OMP_PROC_BIND=true
-   $ export OMP_PLACES=cores
+$ export OMP_PROC_BIND=true
+$ export OMP_PLACES=cores
 ```

-OpenMPI Process Mapping and Binding
-------------------------------------------------
+## OpenMPI Process Mapping and Binding
+
 The mpiexec allows for precise selection of how the MPI processes will be mapped to the computational nodes and how these processes will bind to particular processor sockets and cores.

 MPI process mapping may be specified by a hostfile or rankfile input to the mpiexec program. Although all implementations of MPI provide means for process mapping and binding, the following examples are valid for the openmpi only.

@@ -153,8 +152,8 @@ In this example, we see that ranks have been mapped on nodes according to the or

 Exact control of MPI process placement and resource binding is provided by specifying a rankfile

-!!! Note "Note"
-    Appropriate binding may boost performance of your application.
+!!! note
+    Appropriate binding may boost performance of your application.

 Example rankfile

@@ -193,7 +192,7 @@ In this example we run 5 MPI processes (5 ranks) on four nodes. The rankfile def

 It is the user's responsibility to provide the correct number of ranks, sockets and cores.

-### Bindings verification
+### Bindings Verification

 In all cases, binding and threading may be verified by executing for example:

 ```bash

@@ -203,15 +202,15 @@

     $ mpiexec -bysocket -bind-to-socket echo $OMP_NUM_THREADS
 ```

-Changes in OpenMPI 1.8
-----------------------
+## Changes in OpenMPI 1.8
+
 Some options have changed in OpenMPI version 1.8.

-|version 1.6.5 |version 1.8.1 |
-| --- | --- |
-|--bind-to-none |--bind-to none |
-|--bind-to-core |--bind-to core |
-|--bind-to-socket |--bind-to socket |
-|-bysocket |--map-by socket |
-|-bycore |--map-by core |
-|-pernode |--map-by ppr:1:node |
+| version 1.6.5    | version 1.8.1       |
+| ---------------- | ------------------- |
+| --bind-to-none   | --bind-to none      |
+| --bind-to-core   | --bind-to core      |
+| --bind-to-socket | --bind-to socket    |
+| -bysocket        | --map-by socket     |
+| -bycore          | --map-by core       |
+| -pernode         | --map-by ppr:1:node |
diff --git a/docs.it4i/anselm-cluster-documentation/software/mpi/mpi.md b/docs.it4i/anselm/software/mpi/mpi.md
similarity index 61%
rename from docs.it4i/anselm-cluster-documentation/software/mpi/mpi.md
rename to docs.it4i/anselm/software/mpi/mpi.md
index 5f81e8ee4eb3855c549bf3f558431afd3073e3f7..bc60afb16ebee9968d942c0e4189f79705118276 100644
--- a/docs.it4i/anselm-cluster-documentation/software/mpi/mpi.md
+++ b/docs.it4i/anselm/software/mpi/mpi.md
@@ -1,17 +1,16 @@
-MPI
-===
+# MPI
+
+## Setting Up MPI Environment

-Setting up MPI Environment
---------------------------
 The Anselm cluster provides several implementations of the MPI library:

-|MPI Library |Thread support |
-| --- | --- |
-|The highly optimized and stable **bullxmpi 1.2.4.1** |Partial thread support up to MPI_THREAD_SERIALIZED |
-|The **Intel MPI 4.1** |Full thread support up to MPI_THREAD_MULTIPLE |
-|The [OpenMPI 1.6.5](href="http://www.open-mpi.org)| Full thread support up to MPI_THREAD_MULTIPLE, BLCR c/r support |
-|The OpenMPI 1.8.1 |Full thread support up to MPI_THREAD_MULTIPLE, MPI-3.0 support |
-|The **mpich2 1.9** |Full thread support up to MPI_THREAD_MULTIPLE, BLCR c/r support |
+| MPI Library                                          | Thread support                                                  |
+| ---------------------------------------------------- | --------------------------------------------------------------- |
+| The highly optimized and stable **bullxmpi 1.2.4.1** | Partial thread support up to MPI_THREAD_SERIALIZED              |
+| The **Intel MPI 4.1**                                | Full thread support up to MPI_THREAD_MULTIPLE                   |
+| The [OpenMPI 1.6.5](http://www.open-mpi.org)         | Full thread support up to MPI_THREAD_MULTIPLE, BLCR c/r support |
+| The OpenMPI 1.8.1                                    | Full thread support up to MPI_THREAD_MULTIPLE, MPI-3.0 support  |
+| The **mpich2 1.9**                                   | Full thread support up to MPI_THREAD_MULTIPLE, BLCR c/r support |

 MPI libraries are activated via the environment modules.
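+For instance, one of the implementations may be activated like this (an illustrative sketch; any module from the listing below can be loaded the same way):
+
+```bash
+# bullxmpi is one of the MPI modules shown in the listing below
+$ module load bullxmpi
+```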
@@ -20,7 +19,7 @@ Look up section modulefiles/mpi in module avail

 ```bash
     $ module avail
     ------------------------- /opt/modules/modulefiles/mpi -------------------------
-    bullxmpi/bullxmpi-1.2.4.1 mvapich2/1.9-icc
+    bullxmpi/bullxmpi-1.2.4.1  mvapich2/1.9-icc
     impi/4.0.3.008             openmpi/1.6.5-gcc(default)
     impi/4.1.0.024             openmpi/1.6.5-gcc46
     impi/4.1.0.030             openmpi/1.6.5-icc

@@ -32,15 +31,15 @@ Look up section modulefiles/mpi in module avail

 There are default compilers associated with any particular MPI implementation. The defaults may be changed; the MPI libraries may be used in conjunction with any compiler. The defaults are selected via the modules in following way:

-|Module|MPI|Compiler suite|
-|-------- |---|---|
-|PrgEnv-gnu|bullxmpi-1.2.4.1|bullx GNU 4.4.6|
-|PrgEnv-intel|Intel MPI 4.1.1|Intel 13.1.1|
-|bullxmpi|bullxmpi-1.2.4.1|none, select via module|
-|impi|Intel MPI 4.1.1|none, select via module|
-|openmpi|OpenMPI 1.6.5|GNU compilers 4.8.1, GNU compilers 4.4.6, Intel Compilers|
-|openmpi|OpenMPI 1.8.1|GNU compilers 4.8.1, GNU compilers 4.4.6, GNU compilers 4.9.0, Intel Compilers|
-|mvapich2|MPICH2 1.9|GNU compilers 4.8.1, GNU compilers 4.4.6, Intel Compilers|
+| Module       | MPI              | Compiler suite                                                                 |
+| ------------ | ---------------- | ------------------------------------------------------------------------------ |
+| PrgEnv-gnu   | bullxmpi-1.2.4.1 | bullx GNU 4.4.6                                                                |
+| PrgEnv-intel | Intel MPI 4.1.1  | Intel 13.1.1                                                                   |
+| bullxmpi     | bullxmpi-1.2.4.1 | none, select via module                                                        |
+| impi         | Intel MPI 4.1.1  | none, select via module                                                        |
+| openmpi      | OpenMPI 1.6.5    | GNU compilers 4.8.1, GNU compilers 4.4.6, Intel Compilers                      |
+| openmpi      | OpenMPI 1.8.1    | GNU compilers 4.8.1, GNU compilers 4.4.6, GNU compilers 4.9.0, Intel Compilers |
+| mvapich2     | MPICH2 1.9       | GNU compilers 4.8.1, GNU compilers 4.4.6, Intel Compilers                      |

 Examples:

@@ -59,10 +58,10 @@ To use openmpi with the intel compiler suite, use

 In this example, the openmpi 1.6.5 using intel compilers is activated

-Compiling MPI Programs
-----------------------
-!!! Note "Note"
-    After setting up your MPI environment, compile your program using one of the mpi wrappers
+## Compiling MPI Programs
+
+!!! note
+    After setting up your MPI environment, compile your program using one of the mpi wrappers

 ```bash
     $ mpicc -v

@@ -106,33 +105,33 @@ Compile the above example with

     $ mpicc helloworld_mpi.c -o helloworld_mpi.x
 ```

-Running MPI Programs
---------------------
-!!! Note "Note"
-    The MPI program executable must be compatible with the loaded MPI module.
+## Running MPI Programs
+
+!!! note
+    The MPI program executable must be compatible with the loaded MPI module.
    Always compile and execute using the very same MPI module.

 It is strongly discouraged to mix mpi implementations. Linking an application with one MPI implementation and running mpirun/mpiexec from another implementation may result in unexpected errors.

 The MPI program executable must be available within the same path on all nodes. This is automatically fulfilled on the /home and /scratch file system. You need to preload the executable, if running on the local scratch /lscratch file system.

-### Ways to run MPI programs
+### Ways to Run MPI Programs

 The optimal way to run an MPI program depends on its memory requirements, memory access pattern and communication pattern.

-!!! Note "Note"
-    Consider these ways to run an MPI program:
-
-    1. One MPI process per node, 16 threads per process
-    2. Two MPI processes per node, 8 threads per process
-    3. 16 MPI processes per node, 1 thread per process.
+!!! note
+    Consider these ways to run an MPI program:

+    1. One MPI process per node, 16 threads per process
+    2. Two MPI processes per node, 8 threads per process
+    3. 16 MPI processes per node, 1 thread per process.

 **One MPI** process per node, using 16 threads, is most useful for memory demanding applications that make good use of processor cache memory and are not memory bound. This is also a preferred way for communication intensive applications as one process per node enjoys full bandwidth access to the network interface.

 **Two MPI** processes per node, using 8 threads each, bound to a processor socket, is most useful for memory bandwidth bound applications such as BLAS1 or FFT, with scalable memory demand. However, note that the two processes will share access to the network interface. The 8 threads and socket binding should ensure maximum memory access bandwidth and minimize communication, migration and NUMA effect overheads.

-!!! Note "Note"
-    Important! Bind every OpenMP thread to a core!
+!!! note
+    Important! Bind every OpenMP thread to a core!

 In the previous two cases with one or two MPI processes per node, the operating system might still migrate OpenMP threads between cores. You want to avoid this by setting the KMP_AFFINITY or GOMP_CPU_AFFINITY environment variables.
diff --git a/docs.it4i/anselm-cluster-documentation/software/mpi/mpi4py-mpi-for-python.md b/docs.it4i/anselm/software/mpi/mpi4py-mpi-for-python.md
similarity index 90%
rename from docs.it4i/anselm-cluster-documentation/software/mpi/mpi4py-mpi-for-python.md
rename to docs.it4i/anselm/software/mpi/mpi4py-mpi-for-python.md
index 6c79215c00ddc14456508c3e5fcbb76fe0e7b88b..9625ed53e88575101548ddbe48687829ac18414c 100644
--- a/docs.it4i/anselm-cluster-documentation/software/mpi/mpi4py-mpi-for-python.md
+++ b/docs.it4i/anselm/software/mpi/mpi4py-mpi-for-python.md
@@ -1,16 +1,15 @@
-MPI4Py (MPI for Python)
-=======================
+# MPI4Py (MPI for Python)
+
+## Introduction

-Introduction
-------------
 MPI for Python provides bindings of the Message Passing Interface (MPI) standard for the Python programming language, allowing any Python program to exploit multiple processors.

 This package is constructed on top of the MPI-1/2 specifications and provides an object oriented interface which closely follows MPI-2 C++ bindings. It supports point-to-point (sends, receives) and collective (broadcasts, scatters, gathers) communications of any picklable Python object, as well as optimized communications of Python objects exposing the single-segment buffer interface (NumPy arrays, builtin bytes/string/array objects).

 On Anselm MPI4Py is available in standard Python modules.

-Modules
--------
+## Modules
+
 MPI4Py is built for OpenMPI. Before you start with MPI4Py you need to load the Python and OpenMPI modules.

 ```bash
     $ module load python
     $ module load openmpi
 ```

-Execution
----------
+## Execution
+
 You need to import MPI into your python program. Include the following line in the python script:

 ```cpp

@@ -38,10 +37,9 @@ For example

     $ mpiexec python hello_world.py
 ```

-Examples
---------
+## Examples

-### Hello world!
+### Hello World!

 ```cpp
 from mpi4py import MPI

@@ -53,7 +51,7 @@ Examples

 comm.Barrier()   # wait for everybody to synchronize
 ```

-###Collective Communication with NumPy arrays
+### Collective Communication With NumPy Arrays

 ```cpp
 from mpi4py import MPI

@@ -92,4 +90,4 @@ Execute the above code as:

     $ mpiexec -bycore -bind-to-core python hello_world.py
 ```

-In this example, we run MPI4Py enabled code on 4 nodes, 16 cores per node (total of 64 processes), each python process is bound to a different core. More examples and documentation can be found on [MPI for Python webpage](https://pythonhosted.org/mpi4py/usrman/index.html).
+In this example, we run MPI4Py enabled code on 4 nodes, 16 cores per node (total of 64 processes), each python process bound to a different core. More examples and documentation can be found on the [MPI for Python webpage](https://pypi.python.org/pypi/mpi4py).
diff --git a/docs.it4i/anselm-cluster-documentation/software/mpi/running-mpich2.md b/docs.it4i/anselm/software/mpi/running-mpich2.md
similarity index 89%
rename from docs.it4i/anselm-cluster-documentation/software/mpi/running-mpich2.md
rename to docs.it4i/anselm/software/mpi/running-mpich2.md
index d5cf06387e7958d8eb8a8fb8244488d8baffe363..64d3c620fddf82b25339d535fb984067924ef29a 100644
--- a/docs.it4i/anselm-cluster-documentation/software/mpi/running-mpich2.md
+++ b/docs.it4i/anselm/software/mpi/running-mpich2.md
@@ -1,14 +1,13 @@
-Running MPICH2
-==============
+# Running MPICH2
+
+## MPICH2 Program Execution

-MPICH2 program execution
-------------------------
 The MPICH2 programs use the mpd daemon or an ssh connection to spawn processes, no PBS support is needed. However the PBS allocation is required to access compute nodes. On Anselm, the **Intel MPI** and **mpich2 1.9** are MPICH2 based MPI implementations.

-### Basic usage
+### Basic Usage

-!!! Note "Note"
-    Use the mpirun to execute the MPICH2 code.
+!!! note
+    Use the mpirun to execute the MPICH2 code.

 Example:

@@ -42,14 +41,14 @@ You need to preload the executable, if running on the local scratch /lscratch fi

     Hello world! from rank 3 of 4 on host cn110
 ```

-In this example, we assume the executable helloworld_mpi.x is present on shared home directory. We run the cp command via mpirun, copying the executable from shared home to local scratch . Second mpirun will execute the binary in the /lscratch/15210.srv11 directory on nodes cn17, cn108, cn109 and cn110, one process per node.
+In this example, we assume the executable helloworld_mpi.x is present in the shared home directory. We run the cp command via mpirun, copying the executable from shared home to local scratch. The second mpirun will execute the binary in the /lscratch/15210.srv11 directory on nodes cn17, cn108, cn109 and cn110, one process per node.

-!!! Note "Note"
-    MPI process mapping may be controlled by PBS parameters.
+!!! note
+    MPI process mapping may be controlled by PBS parameters.

 The mpiprocs and ompthreads parameters allow for selection of number of running MPI processes per node as well as number of OpenMP threads per MPI process.

-### One MPI process per node
+### One MPI Process Per Node

 Follow this example to run one MPI process per node, 16 threads per process. Note that no options to mpirun are needed

@@ -63,7 +62,7 @@ Follow this example to run one MPI process per node, 16 threads per process. Not

 In this example, we demonstrate the recommended way to run an MPI application, using 1 MPI process per node and 16 threads per socket, on 4 nodes.
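+As a quick sanity check (an illustrative sketch, not part of the original text), running hostname under mpirun in the same allocation should print each of the 4 allocated node names exactly once:
+
+```bash
+# each allocated node should appear once, with a count of 1
+$ mpirun hostname | sort | uniq -c
+```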
-### Two MPI processes per node
+### Two MPI Processes Per Node

 Follow this example to run two MPI processes per node, 8 threads per process. Note the options to mpirun for mvapich2. No options are needed for impi.

@@ -77,7 +76,7 @@ Follow this example to run two MPI processes per node, 8 threads per process. No

 In this example, we demonstrate the recommended way to run an MPI application, using 2 MPI processes per node and 8 threads per socket, each process and its threads bound to a separate processor socket of the node, on 4 nodes.

-### 16 MPI processes per node
+### 16 MPI Processes Per Node

 Follow this example to run 16 MPI processes per node, 1 thread per process. Note the options to mpirun for mvapich2. No options are needed for impi.

@@ -91,10 +90,10 @@ Follow this example to run 16 MPI processes per node, 1 thread per process. Note

 In this example, we demonstrate the recommended way to run an MPI application, using 16 MPI processes per node, single threaded. Each process is bound to a separate processor core, on 4 nodes.

-### OpenMP thread affinity
+### OpenMP Thread Affinity

-!!! Note "Note"
-    Important! Bind every OpenMP thread to a core!
+!!! note
+    Important! Bind every OpenMP thread to a core!

 In the previous two examples with one or two MPI processes per node, the operating system might still migrate OpenMP threads between cores. You might want to avoid this by setting these environment variables for GCC OpenMP:

@@ -115,8 +114,8 @@ As of OpenMP 4.0 (supported by GCC 4.9 and later and Intel 14.0 and later) the f

     $ export OMP_PLACES=cores
 ```

-MPICH2 Process Mapping and Binding
-----------------------------------
+## MPICH2 Process Mapping and Binding
+
 The mpirun allows for precise selection of how the MPI processes will be mapped to the computational nodes and how these processes will bind to particular processor sockets and cores.

 ### Machinefile

@@ -150,7 +149,7 @@ In this example, we see that ranks have been mapped on nodes according to the or

 The Intel MPI automatically binds each process and its threads to the corresponding portion of cores on the processor socket of the node, no options needed. The binding is primarily controlled by environment variables. Read more about mpi process binding on the [Intel website](https://software.intel.com/sites/products/documentation/hpc/ics/impi/41/lin/Reference_Manual/Environment_Variables_Process_Pinning.htm). The MPICH2 uses the -bind-to option. Use -bind-to numa or -bind-to core to bind the process to a single core or an entire socket.

-### Bindings verification
+### Bindings Verification

 In all cases, binding and threading may be verified by executing

@@ -159,7 +158,6 @@ In all cases, binding and threading may be verified by executing

     $ mpirun -bindto numa echo $OMP_NUM_THREADS
 ```

-Intel MPI on Xeon Phi
----------------------
+## Intel MPI on Xeon Phi

 The [MPI section of Intel Xeon Phi chapter](../intel-xeon-phi/) provides details on how to run Intel MPI code on Xeon Phi architecture.
diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/introduction.md b/docs.it4i/anselm/software/numerical-languages/introduction.md
similarity index 89%
rename from docs.it4i/anselm-cluster-documentation/software/numerical-languages/introduction.md
rename to docs.it4i/anselm/software/numerical-languages/introduction.md
index 9d54dceac9b0d77e4a4a9ca4313ff2cf0cb3b875..67493f1f7d099c0c9a8986b2118bff77aa4dd38b 100644
--- a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/introduction.md
+++ b/docs.it4i/anselm/software/numerical-languages/introduction.md
@@ -1,14 +1,13 @@
-Numerical languages
-===================
+# Numerical Languages

 Interpreted languages for numerical computations and analysis

-Introduction
-------------
+## Introduction
+
 This section contains a collection of high-level interpreted languages, primarily intended for numerical computations.

-Matlab
-------
+## Matlab
+
 MATLAB® is a high-level language and interactive environment for numerical computation, visualization, and programming.

 ```bash

@@ -18,8 +17,8 @@ MATLAB® is a high-level language and interactive environment for numerical comp

 Read more at the [Matlab page](matlab/).

-Octave
-------
+## Octave
+
 GNU Octave is a high-level interpreted language, primarily intended for numerical computations. The Octave language is quite similar to Matlab so that most programs are easily portable.

 ```bash

@@ -29,8 +28,7 @@ GNU Octave is a high-level interpreted language, primarily intended for numerica

 Read more at the [Octave page](octave/).

-R
----
+## R

 R is an interpreted language and environment for statistical computing and graphics.
diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab.md b/docs.it4i/anselm/software/numerical-languages/matlab.md
similarity index 78%
rename from docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab.md
rename to docs.it4i/anselm/software/numerical-languages/matlab.md
index dbe107990dfa538f177344017b77a58a80117c5a..d7c3d907452ca38deea8f07235170ead3114c1eb 100644
--- a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab.md
+++ b/docs.it4i/anselm/software/numerical-languages/matlab.md
@@ -1,12 +1,11 @@
-Matlab
-======
+# Matlab
+
+## Introduction

-Introduction
-------------
 Matlab is available in versions R2015a and R2015b. There are always two variants of the release:

-- Non commercial or so called EDU variant, which can be used for common research and educational purposes.
-- Commercial or so called COM variant, which can used also for commercial activities. The licenses for commercial variant are much more expensive, so usually the commercial variant has only subset of features compared to the EDU available.
+* Non-commercial, or so-called EDU variant, which can be used for common research and educational purposes.
+* Commercial, or so-called COM variant, which can also be used for commercial activities. The licenses for the commercial variant are much more expensive, so usually the commercial variant has only a subset of features compared to the EDU variant.

 To load the latest version of Matlab load the module

@@ -22,9 +21,9 @@ By default the EDU variant is marked as default. If you need other version or va

 If you need to use the Matlab GUI to prepare your Matlab programs, you can use Matlab directly on the login nodes. But for all computations use Matlab on the compute nodes via the PBS Pro scheduler.
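+A minimal illustrative sketch of requesting a compute node before starting Matlab there (the queue and project ID are placeholders; see the sections below for complete examples):
+
+```bash
+# PROJECT_ID is a placeholder
+$ qsub -I -q qexp -A PROJECT_ID -l select=1:ncpus=16
+$ module load MATLAB/2015a-EDU
+```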
-If you require the Matlab GUI, please follow the general informations about [running graphical applications](../../../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/).
+If you require the Matlab GUI, please follow the general information about [running graphical applications](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/).

-Matlab GUI is quite slow using the X forwarding built in the PBS (qsub -X), so using X11 display redirection either via SSH or directly by xauth (please see the "GUI Applications on Compute Nodes over VNC" part [here](../../../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/x-window-system/)) is recommended.
+Matlab GUI is quite slow using the X forwarding built in the PBS (qsub -X), so using X11 display redirection either via SSH or directly by xauth (please see the "GUI Applications on Compute Nodes over VNC" part [here](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/x-window-system/)) is recommended.

 To run Matlab with GUI, use

@@ -40,10 +39,10 @@ To run Matlab in text mode, without the Matlab Desktop GUI environment, use

 plots, images, etc... will be still available.

-Running parallel Matlab using Distributed Computing Toolbox / Engine
-------------------------------------------------------------------------
-!!! Note "Note"
-    Distributed toolbox is available only for the EDU variant
+## Running Parallel Matlab Using Distributed Computing Toolbox / Engine
+
+!!! note
+    Distributed toolbox is available only for the EDU variant

 The MPIEXEC mode available in previous versions is no longer available in MATLAB 2015. Also, the programming interface has changed. Refer to [Release Notes](http://www.mathworks.com/help/distcomp/release-notes.html#buanp9e-1).

@@ -59,18 +58,18 @@ To use Distributed Computing, you first need to setup a parallel profile. We hav

     SalomonPBSPro
 ```

-Or in the GUI, go to tab HOME -> Parallel -> Manage Cluster Profiles..., click Import and navigate to:
+Or in the GUI, go to tab HOME -> Parallel -> Manage Cluster Profiles..., click Import and navigate to:

     /apps/all/MATLAB/2015a-EDU/SalomonPBSPro.settings

 With the new mode, MATLAB itself launches the workers via PBS, so you can either use interactive mode or a batch mode on one node, but the actual parallel processing will be done in a separate job started by MATLAB itself. Alternatively, you can use "local" mode to run parallel code on just a single node.

-!!! Note "Note"
-    The profile is confusingly named Salomon, but you can use it also on Anselm.
+!!! note
+    The profile is confusingly named Salomon, but you can use it also on Anselm.

-### Parallel Matlab interactive session
+### Parallel Matlab Interactive Session

-Following example shows how to start interactive session with support for Matlab GUI. For more information about GUI based applications on Anselm see [this page](../../../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/x-window-system/).
+The following example shows how to start an interactive session with support for the Matlab GUI. For more information about GUI based applications on Anselm see [this page](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/x-window-system/).

 ```bash
     $ xhost +

@@ -89,7 +88,7 @@ Once the access to compute nodes is granted by PBS, user can load following modu

     r1i0n17$ matlab &
 ```

-### Parallel Matlab batch job in Local mode
+### Parallel Matlab Batch Job in Local Mode

 To run matlab in batch mode, write a matlab script, then write a bash jobscript and execute via the qsub command. By default, matlab will execute one matlab worker instance per allocated core.

@@ -124,7 +123,7 @@ Submit the jobscript using qsub

     $ qsub ./jobscript
 ```

-### Parallel Matlab Local mode program example
+### Parallel Matlab Local Mode Program Example

 The last part of the configuration is done directly in the user Matlab script before Distributed Computing Toolbox is started.

@@ -134,8 +133,8 @@ The last part of the configuration is done directly in the user Matlab script be

 This script creates the scheduler object "cluster" of type "local" that starts workers locally.

-!!! Note "Note"
-    Please note: Every Matlab script that needs to initialize/use matlabpool has to contain these three lines prior to calling parpool(sched, ...) function.
+!!! note
+    Every Matlab script that needs to initialize/use matlabpool has to contain these three lines prior to calling the parpool(sched, ...) function.

 The last step is to start matlabpool with the "cluster" object and the correct number of workers. We have 16 cores per node, so we start 16 workers.

@@ -148,6 +147,7 @@ The last step is to start matlabpool with "cluster" object and correct number of

     parpool close
 ```
+
 The complete example showing how to use Distributed Computing Toolbox in local mode is shown here.

 ```bash

@@ -177,7 +177,7 @@ The complete example showing how to use Distributed Computing Toolbox in local m

 You can copy and paste the example in a .m file and execute. Note that the parpool size should correspond to the **total number of cores** available on allocated nodes.

-### Parallel Matlab Batch job using PBS mode (workers spawned in a separate job)
+### Parallel Matlab Batch Job Using PBS Mode (Workers Spawned in a Separate Job)

 This mode uses the PBS scheduler to launch the parallel pool. It uses the SalomonPBSPro profile that needs to be imported to Cluster Manager, as mentioned before. This method uses MATLAB's PBS Scheduler interface - it spawns the workers in a separate job submitted by MATLAB using qsub.

@@ -213,11 +213,12 @@ Note that we first construct a cluster object using the imported profile, then s

 You can start this script using batch mode the same way as in the Local mode example.

-### Parallel Matlab Batch with direct launch (workers spawned within the existing job)
+### Parallel Matlab Batch With Direct Launch (Workers Spawned Within the Existing Job)

 This method is a "hack" invented by us to emulate the mpiexec functionality found in previous MATLAB versions. We leverage the MATLAB Generic Scheduler interface, but instead of submitting the workers to PBS, we launch the workers directly within the running job, thus we avoid the issues with the master script and workers running in separate jobs (issues with license not available, waiting for the worker's job to spawn, etc.)

-Please note that this method is experimental.
+!!! warning
+    This method is experimental.

 For this method, you need to use the SalomonDirect profile, import it using [the same way as SalomonPBSPro](matlab/#running-parallel-matlab-using-distributed-computing-toolbox---engine)

@@ -248,33 +249,33 @@ This is an example of m-script using direct mode:

     delete(pool)
 ```

-### Non-interactive Session and Licenses
-
-If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least the " -l _feature_matlab_MATLAB=1" for EDU variant of Matlab. More information about how to check the license features states and how to request them with PBS Pro, please [look here](../isv_licenses/).
+### Non-Interactive Session and Licenses
+
+If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least `-l feature__matlab__MATLAB=1` for the EDU variant of Matlab. For more information about how to check the license feature states and how to request them with PBS Pro, please [look here](../isv_licenses/).

 In case of a non-interactive session please read the [following information](../isv_licenses/) on how to modify the qsub command to test for available licenses prior to getting the resource allocation.

-### Matlab Distributed Computing Engines start up time
+### Matlab Distributed Computing Engines Start Up Time

 Starting Matlab workers is an expensive process that requires a certain amount of time. For your information please see the following table:

-|compute nodes|number of workers|start-up time[s]|
-|---|---|---|
-|16|384|831|
-|8|192|807|
-|4|96|483|
-|2|48|16|
+| compute nodes | number of workers | start-up time [s] |
+| ------------- | ----------------- | ----------------- |
+| 16            | 384               | 831               |
+| 8             | 192               | 807               |
+| 4             | 96                | 483               |
+| 2             | 48                | 16                |
+
+## MATLAB on UV2000

-MATLAB on UV2000
------------------
 The UV2000 machine, available in the queue "qfat", can be used for MATLAB computations. This is an SMP NUMA machine with a large amount of RAM, which can be beneficial for certain types of MATLAB jobs. CPU cores are allocated in chunks of 8 for this machine.

 You can use MATLAB on UV2000 in two parallel modes:

-### Threaded mode
+### Threaded Mode

-Since this is a SMP machine, you can completely avoid using Parallel Toolbox and use only MATLAB's threading. MATLAB will automatically detect the number of cores you have allocated and will set maxNumCompThreads accordingly and certain operations, such as fft, , eig, svd, etc. will be automatically run in threads. The advantage of this mode is that you don't need to modify your existing sequential codes.
+Since this is an SMP machine, you can completely avoid using Parallel Toolbox and use only MATLAB's threading. MATLAB will automatically detect the number of cores you have allocated and will set maxNumCompThreads accordingly, and certain operations, such as fft, eig, svd, etc., will be automatically run in threads. The advantage of this mode is that you don't need to modify your existing sequential codes.

-### Local cluster mode
+### Local Cluster Mode

 You can also use Parallel Toolbox on UV2000. Use [local cluster mode](matlab/#parallel-matlab-batch-job-in-local-mode); the "SalomonPBSPro" profile will not work.
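+For illustration only (the project ID is a placeholder; the queue name and the 8-core allocation granularity come from the text above), an interactive UV2000 allocation might look like:
+
+```bash
+# cores on the UV2000 are allocated in chunks of 8
+$ qsub -I -q qfat -A PROJECT_ID -l select=1:ncpus=8
+```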
diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab_1314.md b/docs.it4i/anselm/software/numerical-languages/matlab_1314.md similarity index 58% rename from docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab_1314.md rename to docs.it4i/anselm/software/numerical-languages/matlab_1314.md index 84b2897ea299701773c9d9736d2af4ed52e2ed3a..8c1012531c67f272907e154addb5f336e636eaf6 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab_1314.md +++ b/docs.it4i/anselm/software/numerical-languages/matlab_1314.md @@ -1,15 +1,14 @@ -Matlab 2013-2014 -================ +# Matlab 2013-2014 -Introduction ------------- -!!! Note "Note" - This document relates to the old versions R2013 and R2014. For MATLAB 2015, please use [this documentation instead](matlab/). +## Introduction + +!!! note + This document relates to the old versions R2013 and R2014. For MATLAB 2015, please use [this documentation instead](matlab/). Matlab is available in the latest stable version. There are always two variants of the release: -- Non commercial or so called EDU variant, which can be used for common research and educational purposes. -- Commercial or so called COM variant, which can used also for commercial activities. The licenses for commercial variant are much more expensive, so usually the commercial variant has only subset of features compared to the EDU available. +* Non commercial or so called EDU variant, which can be used for common research and educational purposes. +* Commercial or so called COM variant, which can used also for commercial activities. The licenses for commercial variant are much more expensive, so usually the commercial variant has only subset of features compared to the EDU available. To load the latest version of Matlab load the module @@ -25,26 +24,26 @@ By default the EDU variant is marked as default. If you need other version or va If you need to use the Matlab GUI to prepare your Matlab programs, you can use Matlab directly on the login nodes. But for all computations use Matlab on the compute nodes via PBS Pro scheduler. -If you require the Matlab GUI, please follow the general informations about running graphical applications +If you require the Matlab GUI, please follow the general information about running graphical applications Matlab GUI is quite slow using the X forwarding built in the PBS (qsub -X), so using X11 display redirection either via SSH or directly by xauth (please see the "GUI Applications on Compute Nodes over VNC" part) is recommended. To run Matlab with GUI, use ```bash - $ matlab +$ matlab ``` To run Matlab in text mode, without the Matlab Desktop GUI environment, use -```bash```bash - $ matlab -nodesktop -nosplash +```bash +$ matlab -nodesktop -nosplash ``` -plots, images, etc... will be still available. +Plots, images, etc... will be still available. + +## Running Parallel Matlab Using Distributed Computing Toolbox / Engine -Running parallel Matlab using Distributed Computing Toolbox / Engine --------------------------------------------------------------------- Recommended parallel mode for running parallel Matlab on Anselm is MPIEXEC mode. In this mode user allocates resources through PBS prior to starting Matlab. Once resources are granted the main Matlab instance is started on the first compute node assigned to job by PBS and workers are started on all remaining nodes. User can use both interactive and non-interactive PBS sessions. 
This mode guarantees that the data processing is not performed on login nodes, but all processing is on compute nodes.  @@ -52,28 +51,30 @@ Recommended parallel mode for running parallel Matlab on Anselm is MPIEXEC mode. For the performance reasons Matlab should use system MPI. On Anselm the supported MPI implementation for Matlab is Intel MPI. To switch to system MPI user has to override default Matlab setting by creating new configuration file in its home directory. The path and file name has to be exactly the same as in the following listing: ```bash - $ vim ~/matlab/mpiLibConf.m +$ vim ~/matlab/mpiLibConf.m +``` - function [lib, extras] = mpiLibConf - %MATLAB MPI Library overloading for Infiniband Networks +```bash +function [lib, extras] = mpiLibConf +%MATLAB MPI Library overloading for Infiniband Networks - mpich = '/opt/intel/impi/4.1.1.036/lib64/'; +mpich = '/opt/intel/impi/4.1.1.036/lib64/'; - disp('Using Intel MPI 4.1.1.036 over Infiniband') +disp('Using Intel MPI 4.1.1.036 over Infiniband') - lib = strcat(mpich, 'libmpich.so'); - mpl = strcat(mpich, 'libmpl.so'); - opa = strcat(mpich, 'libopa.so'); +lib = strcat(mpich, 'libmpich.so'); +mpl = strcat(mpich, 'libmpl.so'); +opa = strcat(mpich, 'libopa.so'); - extras = {}; +extras = {}; ``` System MPI library allows Matlab to communicate through 40 Gbit/s InfiniBand QDR interconnect instead of slower 1 Gbit Ethernet network. -!!! Note "Note" - Please note: The path to MPI library in "mpiLibConf.m" has to match with version of loaded Intel MPI module. In this example the version 4.1.1.036 of Intel MPI is used by Matlab and therefore module impi/4.1.1.036 has to be loaded prior to starting Matlab. +!!! note + The path to MPI library in "mpiLibConf.m" has to match with version of loaded Intel MPI module. In this example the version 4.1.1.036 of Intel MPI is used by Matlab and therefore module impi/4.1.1.036 has to be loaded prior to starting Matlab. -### Parallel Matlab interactive session +### Parallel Matlab Interactive Session Once this file is in place, user can request resources from PBS. Following example shows how to start interactive session with support for Matlab GUI. For more information about GUI based applications on Anselm see. @@ -95,32 +96,32 @@ Once the access to compute nodes is granted by PBS, user can load following modu cn79$ matlab & ``` -### Parallel Matlab batch job +### Parallel Matlab Batch Job To run matlab in batch mode, write an matlab script, then write a bash jobscript and execute via the qsub command. By default, matlab will execute one matlab worker instance per allocated core. ```bash - #!/bin/bash - #PBS -A PROJECT ID - #PBS -q qprod - #PBS -l select=2:ncpus=16:mpiprocs=16:ompthreads=1 +#!/bin/bash +#PBS -A PROJECT ID +#PBS -q qprod +#PBS -l select=2:ncpus=16:mpiprocs=16:ompthreads=1 - # change to shared scratch directory - SCR=/scratch/$USER/$PBS_JOBID - mkdir -p $SCR ; cd $SCR || exit +# change to shared scratch directory +SCR=/scratch/$USER/$PBS_JOBID +mkdir -p $SCR ; cd $SCR || exit - # copy input file to scratch - cp $PBS_O_WORKDIR/matlabcode.m . +# copy input file to scratch +cp $PBS_O_WORKDIR/matlabcode.m . - # load modules - module load matlab/R2013a-EDU - module load impi/4.1.1.036 +# load modules +module load matlab/R2013a-EDU +module load impi/4.1.1.036 - # execute the calculation - matlab -nodisplay -r matlabcode > output.out +# execute the calculation +matlab -nodisplay -r matlabcode > output.out - # copy output file to home - cp output.out $PBS_O_WORKDIR/. 
+# copy output file to home
+cp output.out $PBS_O_WORKDIR/.
```

This script may be submitted directly to the PBS workload manager via the qsub command. The inputs and Matlab script are in the matlabcode.m file, outputs in the output.out file. Note the missing .m extension in the matlab -r matlabcodefile call, **the .m must not be included**. Note that the **shared /scratch must be used**. Further, it is **important to include the quit** statement at the end of the matlabcode.m script.

@@ -128,81 +129,81 @@ This script may be submitted directly to the PBS workload manager via the qsub c

Submit the jobscript using qsub

```bash
-    $ qsub ./jobscript
+$ qsub ./jobscript
```

-### Parallel Matlab program example
+### Parallel Matlab Program Example

The last part of the configuration is done directly in the user Matlab script, before the Distributed Computing Toolbox is started.

```bash
-    sched = findResource('scheduler', 'type', 'mpiexec');
-    set(sched, 'MpiexecFileName', '/apps/intel/impi/4.1.1/bin/mpirun');
-    set(sched, 'EnvironmentSetMethod', 'setenv');
+sched = findResource('scheduler', 'type', 'mpiexec');
+set(sched, 'MpiexecFileName', '/apps/intel/impi/4.1.1/bin/mpirun');
+set(sched, 'EnvironmentSetMethod', 'setenv');
```

This script creates the scheduler object "sched" of type "mpiexec" that starts workers using the mpirun tool. To use the correct version of mpirun, the second line specifies the path to the correct version of the system Intel MPI library.

-!!! Note "Note"
-    Please note: Every Matlab script that needs to initialize/use matlabpool has to contain these three lines prior to calling matlabpool(sched, ...) function.
+!!! note
+    Every Matlab script that needs to initialize/use matlabpool has to contain these three lines prior to calling the matlabpool(sched, ...) function.

The last step is to start matlabpool with the "sched" object and the correct number of workers. In this case, qsub asked for a total of 32 cores, therefore the number of workers is also set to 32.

```bash
-    matlabpool(sched,32);
+matlabpool(sched,32);

-    ... parallel code ...
+... parallel code ...

-    matlabpool close
+matlabpool close
```

The complete example showing how to use the Distributed Computing Toolbox is shown here.

```bash
-    sched = findResource('scheduler', 'type', 'mpiexec');
-    set(sched, 'MpiexecFileName', '/apps/intel/impi/4.1.1/bin/mpirun')
-    set(sched, 'EnvironmentSetMethod', 'setenv')
-    set(sched, 'SubmitArguments', '')
-    sched
-
-    matlabpool(sched,32);
-
-    n=2000;
-
-    W = rand(n,n);
-    W = distributed(W);
-    x = (1:n)';
-    x = distributed(x);
-    spmd
-    [~, name] = system('hostname')
-
-    T = W*x; % Calculation performed on labs, in parallel.
-             % T and W are both codistributed arrays here.
-    end
-    T;
-    whos % T and W are both distributed arrays here.
-
-    matlabpool close
-    quit
+sched = findResource('scheduler', 'type', 'mpiexec');
+set(sched, 'MpiexecFileName', '/apps/intel/impi/4.1.1/bin/mpirun')
+set(sched, 'EnvironmentSetMethod', 'setenv')
+set(sched, 'SubmitArguments', '')
+sched
+
+matlabpool(sched,32);
+
+n=2000;
+
+W = rand(n,n);
+W = distributed(W);
+x = (1:n)';
+x = distributed(x);
+spmd
+[~, name] = system('hostname')
+
+    T = W*x; % Calculation performed on labs, in parallel.
+             % T and W are both codistributed arrays here.
+end
+T;
+whos % T and W are both distributed arrays here.
+
+matlabpool close
+quit
```

You can copy and paste the example into a .m file and execute it. Note that the matlabpool size should correspond to the **total number of cores** available on the allocated nodes.
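Rather than hard-coding that number, the worker count can be derived inside the jobscript. A small sketch, assuming the standard `$PBS_NODEFILE` and the select statement used above (the variable name is illustrative):

```bash
# With select=2:ncpus=16:mpiprocs=16, $PBS_NODEFILE contains one line per
# MPI process, so its line count equals the matlabpool size (2 x 16 = 32)
NWORKERS=$(wc -l < $PBS_NODEFILE)
echo "matlabpool size: $NWORKERS"
```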
-### Non-interactive Session and Licenses
+### Non-Interactive Session and Licenses

-If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least the " -l _feature_matlab_MATLAB=1" for EDU variant of Matlab. More information about how to check the license features states and how to request them with PBS Pro, please [look here](../isv_licenses/).
+If you want to run batch jobs with Matlab, be sure to request the appropriate license features from the PBS Pro scheduler, at least `-l _feature_matlab_MATLAB=1` for the EDU variant of Matlab. For more information on how to check the state of license features and how to request them with PBS Pro, please [look here](../isv_licenses/).

In case of a non-interactive session, please read the [following information](../isv_licenses/) on how to modify the qsub command to test for available licenses prior to getting the resource allocation.

-### Matlab Distributed Computing Engines start up time
+### Matlab Distributed Computing Engines Start Up Time

Starting Matlab workers is an expensive process that requires a certain amount of time. For reference, see the following table:

-    |compute nodes|number of workers|start-up time[s]|
-    |---|---|---|
-    |16|256|1008|
-    |8|128|534|
-    |4|64|333|
-    |2|32|210|
+| compute nodes | number of workers | start-up time [s] |
+| ------------- | ----------------- | ----------------- |
+| 16            | 256               | 1008              |
+| 8             | 128               | 534               |
+| 4             | 64                | 333               |
+| 2             | 32                | 210               |
diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/octave.md b/docs.it4i/anselm/software/numerical-languages/octave.md
similarity index 80%
rename from docs.it4i/anselm-cluster-documentation/software/numerical-languages/octave.md
rename to docs.it4i/anselm/software/numerical-languages/octave.md
index db07503afb05f69b39495cc2322fe2465e96f7e8..19142eb0f6b9150df56c553ba395d385c4b92a47 100644
--- a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/octave.md
+++ b/docs.it4i/anselm/software/numerical-languages/octave.md
@@ -1,20 +1,18 @@
-Octave
-======
+# Octave
+
+## Introduction

-Introduction
-------------
GNU Octave is a high-level interpreted language, primarily intended for numerical computations. It provides capabilities for the numerical solution of linear and nonlinear problems, and for performing other numerical experiments. It also provides extensive graphics capabilities for data visualization and manipulation. Octave is normally used through its interactive command line interface, but it can also be used to write non-interactive programs. The Octave language is quite similar to Matlab so that most programs are easily portable.
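As a quick illustration of that portability, a Matlab-style expression can be executed straight from the shell once the module is loaded (an illustrative one-liner):

```bash
# Runs unchanged under both Matlab and Octave: time a matrix product
$ octave --eval 'A = rand(1000); tic; B = A*A; toc'
```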
Read more on <http://www.gnu.org/software/octave/>

Two versions of Octave are available on Anselm via modules:

-|Version|module|
-|---|---|
-|Octave 3.8.2, compiled with GCC and Multithreaded MKL|Octave/3.8.2-gimkl-2.11.5|
-|Octave 4.0.1, compiled with GCC and Multithreaded MKL|Octave/4.0.1-gimkl-2.11.5|
-|Octave 4.0.0, compiled with >GCC and OpenBLAS|Octave/4.0.0-foss-2015g|
+| Version                                               | module                    |
+| ----------------------------------------------------- | ------------------------- |
+| Octave 3.8.2, compiled with GCC and Multithreaded MKL | Octave/3.8.2-gimkl-2.11.5 |
+| Octave 4.0.1, compiled with GCC and Multithreaded MKL | Octave/4.0.1-gimkl-2.11.5 |
+| Octave 4.0.0, compiled with GCC and OpenBLAS          | Octave/4.0.0-foss-2015g   |

-Modules and execution
-----------------------
+## Modules and Execution

$ module load Octave

@@ -50,7 +48,7 @@ To run octave in batch mode, write an octave script, then write a bash jobscript
exit
```

-This script may be submitted directly to the PBS workload manager via the qsub command. The inputs are in octcode.m file, outputs in output.out file. See the single node jobscript example in the [Job execution section](http://support.it4i.cz/docs/anselm-cluster-documentation/resource-allocation-and-job-execution).
+This script may be submitted directly to the PBS workload manager via the qsub command. The inputs are in the octcode.m file, outputs in the output.out file. See the single node jobscript example in the [Job execution section](../../job-submission-and-execution/).

The Octave C compiler mkoctfile calls GNU gcc 4.8.1 to compile native C code. This is very useful for running native C subroutines in the Octave environment.

@@ -60,11 +58,11 @@ The octave c compiler mkoctfile calls the GNU gcc 4.8.1 for compiling native c c

Octave may use MPI for interprocess communication. This functionality is currently not supported on the Anselm cluster. In case you require the Octave interface to MPI, please contact [Anselm support](https://support.it4i.cz/rt/).

-Xeon Phi Support
-----------------
+## Xeon Phi Support
+
Octave may take advantage of the Xeon Phi accelerators. This will only work on the [Intel Xeon Phi](../intel-xeon-phi/) [accelerated nodes](../../compute-nodes/).

-### Automatic offload support
+### Automatic Offload Support

Octave can accelerate BLAS type operations (in particular the Matrix-Matrix multiplications) on the Xeon Phi accelerator, via [Automatic Offload using the MKL library](../intel-xeon-phi/#section-3)

@@ -88,18 +86,18 @@ Example

In this example, the calculation was automatically divided among the CPU cores and the Xeon Phi MIC accelerator, reducing the total runtime from 6.3 secs down to 2.9 secs.

-### Native support
+### Native Support

A version of [native](../intel-xeon-phi/#section-4) Octave is compiled for Xeon Phi accelerators. Some limitations apply for this version:

-- Only command line support. GUI, graph plotting etc. is not supported.
-- Command history in interactive mode is not supported.
+* Only command line support. GUI, graph plotting etc. is not supported.
+* Command history in interactive mode is not supported.

Octave is linked with parallel Intel MKL, so it is best suited for batch processing of tasks that utilize BLAS, LAPACK and FFT operations. By default, the number of threads is set to 120; you can control this with the OMP_NUM_THREADS environment variable.

-!!! Note "Note"
-    Calculations that do not employ parallelism (either by using parallel MKL e.g.
via matrix operations, fork() function, [parallel package](http://octave.sourceforge.net/parallel/) or other mechanism) will actually run slower than on host CPU. +!!! note + Calculations that do not employ parallelism (either by using parallel MKL e.g. via matrix operations, fork() function, [parallel package](http://octave.sourceforge.net/parallel/) or other mechanism) will actually run slower than on host CPU. To use Octave on a node with Xeon Phi: diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/r.md b/docs.it4i/anselm/software/numerical-languages/r.md similarity index 89% rename from docs.it4i/anselm-cluster-documentation/software/numerical-languages/r.md rename to docs.it4i/anselm/software/numerical-languages/r.md index 238e7106b70a2da05422d2fa4d089c372eef21bb..d70ea9026f50ed82ff789a232a21de97b7b472cb 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/r.md +++ b/docs.it4i/anselm/software/numerical-languages/r.md @@ -1,8 +1,7 @@ -R -=== +# R + +## Introduction -Introduction ------------- The R is a language and environment for statistical computing and graphics. R provides a wide variety of statistical (linear and nonlinear modelling, classical statistical tests, time-series analysis, classification, clustering, ...) and graphical techniques, and is highly extensible. One of R's strengths is the ease with which well-designed publication-quality plots can be produced, including mathematical symbols and formulae where needed. Great care has been taken over the defaults for the minor design choices in graphics, but the user retains full control. @@ -13,24 +12,24 @@ Extensive support for parallel computing is available within R. Read more on <http://www.r-project.org/>, <http://cran.r-project.org/doc/manuals/r-release/R-lang.html> -Modules -------- +## Modules + The R version 3.0.1 is available on Anselm, along with GUI interface Rstudio -|Application|Version|module| -|---|---| -| **R**|R 3.0.1|R| -|**Rstudio**|Rstudio 0.97|Rstudio| +| Application | Version | module | +| ----------- | ------------ | ------- | +| **R** | R 3.0.1 | R | +| **Rstudio** | Rstudio 0.97 | Rstudio | ```bash $ module load R ``` -Execution ---------- +## Execution + The R on Anselm is linked to highly optimized MKL mathematical library. This provides threaded parallelization to many R kernels, notably the linear algebra subroutines. The R runs these heavy calculation kernels without any penalty. By default, the R would parallelize to 16 threads. You may control the threads by setting the OMP_NUM_THREADS environment variable. -### Interactive execution +### Interactive Execution To run R interactively, using Rstudio GUI, log in with ssh -X parameter for X11 forwarding. Run rstudio: @@ -39,7 +38,7 @@ To run R interactively, using Rstudio GUI, log in with ssh -X parameter for X11 $ rstudio ``` -### Batch execution +### Batch Execution To run R in batch mode, write an R script, then write a bash jobscript and execute via the qsub command. By default, R will use 16 threads when running MKL kernels. @@ -67,14 +66,14 @@ Example jobscript: exit ``` -This script may be submitted directly to the PBS workload manager via the qsub command. The inputs are in rscript.R file, outputs in routput.out file. See the single node jobscript example in the [Job execution section](../../resource-allocation-and-job-execution/job-submission-and-execution/). +This script may be submitted directly to the PBS workload manager via the qsub command. 
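For example, assuming the jobscript above was saved as `rjob.sh` (an illustrative name):

```bash
# submit the R batch job and check its state in the queue
$ qsub ./rjob.sh
$ qstat -u $USER
```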
The inputs are in the rscript.R file, outputs in the routput.out file. See the single node jobscript example in the [Job execution section](../../job-submission-and-execution/).
+
+## Parallel R

-Parallel execution
-------------------
-Parallel execution of R may be achieved in many ways. One approach is the implied parallelization due to linked libraries or specially enabled functions, as [described above](r/#interactive-execution). In the following sections, we focus on explicit parallelization, where parallel constructs are directly stated within the R script.
+Parallel execution of R may be achieved in many ways. One approach is the implied parallelization due to linked libraries or specially enabled functions, as [described above](r/#interactive-execution). In the following sections, we focus on explicit parallelization, where parallel constructs are directly stated within the R script.
+
+## Package Parallel

-Package parallel
--------------------
The package parallel provides support for parallel computation, including by forking (taken from package multicore), by sockets (taken from package snow) and random-number generation.

The package is activated this way:

@@ -96,12 +95,12 @@ Download the package [parallell](package-parallel-vignette.pdf) vignette.

Forking is the simplest to use. The forking family of functions provides a parallelized, drop-in replacement for the serial apply() family of functions.

-!!! Note "Note"
-    Forking via package parallel provides functionality similar to OpenMP construct
+!!! note
+    Forking via package parallel provides functionality similar to the OpenMP construct

-    omp parallel for
+    omp parallel for

-    Only cores of single node can be utilized this way!
+    Only cores of a single node can be utilized this way!

Forking example:

@@ -145,10 +144,10 @@ The above example is the classic parallel example for calculating the number π.

Every evaluation of the integrand function runs in parallel in a different process.

-Package Rmpi
-------------
-!!! Note "Note"
-    package Rmpi provides an interface (wrapper) to MPI APIs.
+## Package Rmpi
+
+!!! note
+    package Rmpi provides an interface (wrapper) to MPI APIs.

It also provides an interactive R slave environment. On Anselm, Rmpi provides an interface to the [OpenMPI](../mpi-1/Running_OpenMPI/).

@@ -160,9 +159,10 @@ When using package Rmpi, both openmpi and R modules must be loaded
$ module load openmpi
$ module load R
```
+
Rmpi may be used in three basic ways. The static approach is identical to executing any other MPI program. In addition, there is the Rslaves dynamic MPI approach and the mpi.apply approach. In the following section, we will use the number π integration example to illustrate all these concepts.

-### static Rmpi
+### Static Rmpi

Static Rmpi programs are executed via mpiexec, as any other MPI programs. The number of processes is static - given at launch time.

@@ -220,7 +220,7 @@ Execute the example as:
$ mpiexec R --slave --no-save --no-restore -f pi3.R
```

-### dynamic Rmpi
+### Dynamic Rmpi

Dynamic Rmpi programs are executed by calling R directly. The openmpi module must still be loaded. The R slave processes will be spawned by a function call within the Rmpi program.

@@ -296,8 +296,8 @@ Execute the example as:

mpi.apply is a specific way of executing Dynamic Rmpi programs.

-!!! Note "Note"
-    mpi.apply() family of functions provide MPI parallelized, drop in replacement for the serial apply() family of functions.
+!!! note
+    The mpi.apply() family of functions provides an MPI-parallelized, drop-in replacement for the serial apply() family of functions.

Execution is identical to other dynamic Rmpi programs.

@@ -359,13 +359,11 @@ Execute the example as:
$ R --slave --no-save --no-restore -f pi3parSapply.R
```

-Combining parallel and Rmpi
----------------------------
+## Combining Parallel and Rmpi

Currently, the two packages cannot be combined for hybrid calculations.

-Parallel execution
-------------------
+## Parallel Execution

The R parallel jobs are executed via the PBS queue system exactly as any other parallel jobs. The user must create an appropriate jobscript and submit it via the **qsub**

@@ -377,7 +375,7 @@ Example jobscript for [static Rmpi](r/#static-rmpi) parallel R execution, runnin
    #PBS -N Rjob
    #PBS -l select=100:ncpus=16:mpiprocs=16:ompthreads=1

-   # change to scratch directory
+   # change to scratch directory
    SCRDIR=/scratch/$USER/myjob
    cd $SCRDIR || exit

@@ -398,4 +396,4 @@ Example jobscript for [static Rmpi](r/#static-rmpi) parallel R execution, runnin
    exit
```

-For more information about jobscript and MPI execution refer to the [Job submission](../../resource-allocation-and-job-execution/job-submission-and-execution/) and general [MPI](../mpi/mpi/) sections.
+For more information about jobscript and MPI execution refer to the [Job submission](../../job-submission-and-execution/) and general [MPI](../mpi/mpi/) sections.
diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/fftw.md b/docs.it4i/anselm/software/numerical-libraries/fftw.md
similarity index 60%
rename from docs.it4i/anselm-cluster-documentation/software/numerical-libraries/fftw.md
rename to docs.it4i/anselm/software/numerical-libraries/fftw.md
index 8920a402fdc3f1d614a71cebc15bbda6313f115e..038e1223a44cde79a37f2f7fe59fab9f7e5a8e8e 100644
--- a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/fftw.md
+++ b/docs.it4i/anselm/software/numerical-libraries/fftw.md
@@ -1,22 +1,21 @@
-FFTW
-====
+# FFTW

The discrete Fourier transform in one or more dimensions, MPI parallel

-FFTW is a C subroutine library for computing the discrete Fourier transform in one or more dimensions, of arbitrary input size, and of both real and complex data (as well as of even/odd data, e.g. the discrete cosine/sine transforms or DCT/DST). The FFTW library allows for MPI parallel, in-place discrete Fourier transform, with data distributed over number of nodes.
+FFTW is a C subroutine library for computing the discrete Fourier transform in one or more dimensions, of arbitrary input size, and of both real and complex data (as well as of even/odd data, e.g. the discrete cosine/sine transforms or DCT/DST). The FFTW library allows for MPI parallel, in-place discrete Fourier transform, with data distributed over a number of nodes.

Two versions, **3.3.3** and **2.1.5**, of FFTW are available on Anselm, each compiled for **Intel MPI** and **OpenMPI** using the **intel** and **gnu** compilers.
These are available via modules: - |Version |Parallelization |module |linker options | - | --- | --- | - |FFTW3 gcc3.3.3 |pthread, OpenMP |fftw3/3.3.3-gcc |-lfftw3, -lfftw3_threads-lfftw3_omp | - |FFTW3 icc3.3.3 |pthread, OpenMP |fftw3 |-lfftw3, -lfftw3_threads-lfftw3_omp | - |FFTW2 gcc2.1.5 |pthread |fftw2/2.1.5-gcc |-lfftw, -lfftw_threads | - |FFTW2 icc2.1.5 |pthread |fftw2 |-lfftw, -lfftw_threads | - |FFTW3 gcc3.3.3 |OpenMPI |fftw-mpi3/3.3.3-gcc |-lfftw3_mpi | - |FFTW3 icc3.3.3 |Intel MPI |fftw3-mpi |-lfftw3_mpi | - |FFTW2 gcc2.1.5 |OpenMPI |fftw2-mpi/2.1.5-gcc |-lfftw_mpi | - |FFTW2 gcc2.1.5 |IntelMPI |fftw2-mpi/2.1.5-gcc |-lfftw_mpi | +| Version | Parallelization | module | linker options | +| -------------- | --------------- | ------------------- | ----------------------------------- | +| FFTW3 gcc3.3.3 | pthread, OpenMP | fftw3/3.3.3-gcc | -lfftw3, -lfftw3_threads-lfftw3_omp | +| FFTW3 icc3.3.3 | pthread, OpenMP | fftw3 | -lfftw3, -lfftw3_threads-lfftw3_omp | +| FFTW2 gcc2.1.5 | pthread | fftw2/2.1.5-gcc | -lfftw, -lfftw_threads | +| FFTW2 icc2.1.5 | pthread | fftw2 | -lfftw, -lfftw_threads | +| FFTW3 gcc3.3.3 | OpenMPI | fftw-mpi3/3.3.3-gcc | -lfftw3_mpi | +| FFTW3 icc3.3.3 | Intel MPI | fftw3-mpi | -lfftw3_mpi | +| FFTW2 gcc2.1.5 | OpenMPI | fftw2-mpi/2.1.5-gcc | -lfftw_mpi | +| FFTW2 gcc2.1.5 | IntelMPI | fftw2-mpi/2.1.5-gcc | -lfftw_mpi | ```bash $ module load fftw3 @@ -24,8 +23,7 @@ Two versions, **3.3.3** and **2.1.5** of FFTW are available on Anselm, each comp The module sets up environment variables, required for linking and running FFTW enabled applications. Make sure that the choice of FFTW module is consistent with your choice of MPI library. Mixing MPI of different implementations may have unpredictable results. -Example -------- +## Example ```cpp #include <fftw3-mpi.h> diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/gsl.md b/docs.it4i/anselm/software/numerical-libraries/gsl.md similarity index 68% rename from docs.it4i/anselm-cluster-documentation/software/numerical-libraries/gsl.md rename to docs.it4i/anselm/software/numerical-libraries/gsl.md index ab2efcd3674a0bf83e62f42f5b34fe65b99c3e63..6b5308df3dabbbfe12a8763a955562e311eff35a 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/gsl.md +++ b/docs.it4i/anselm/software/numerical-libraries/gsl.md @@ -1,58 +1,55 @@ -GSL -=== +# GSL The GNU Scientific Library. Provides a wide range of mathematical routines. -Introduction ------------- +## Introduction + The GNU Scientific Library (GSL) provides a wide range of mathematical routines such as random number generators, special functions and least-squares fitting. There are over 1000 functions in total. The routines have been written from scratch in C, and present a modern Applications Programming Interface (API) for C programmers, allowing wrappers to be written for very high level languages. The library covers a wide range of topics in numerical computing. 
Routines are available for the following areas: + Complex Numbers Roots of Polynomials - Complex Numbers Roots of Polynomials - - Special Functions Vectors and Matrices + Special Functions Vectors and Matrices - Permutations Combinations + Permutations Combinations - Sorting BLAS Support + Sorting BLAS Support - Linear Algebra CBLAS Library + Linear Algebra CBLAS Library - Fast Fourier Transforms Eigensystems + Fast Fourier Transforms Eigensystems - Random Numbers Quadrature + Random Numbers Quadrature - Random Distributions Quasi-Random Sequences + Random Distributions Quasi-Random Sequences - Histograms Statistics + Histograms Statistics - Monte Carlo Integration N-Tuples + Monte Carlo Integration N-Tuples - Differential Equations Simulated Annealing + Differential Equations Simulated Annealing Numerical Differentiation Interpolation - Series Acceleration Chebyshev Approximations + Series Acceleration Chebyshev Approximations - Root-Finding Discrete Hankel Transforms + Root-Finding Discrete Hankel Transforms - Least-Squares Fitting Minimization + Least-Squares Fitting Minimization - IEEE Floating-Point Physical Constants + IEEE Floating-Point Physical Constants - Basis Splines Wavelets + Basis Splines Wavelets -Modules -------- +## Modules The GSL 1.16 is available on Anselm, compiled for GNU and Intel compiler. These variants are available via modules: -|Module|Compiler| -|---|---| -| gsl/1.16-gcc|gcc 4.8.6| -|gsl/1.16-icc(default)|icc| +| Module | Compiler | +| --------------------- | --------- | +| gsl/1.16-gcc | gcc 4.8.6 | +| gsl/1.16-icc(default) | icc | ```bash $ module load gsl @@ -60,11 +57,11 @@ The GSL 1.16 is available on Anselm, compiled for GNU and Intel compiler. These The module sets up environment variables, required for linking and running GSL enabled applications. This particular command loads the default module, which is gsl/1.16-icc -Linking -------- +## Linking + Load an appropriate gsl module. Link using **-lgsl** switch to link your code against GSL. The GSL depends on cblas API to BLAS library, which must be supplied for linking. The BLAS may be provided, for example from the MKL library, as well as from the BLAS GSL library (-lgslcblas). Using the MKL is recommended. -### Compiling and linking with Intel compilers +### Compiling and Linking With Intel Compilers ```bash $ module load intel @@ -72,7 +69,7 @@ Load an appropriate gsl module. Link using **-lgsl** switch to link your code ag $ icc myprog.c -o myprog.x -Wl,-rpath=$LIBRARY_PATH -mkl -lgsl ``` -### Compiling and linking with GNU compilers +### Compiling and Linking With GNU Compilers ```bash $ module load gcc @@ -81,8 +78,8 @@ Load an appropriate gsl module. 
Link using **-lgsl** switch to link your code ag $ gcc myprog.c -o myprog.x -Wl,-rpath=$LIBRARY_PATH -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core -lgomp -lgsl ``` -Example -------- +## Example + Following is an example of discrete wavelet transform implemented by GSL: ```cpp diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/hdf5.md b/docs.it4i/anselm/software/numerical-libraries/hdf5.md similarity index 57% rename from docs.it4i/anselm-cluster-documentation/software/numerical-libraries/hdf5.md rename to docs.it4i/anselm/software/numerical-libraries/hdf5.md index ae758f5e09d24495f2e64d340230d59448eb4b37..d9abd72c405ab3ff867203fbe7c9408e9e7c5d7c 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/hdf5.md +++ b/docs.it4i/anselm/software/numerical-libraries/hdf5.md @@ -1,5 +1,4 @@ -HDF5 -==== +# HDF5 Hierarchical Data Format library. Serial and MPI parallel version. @@ -7,15 +6,15 @@ Hierarchical Data Format library. Serial and MPI parallel version. Versions **1.8.11** and **1.8.13** of HDF5 library are available on Anselm, compiled for **Intel MPI** and **OpenMPI** using **intel** and **gnu** compilers. These are available via modules: - |Version |Parallelization |module |C linker options|C++ linker options|Fortran linker options | - | --- | --- | - |HDF5 icc serial |pthread |hdf5/1.8.11 |$HDF5_INC $HDF5_SHLIB |$HDF5_INC $HDF5_CPP_LIB |$HDF5_INC $HDF5_F90_LIB | - |HDF5 icc parallel MPI |pthread, IntelMPI |hdf5-parallel/1.8.11 |$HDF5_INC $HDF5_SHLIB |Not supported |$HDF5_INC $HDF5_F90_LIB | - |HDF5 icc serial |pthread |hdf5/1.8.13 |$HDF5_INC $HDF5_SHLIB |$HDF5_INC $HDF5_CPP_LIB |$HDF5_INC $HDF5_F90_LIB | - |HDF5 icc parallel MPI |pthread, IntelMPI |hdf5-parallel/1.8.13 |$HDF5_INC $HDF5_SHLIB |Not supported |$HDF5_INC $HDF5_F90_LIB | - |HDF5 gcc parallel MPI |pthread, OpenMPI 1.6.5, gcc 4.8.1 |hdf5-parallel/1.8.11-gcc |$HDF5_INC $HDF5_SHLIB |Not supported |$HDF5_INC $HDF5_F90_LIB | - |HDF5 gcc parallel MPI|pthread, OpenMPI 1.6.5, gcc 4.8.1 |hdf5-parallel/1.8.13-gcc |$HDF5_INC $HDF5_SHLIB |Not supported |$HDF5_INC $HDF5_F90_LIB | - |HDF5 gcc parallel MPI |pthread, OpenMPI 1.8.1, gcc 4.9.0 |hdf5-parallel/1.8.13-gcc49 |$HDF5_INC $HDF5_SHLIB |Not supported |$HDF5_INC $HDF5_F90_LIB | +| Version | Parallelization | module | C linker options | C++ linker options | Fortran linker options | +| --------------------- | --------------------------------- | -------------------------- | --------------------- | ----------------------- | ----------------------- | +| HDF5 icc serial | pthread | hdf5/1.8.11 | $HDF5_INC $HDF5_SHLIB | $HDF5_INC $HDF5_CPP_LIB | $HDF5_INC $HDF5_F90_LIB | +| HDF5 icc parallel MPI | pthread, IntelMPI | hdf5-parallel/1.8.11 | $HDF5_INC $HDF5_SHLIB | Not supported | $HDF5_INC $HDF5_F90_LIB | +| HDF5 icc serial | pthread | hdf5/1.8.13 | $HDF5_INC $HDF5_SHLIB | $HDF5_INC $HDF5_CPP_LIB | $HDF5_INC $HDF5_F90_LIB | +| HDF5 icc parallel MPI | pthread, IntelMPI | hdf5-parallel/1.8.13 | $HDF5_INC $HDF5_SHLIB | Not supported | $HDF5_INC $HDF5_F90_LIB | +| HDF5 gcc parallel MPI | pthread, OpenMPI 1.6.5, gcc 4.8.1 | hdf5-parallel/1.8.11-gcc | $HDF5_INC $HDF5_SHLIB | Not supported | $HDF5_INC $HDF5_F90_LIB | +| HDF5 gcc parallel MPI | pthread, OpenMPI 1.6.5, gcc 4.8.1 | hdf5-parallel/1.8.13-gcc | $HDF5_INC $HDF5_SHLIB | Not supported | $HDF5_INC $HDF5_F90_LIB | +| HDF5 gcc parallel MPI | pthread, OpenMPI 1.8.1, gcc 4.9.0 | hdf5-parallel/1.8.13-gcc49 | $HDF5_INC $HDF5_SHLIB | Not supported | $HDF5_INC $HDF5_F90_LIB | ```bash $ module load 
hdf5-parallel @@ -23,13 +22,12 @@ Versions **1.8.11** and **1.8.13** of HDF5 library are available on Anselm, comp The module sets up environment variables, required for linking and running HDF5 enabled applications. Make sure that the choice of HDF5 module is consistent with your choice of MPI library. Mixing MPI of different implementations may have unpredictable results. -!!! Note "Note" - Be aware, that GCC version of **HDF5 1.8.11** has serious performance issues, since it's compiled with -O0 optimization flag. This version is provided only for testing of code compiled only by GCC and IS NOT recommended for production computations. For more information, please see: <http://www.hdfgroup.org/ftp/HDF5/prev-releases/ReleaseFiles/release5-1811> +!!! note + Be aware, that GCC version of **HDF5 1.8.11** has serious performance issues, since it's compiled with -O0 optimization flag. This version is provided only for testing of code compiled only by GCC and IS NOT recommended for production computations. For more information, please see: <http://www.hdfgroup.org/ftp/HDF5/prev-releases/ReleaseFiles/release5-1811> - All GCC versions of **HDF5 1.8.13** are not affected by the bug, are compiled with -O3 optimizations and are recommended for production computations. + All GCC versions of **HDF5 1.8.13** are not affected by the bug, are compiled with -O3 optimizations and are recommended for production computations. -Example -------- +## Example ```cpp #include "hdf5.h" @@ -86,6 +84,6 @@ Load modules and compile: $ mpicc hdf5test.c -o hdf5test.x -Wl,-rpath=$LIBRARY_PATH $HDF5_INC $HDF5_SHLIB ``` -Run the example as [Intel MPI program](../anselm-cluster-documentation/software/mpi/running-mpich2/). +Run the example as [Intel MPI program](../mpi/running-mpich2/). For further information, please see the website: <http://www.hdfgroup.org/HDF5/> diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/intel-numerical-libraries.md b/docs.it4i/anselm/software/numerical-libraries/intel-numerical-libraries.md similarity index 86% rename from docs.it4i/anselm-cluster-documentation/software/numerical-libraries/intel-numerical-libraries.md rename to docs.it4i/anselm/software/numerical-libraries/intel-numerical-libraries.md index 0683f6bd1a65e784f3277bf980be437ecf07a21d..8a79b9961d7f158bb369dc65f6ea6e21896b09ac 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/intel-numerical-libraries.md +++ b/docs.it4i/anselm/software/numerical-libraries/intel-numerical-libraries.md @@ -1,10 +1,9 @@ -Intel numerical libraries -========================= +# Intel numerical libraries Intel libraries for high performance in numerical computing -Intel Math Kernel Library -------------------------- +## Intel Math Kernel Library + Intel Math Kernel Library (Intel MKL) is a library of math kernel subroutines, extensively threaded and optimized for maximum performance. Intel MKL unites and provides these basic components: BLAS, LAPACK, ScaLapack, PARDISO, FFT, VML, VSL, Data fitting, Feast Eigensolver and many more. ```bash @@ -13,8 +12,8 @@ Intel Math Kernel Library (Intel MKL) is a library of math kernel subroutines, e Read more at the [Intel MKL](../intel-suite/intel-mkl/) page. -Intel Integrated Performance Primitives ---------------------------------------- +## Intel Integrated Performance Primitives + Intel Integrated Performance Primitives, version 7.1.1, compiled for AVX is available, via module ipp. 
The IPP is a library of highly optimized algorithmic building blocks for media and data applications. This includes signal, image and frame processing algorithms, such as FFT, FIR, Convolution, Optical Flow, Hough transform, Sum, MinMax and many more. ```bash @@ -23,8 +22,8 @@ Intel Integrated Performance Primitives, version 7.1.1, compiled for AVX is avai Read more at the [Intel IPP](../intel-suite/intel-integrated-performance-primitives/) page. -Intel Threading Building Blocks -------------------------------- +## Intel Threading Building Blocks + Intel Threading Building Blocks (Intel TBB) is a library that supports scalable parallel programming using standard ISO C++ code. It does not require special languages or compilers. It is designed to promote scalable data parallel programming. Additionally, it fully supports nested parallelism, so you can build larger parallel components from smaller parallel components. To use the library, you specify tasks, not threads, and let the library map tasks onto threads in an efficient manner. ```bash diff --git a/docs.it4i/anselm/software/numerical-libraries/magma-for-intel-xeon-phi.md b/docs.it4i/anselm/software/numerical-libraries/magma-for-intel-xeon-phi.md new file mode 100644 index 0000000000000000000000000000000000000000..8ce0b79e0ce63aff1cfea48f72e009ad111a79a1 --- /dev/null +++ b/docs.it4i/anselm/software/numerical-libraries/magma-for-intel-xeon-phi.md @@ -0,0 +1,79 @@ +# MAGMA for Intel Xeon Phi + +Next generation dense algebra library for heterogeneous systems with accelerators + +## Compiling and Linking With MAGMA + +To be able to compile and link code with MAGMA library user has to load following module: + +```bash + $ module load magma/1.3.0-mic +``` + +To make compilation more user friendly module also sets these two environment variables: + +!!! note + MAGMA_INC - contains paths to the MAGMA header files (to be used for compilation step) + +!!! note + MAGMA_LIBS - contains paths to MAGMA libraries (to be used for linking step). + +Compilation example: + +```bash + $ icc -mkl -O3 -DHAVE_MIC -DADD_ -Wall $MAGMA_INC -c testing_dgetrf_mic.cpp -o testing_dgetrf_mic.o + + $ icc -mkl -O3 -DHAVE_MIC -DADD_ -Wall -fPIC -Xlinker -zmuldefs -Wall -DNOCHANGE -DHOST testing_dgetrf_mic.o -o testing_dgetrf_mic $MAGMA_LIBS +``` + +### Running MAGMA Code + +MAGMA implementation for Intel MIC requires a MAGMA server running on accelerator prior to executing the user application. The server can be started and stopped using following scripts: + +!!! note + To start MAGMA server use: + **$MAGMAROOT/start_magma_server** + +!!! note + To stop the server use: + **$MAGMAROOT/stop_magma_server** + +!!! 
note + For deeper understanding how the MAGMA server is started, see the following script: + **$MAGMAROOT/launch_anselm_from_mic.sh** + +To test if the MAGMA server runs properly we can run one of examples that are part of the MAGMA installation: + +```bash + [user@cn204 ~]$ $MAGMAROOT/testing/testing_dgetrf_mic + + [user@cn204 ~]$ export OMP_NUM_THREADS=16 + + [lriha@cn204 ~]$ $MAGMAROOT/testing/testing_dgetrf_mic + Usage: /apps/libs/magma-mic/magmamic-1.3.0/testing/testing_dgetrf_mic [options] [-h|--help] + + M N CPU GFlop/s (sec) MAGMA GFlop/s (sec) ||PA-LU||/(||A||*N) + ========================================================================= + 1088 1088 --- ( --- ) 13.93 ( 0.06) --- + 2112 2112 --- ( --- ) 77.85 ( 0.08) --- + 3136 3136 --- ( --- ) 183.21 ( 0.11) --- + 4160 4160 --- ( --- ) 227.52 ( 0.21) --- + 5184 5184 --- ( --- ) 258.61 ( 0.36) --- + 6208 6208 --- ( --- ) 333.12 ( 0.48) --- + 7232 7232 --- ( --- ) 416.52 ( 0.61) --- + 8256 8256 --- ( --- ) 446.97 ( 0.84) --- + 9280 9280 --- ( --- ) 461.15 ( 1.16) --- + 10304 10304 --- ( --- ) 500.70 ( 1.46) --- +``` + +!!! hint + MAGMA contains several benchmarks and examples in `$MAGMAROOT/testing/` + +!!! note + MAGMA relies on the performance of all CPU cores as well as on the performance of the accelerator. Therefore on Anselm number of CPU OpenMP threads has to be set to 16 with `export OMP_NUM_THREADS=16`. + +See more details at [MAGMA home page](http://icl.cs.utk.edu/magma/). + +## References + +[1] MAGMA MIC: Linear Algebra Library for Intel Xeon Phi Coprocessors, Jack Dongarra et. al, <http://icl.utk.edu/projectsfiles/magma/pubs/24-MAGMA_MIC_03.pdf> diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/petsc.md b/docs.it4i/anselm/software/numerical-libraries/petsc.md similarity index 53% rename from docs.it4i/anselm-cluster-documentation/software/numerical-libraries/petsc.md rename to docs.it4i/anselm/software/numerical-libraries/petsc.md index 8cdcd2b8e57550ccc2a5baeedc71835950121aee..528d13ddbcaffdc9f8b0a80bee379b05602317d7 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/petsc.md +++ b/docs.it4i/anselm/software/numerical-libraries/petsc.md @@ -1,22 +1,20 @@ -PETSc -===== +# PETSc PETSc is a suite of building blocks for the scalable solution of scientific and engineering applications modeled by partial differential equations. It supports MPI, shared memory, and GPU through CUDA or OpenCL, as well as hybrid MPI-shared memory or MPI-GPU parallelism. -Introduction ------------- +## Introduction + PETSc (Portable, Extensible Toolkit for Scientific Computation) is a suite of building blocks (data structures and routines) for the scalable solution of scientific and engineering applications modelled by partial differential equations. It allows thinking in terms of high-level objects (matrices) instead of low-level objects (raw arrays). Written in C language but can also be called from FORTRAN, C++, Python and Java codes. It supports MPI, shared memory, and GPUs through CUDA or OpenCL, as well as hybrid MPI-shared memory or MPI-GPU parallelism. 
-Resources ---------- -- [project webpage](http://www.mcs.anl.gov/petsc/) -- [documentation](http://www.mcs.anl.gov/petsc/documentation/) - - [PETSc Users Manual (PDF)](http://www.mcs.anl.gov/petsc/petsc-current/docs/manual.pdf) - - [index of all manual pages](http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/singleindex.html) -- PRACE Video Tutorial [part1](http://www.youtube.com/watch?v=asVaFg1NDqY), [part2](http://www.youtube.com/watch?v=ubp_cSibb9I), [part3](http://www.youtube.com/watch?v=vJAAAQv-aaw), [part4](http://www.youtube.com/watch?v=BKVlqWNh8jY), [part5](http://www.youtube.com/watch?v=iXkbLEBFjlM) +## Resources + +* [project webpage](http://www.mcs.anl.gov/petsc/) +* [documentation](http://www.mcs.anl.gov/petsc/documentation/) + * [PETSc Users Manual (PDF)](http://www.mcs.anl.gov/petsc/petsc-current/docs/manual.pdf) + * [index of all manual pages](http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/singleindex.html) +* PRACE Video Tutorial [part1](http://www.youtube.com/watch?v=asVaFg1NDqY), [part2](http://www.youtube.com/watch?v=ubp_cSibb9I), [part3](http://www.youtube.com/watch?v=vJAAAQv-aaw), [part4](http://www.youtube.com/watch?v=BKVlqWNh8jY), [part5](http://www.youtube.com/watch?v=iXkbLEBFjlM) -Modules -------- +## Modules You can start using PETSc on Anselm by loading the PETSc module. Module names obey this pattern: @@ -25,10 +23,10 @@ You can start using PETSc on Anselm by loading the PETSc module. Module names ob module load petsc/3.4.4-icc-impi-mkl-opt ``` -where `variant` is replaced by one of `{dbg, opt, threads-dbg, threads-opt}`. The `opt` variant is compiled without debugging information (no `-g` option) and with aggressive compiler optimizations (`-O3 -xAVX`). This variant is suitable for performance measurements and production runs. In all other cases use the debug (`dbg`) variant, because it contains debugging information, performs validations and self-checks, and provides a clear stack trace and message in case of an error. The other two variants `threads-dbg` and `threads-opt` are `dbg` and `opt`, respectively, built with [OpenMP and pthreads threading support](http://www.mcs.anl.gov/petsc/features/threads.html). +where `variant` is replaced by one of `{dbg, opt, threads-dbg, threads-opt}`. The `opt` variant is compiled without debugging information (no `-g` option) and with aggressive compiler optimizations (`-O3 -xAVX`). This variant is suitable for performance measurements and production runs. In all other cases use the debug (`dbg`) variant, because it contains debugging information, performs validations and self-checks, and provides a clear stack trace and message in case of an error. The other two variants `threads-dbg` and `threads-opt` are `dbg` and `opt`, respectively, built with [OpenMP and pthreads threading support](https://www.mcs.anl.gov/petsc/miscellaneous/petscthreads.html). + +## External Libraries -External libraries ------------------- PETSc needs at least MPI, BLAS and LAPACK. These dependencies are currently satisfied with Intel MPI and Intel MKL in Anselm `petsc` modules. PETSc can be linked with a plethora of [external numerical libraries](http://www.mcs.anl.gov/petsc/miscellaneous/external.html), extending PETSc functionality, e.g. direct linear system solvers, preconditioners or partitioners. See below a list of libraries currently included in Anselm `petsc` modules. 
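Before the list, as a concrete starting point, a minimal compile-and-run sketch against one of these modules might look as follows. The module name follows the pattern above; the source file name, the compiler wrapper (e.g. `mpiicc` for the Intel toolchain) and the process count are illustrative, and the include/library paths use the `PETSC_DIR`/`PETSC_ARCH` variables described below:

```bash
# Illustrative sketch: build a PETSc-based program against the loaded
# module using the PETSC_DIR/PETSC_ARCH variables it sets, then run it
module load petsc/3.4.4-icc-impi-mkl-opt
mpicc ex1.c -o ex1 -I$PETSC_DIR/$PETSC_ARCH/include -L$PETSC_DIR/$PETSC_ARCH/lib -lpetsc
mpirun -np 4 ./ex1
```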
@@ -36,27 +34,27 @@ PETSc can be linked with a plethora of [external numerical libraries](http://www All these libraries can be used also alone, without PETSc. Their static or shared program libraries are available in `$PETSC_DIR/$PETSC_ARCH/lib` and header files in `$PETSC_DIR/$PETSC_ARCH/include`. `PETSC_DIR` and `PETSC_ARCH` are environment variables pointing to a specific PETSc instance based on the petsc module loaded. -### Libraries linked to PETSc on Anselm (as of 11 April 2015) - -- dense linear algebra - - [Elemental](http://libelemental.org/) -- sparse linear system solvers - - [Intel MKL Pardiso](https://software.intel.com/en-us/node/470282) - - [MUMPS](http://mumps.enseeiht.fr/) - - [PaStiX](http://pastix.gforge.inria.fr/) - - [SuiteSparse](http://faculty.cse.tamu.edu/davis/suitesparse.html) - - [SuperLU](http://crd.lbl.gov/~xiaoye/SuperLU/#superlu) - - [SuperLU_Dist](http://crd.lbl.gov/~xiaoye/SuperLU/#superlu_dist) -- input/output - - [ExodusII](http://sourceforge.net/projects/exodusii/) - - [HDF5](http://www.hdfgroup.org/HDF5/) - - [NetCDF](http://www.unidata.ucar.edu/software/netcdf/) -- partitioning - - [Chaco](http://www.cs.sandia.gov/CRF/chac.html) - - [METIS](http://glaros.dtc.umn.edu/gkhome/metis/metis/overview) - - [ParMETIS](http://glaros.dtc.umn.edu/gkhome/metis/parmetis/overview) - - [PT-Scotch](http://www.labri.fr/perso/pelegrin/scotch/) -- preconditioners & multigrid - - [Hypre](http://acts.nersc.gov/hypre/) - - [Trilinos ML](http://trilinos.sandia.gov/packages/ml/) - - [SPAI - Sparse Approximate Inverse](https://bitbucket.org/petsc/pkg-spai) +### Libraries Linked to PETSc on Anselm (As of 11 April 2015) + +* dense linear algebra + * [Elemental](http://libelemental.org/) +* sparse linear system solvers + * [Intel MKL Pardiso](https://software.intel.com/en-us/node/470282) + * [MUMPS](http://mumps.enseeiht.fr/) + * [PaStiX](http://pastix.gforge.inria.fr/) + * [SuiteSparse](http://faculty.cse.tamu.edu/davis/suitesparse.html) + * [SuperLU](http://crd.lbl.gov/~xiaoye/SuperLU/#superlu) + * [SuperLU_Dist](http://crd.lbl.gov/~xiaoye/SuperLU/#superlu_dist) +* input/output + * [ExodusII](http://sourceforge.net/projects/exodusii/) + * [HDF5](http://www.hdfgroup.org/HDF5/) + * [NetCDF](http://www.unidata.ucar.edu/software/netcdf/) +* partitioning + * [Chaco](http://www.cs.sandia.gov/CRF/chac.html) + * [METIS](http://glaros.dtc.umn.edu/gkhome/metis/metis/overview) + * [ParMETIS](http://glaros.dtc.umn.edu/gkhome/metis/parmetis/overview) + * [PT-Scotch](http://www.labri.fr/perso/pelegrin/scotch/) +* preconditioners & multigrid + * [Hypre](http://www.nersc.gov/users/software/programming-libraries/math-libraries/petsc/) + * [Trilinos ML](http://trilinos.sandia.gov/packages/ml/) + * [SPAI - Sparse Approximate Inverse](https://bitbucket.org/petsc/pkg-spai) diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/trilinos.md b/docs.it4i/anselm/software/numerical-libraries/trilinos.md similarity index 63% rename from docs.it4i/anselm-cluster-documentation/software/numerical-libraries/trilinos.md rename to docs.it4i/anselm/software/numerical-libraries/trilinos.md index e4ca472867371b26240e5d5ade9f0c9f441d265e..42f8bc0dc4ca5318cca883193e5fc61eb207b9b1 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/trilinos.md +++ b/docs.it4i/anselm/software/numerical-libraries/trilinos.md @@ -1,31 +1,30 @@ -Trilinos -======== +# Trilinos Packages for large scale scientific and engineering problems. Provides MPI and hybrid parallelization. 
-### Introduction +## Introduction Trilinos is a collection of software packages for the numerical solution of large scale scientific and engineering problems. It is based on C++ and features modern object-oriented design. Both serial as well as parallel computations based on MPI and hybrid parallelization are supported within Trilinos packages. -### Installed packages +## Installed Packages Current Trilinos installation on ANSELM contains (among others) the following main packages -- **Epetra** - core linear algebra package containing classes for manipulation with serial and distributed vectors, matrices, and graphs. Dense linear solvers are supported via interface to BLAS and LAPACK (Intel MKL on ANSELM). Its extension **EpetraExt** contains e.g. methods for matrix-matrix multiplication. -- **Tpetra** - next-generation linear algebra package. Supports 64-bit indexing and arbitrary data type using C++ templates. -- **Belos** - library of various iterative solvers (CG, block CG, GMRES, block GMRES etc.). -- **Amesos** - interface to direct sparse solvers. -- **Anasazi** - framework for large-scale eigenvalue algorithms. -- **IFPACK** - distributed algebraic preconditioner (includes e.g. incomplete LU factorization) -- **Teuchos** - common tools packages. This package contains classes for memory management, output, performance monitoring, BLAS and LAPACK wrappers etc. +* **Epetra** - core linear algebra package containing classes for manipulation with serial and distributed vectors, matrices, and graphs. Dense linear solvers are supported via interface to BLAS and LAPACK (Intel MKL on ANSELM). Its extension **EpetraExt** contains e.g. methods for matrix-matrix multiplication. +* **Tpetra** - next-generation linear algebra package. Supports 64-bit indexing and arbitrary data type using C++ templates. +* **Belos** - library of various iterative solvers (CG, block CG, GMRES, block GMRES etc.). +* **Amesos** - interface to direct sparse solvers. +* **Anasazi** - framework for large-scale eigenvalue algorithms. +* **IFPACK** - distributed algebraic preconditioner (includes e.g. incomplete LU factorization) +* **Teuchos** - common tools packages. This package contains classes for memory management, output, performance monitoring, BLAS and LAPACK wrappers etc. For the full list of Trilinos packages, descriptions of their capabilities, and user manuals see [http://trilinos.sandia.gov.](http://trilinos.sandia.gov) -### Installed version +## Installed Version Currently, Trilinos in version 11.2.3 compiled with Intel Compiler is installed on ANSELM. -### Compiling against Trilinos +## Compiling Against Trilinos First, load the appropriate module: diff --git a/docs.it4i/anselm-cluster-documentation/software/nvidia-cuda.md b/docs.it4i/anselm/software/nvidia-cuda.md similarity index 94% rename from docs.it4i/anselm-cluster-documentation/software/nvidia-cuda.md rename to docs.it4i/anselm/software/nvidia-cuda.md index 2161674281cbc34891aa015bbdd1a22ba4d55f24..392811efa72e5275307c29c34a13b462c688827e 100644 --- a/docs.it4i/anselm-cluster-documentation/software/nvidia-cuda.md +++ b/docs.it4i/anselm/software/nvidia-cuda.md @@ -1,10 +1,9 @@ -NVIDIA CUDA -=========== +# NVIDIA CUDA -##A guide to NVIDIA CUDA programming and GPU usage +Guide to NVIDIA CUDA Programming and GPU Usage + +## CUDA Programming on Anselm -CUDA Programming on Anselm --------------------------- The default programming model for GPU accelerators on Anselm is Nvidia CUDA. 
To set up the environment for CUDA use ```bash @@ -87,7 +86,7 @@ Expected output of the deviceQuery example executed on a node with Tesla K20m is deviceQuery, CUDA Driver = CUDART, CUDA Driver Version = 5.0, CUDA Runtime Version = 5.0, NumDevs = 1, Device0 = Tesla K20m ``` -### Code example +### Code Example In this section we provide a basic CUDA based vector addition code example. You can directly copy and paste the code to test it. @@ -193,14 +192,13 @@ To run the code use interactive PBS session to get access to one of the GPU acce $ ./test.cuda ``` -CUDA Libraries --------------- +## CUDA Libraries ### cuBLAS The NVIDIA CUDA Basic Linear Algebra Subroutines (cuBLAS) library is a GPU-accelerated version of the complete standard BLAS library with 152 standard BLAS routines. Basic description of the library together with basic performance comparison with MKL can be found [here](https://developer.nvidia.com/cublas "Nvidia cuBLAS"). -**cuBLAS example: SAXPY** +#### cuBLAS Example: SAXPY SAXPY function multiplies the vector x by the scalar alpha and adds it to the vector y overwriting the latest vector with the result. The description of the cuBLAS function can be found in [NVIDIA CUDA documentation](http://docs.nvidia.com/cuda/cublas/index.html#cublas-lt-t-gt-axpy "Nvidia CUDA documentation "). Code can be pasted in the file and compiled without any modification. @@ -282,11 +280,10 @@ SAXPY function multiplies the vector x by the scalar alpha and adds it to the ve } ``` -!!! Note "Note" - Please note: cuBLAS has its own function for data transfers between CPU and GPU memory: - - - [cublasSetVector](http://docs.nvidia.com/cuda/cublas/index.html#cublassetvector) - transfers data from CPU to GPU memory - - [cublasGetVector](http://docs.nvidia.com/cuda/cublas/index.html#cublasgetvector) - transfers data from GPU to CPU memory +!!! note + cuBLAS has its own function for data transfers between CPU and GPU memory: + - [cublasSetVector](http://docs.nvidia.com/cuda/cublas/index.html#cublassetvector) - transfers data from CPU to GPU memory + - [cublasGetVector](http://docs.nvidia.com/cuda/cublas/index.html#cublasgetvector) - transfers data from GPU to CPU memory To compile the code using NVCC compiler a "-lcublas" compiler flag has to be specified: diff --git a/docs.it4i/anselm-cluster-documentation/software/omics-master/diagnostic-component-team.md b/docs.it4i/anselm/software/omics-master/diagnostic-component-team.md similarity index 71% rename from docs.it4i/anselm-cluster-documentation/software/omics-master/diagnostic-component-team.md rename to docs.it4i/anselm/software/omics-master/diagnostic-component-team.md index 0fbdc214f727d55f454f844dfde02b9c2eca226d..d8d0c4fc4e26a25550cb96b6dbe16a7a587fecf5 100644 --- a/docs.it4i/anselm-cluster-documentation/software/omics-master/diagnostic-component-team.md +++ b/docs.it4i/anselm/software/omics-master/diagnostic-component-team.md @@ -1,14 +1,13 @@ -Diagnostic component (TEAM) -=========================== +# Diagnostic component (TEAM) -### Access +## Access -TEAM is available at the following address: <http://omics.it4i.cz/team/> +TEAM is available at the [following address](http://omics.it4i.cz/team/) -!!! Note "Note" - The address is accessible only via VPN. +!!! note + The address is accessible only via VPN. -### Diagnostic component (TEAM) +## Diagnostic Component VCF files are scanned by this diagnostic tool for known diagnostic disease-associated variants. 
When no diagnostic mutation is found, the file can be sent to the disease-causing gene discovery tool to see whether new disease-associated variants can be found.

@@ -16,4 +15,4 @@ TEAM (27) is an intuitive and easy-to-use web tool that fills the gap between th



-**Figure 5.** Interface of the application. Panels for defining targeted regions of interest can be set up by just drag and drop known disease genes or disease definitions from the lists. Thus, virtual panels can be interactively improved as the knowledge of the disease increases.
+**Figure 5.** Interface of the application. Panels for defining targeted regions of interest can be set up by simply dragging and dropping known disease genes or disease definitions from the lists. Thus, virtual panels can be interactively improved as the knowledge of the disease increases.
diff --git a/docs.it4i/anselm/software/omics-master/overview.md b/docs.it4i/anselm/software/omics-master/overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..7a827c97b6cd5212bc932779b870e4ac8f8464cf
--- /dev/null
+++ b/docs.it4i/anselm/software/omics-master/overview.md
@@ -0,0 +1,391 @@
+# Overview
+
+The human NGS data processing solution
+
+## Introduction
+
+The scope of this OMICS MASTER solution is restricted to human genomics research (disease-causing gene discovery in whole human genome or exome) or diagnosis (panel sequencing), although it could be extended in the future to other usages.
+
+The pipeline inputs the raw data produced by the sequencing machines and runs a processing procedure that consists of quality control, mapping and variant calling steps, resulting in a file containing the set of variants found in the sample. From this point, the prioritization component or the diagnostic component can be launched.
+
+
+
+Figure 1. OMICS MASTER solution overview. Data is produced in the external labs and comes to IT4I (represented by the blue dashed line). The data pre-processor converts raw data into a list of variants and annotations for each sequenced patient. These list files, together with primary and secondary (alignment) data files, are stored in the IT4I sequence DB and uploaded to the discovery (candidate prioritization) or diagnostic component, where they can be analyzed directly by the user that produced them, depending on the experimental design carried out.
+
+Typical genomics pipelines are composed of several components that need to be launched manually. The advantage of the OMICS MASTER pipeline is that all these components are invoked sequentially in an automated way.
+
+The OMICS MASTER pipeline inputs a FASTQ file and outputs an enriched VCF file. The pipeline is able to queue all the jobs to PBS by launching a single process that takes all the necessary input files and creates the intermediate and final folders.
+
+Let’s see each of the OMICS MASTER solution components:
+
+## Components
+
+### Processing
+
+This component is composed of a set of programs that carry out quality controls, alignment, realignment, variant calling and variant annotation. It turns raw data from the sequencing machine into files containing lists of variants (VCF) that, once annotated, can be used by the following components (discovery and diagnosis).
+
+We distinguish three types of sequencing instruments: bench sequencers (MySeq, IonTorrent, and Roche Junior, although this last one is about to be discontinued), which produce relatively low throughput (tens of millions of reads), and high-end sequencers, which produce high throughput (hundreds of millions of reads), among which we have Illumina HiSeq 2000 (and newer models) and SOLiD. All of them but SOLiD produce data in sequence format. SOLiD produces data in a special format called colour space that requires specific software for the mapping process. Once the mapping has been done, the rest of the pipeline is identical. Anyway, SOLiD is a technology which is also about to be discontinued by the manufacturer, so this type of data will be scarce in the future.
+
+#### Quality Control, Preprocessing and Statistics for FASTQ
+
+Component: Hpg-Fastq & FastQC.
+
+These steps are carried out over the original FASTQ file with optimized scripts and include the following: sequence cleansing, estimation of base quality scores, elimination of duplicates and statistics.
+
+Input: FASTQ file.
+
+Output: FASTQ file plus an HTML file containing statistics on the data.
+
+FASTQ format: it represents the nucleotide sequence and its corresponding quality scores.
+
+
+Figure 2. FASTQ file.
+
+#### Mapping
+
+Component: HPG Aligner.
+
+Sequence reads are mapped over the human reference genome. SOLiD reads are not covered by this solution; they should be mapped with specific software (among the few available options, SHRiMP seems to be the best one). For the rest of NGS machine outputs we use HPG Aligner. HPG Aligner is an innovative solution, based on a combination of mapping with BWT and local alignment with Smith-Waterman (SW), that drastically increases mapping accuracy (97% versus 62-70% by current mappers, in the most common scenarios). This proposal provides a simple and fast solution that maps almost all the reads, even those containing a high number of mismatches or indels.
+
+Input: FASTQ file.
+
+Output: Aligned file in BAM format.
+
+#### Sequence Alignment/Map (SAM)
+
+It is a human-readable, tab-delimited format in which each read and its alignment is represented on a single line. The format can represent unmapped reads, reads that are mapped to unique locations, and reads that are mapped to multiple locations.
+
+The SAM format (1) consists of one header section and one alignment section. The lines in the header section start with the character ‘@’, and lines in the alignment section do not. All lines are TAB delimited.
+
+In SAM, each alignment line has 11 mandatory fields and a variable number of optional fields. The mandatory fields are briefly described in Table 1. They must be present but their value can be a ‘*’ or a zero (depending on the field) if the corresponding information is unavailable.
+
+| No. | Name  | Description                                           |
+| --- | ----- | ----------------------------------------------------- |
+| 1   | QNAME | Query NAME of the read or the read pair               |
+| 2   | FLAG  | Bitwise FLAG (pairing, strand, mate strand, etc.)     |
+| 3 | RNAME | Reference sequence NAME |
+| 4 | POS | 1-Based leftmost POSition of clipped alignment |
+| 5 | MAPQ | MAPping Quality (Phred-scaled) |
+| 6 | CIGAR | Extended CIGAR string (operations: MIDNSHP) |
+| 7 | MRNM | Mate REference NaMe ('=' if same RNAME) |
+| 8 | MPOS | 1-Based leftmost Mate POSition |
+| 9 | ISIZE | Inferred Insert SIZE |
+| 10 | SEQ | Query SEQuence on the same strand as the reference |
+| 11 | QUAL | Query QUALity (ASCII-33=Phred base quality) |
+
+Table 1. Mandatory fields in the SAM format.
+
+The standard CIGAR description of pairwise alignment defines three operations: ‘M’ for match/mismatch, ‘I’ for insertion compared with the reference and ‘D’ for deletion. The extended CIGAR proposed in SAM added four more operations: ‘N’ for skipped bases on the reference, ‘S’ for soft clipping, ‘H’ for hard clipping and ‘P’ for padding. These support splicing, clipping, multi-part and padded alignments. Figure 3 shows examples of CIGAR strings for different types of alignments.
+
+
+
+Figure 3. SAM format file. The ‘@SQ’ line in the header section gives the order of reference sequences. Notably, r001 is the name of a read pair. According to FLAG 163 (=1+2+32+128), the read mapped to position 7 is the second read in the pair (128) and regarded as properly paired (1 + 2); its mate is mapped to 37 on the reverse strand (32). Read r002 has three soft-clipped (unaligned) bases. The coordinate shown in SAM is the position of the first aligned base. The CIGAR string for this alignment contains a P (padding) operation which correctly aligns the inserted sequences. Padding operations can be absent when an aligner does not support multiple sequence alignment. The last six bases of read r003 map to position 9, and the first five to position 29 on the reverse strand. The hard clipping operation H indicates that the clipped sequence is not present in the sequence field. The NM tag gives the number of mismatches. Read r004 is aligned across an intron, indicated by the N operation.
+
+##### Binary Alignment/Map (BAM)
+
+BAM is the binary representation of SAM and keeps exactly the same information as SAM. BAM uses lossless compression to reduce the size of the data by about 75% and provides an indexing system that allows reads that overlap a region of the genome to be retrieved and rapidly traversed.
+
+#### Quality Control, Preprocessing and Statistics for BAM
+
+Component: Hpg-Fastq & FastQC.
+
+Some features:
+
+* Quality control
+    * reads with N errors
+    * reads with multiple mappings
+    * strand bias
+    * paired-end insert
+* Filtering: by number of errors, number of hits
+* Comparator: stats, intersection, ...
+
+Input: BAM file.
+
+Output: BAM file plus an HTML file containing statistics.
+
+#### Variant Calling
+
+Component: GATK.
+
+Identification of single nucleotide variants and indels on the alignments is performed using the Genome Analysis Toolkit (GATK). GATK (2) is a software package developed at the Broad Institute to analyze high-throughput sequencing data. The toolkit offers a wide variety of tools, with a primary focus on variant discovery and genotyping as well as strong emphasis on data quality assurance.
+
+Input: BAM file.
+
+Output: VCF file.
+
+Variant Call Format (VCF)
+
+VCF (3) is a standardized format for storing the most prevalent types of sequence variation, including SNPs, indels and larger structural variants, together with rich annotations.
The format was developed with the primary intention of representing human genetic variation, but its use is not restricted to diploid genomes and can be used in different contexts as well. Its flexibility and user extensibility allow representation of a wide variety of genomic variation with respect to a single reference sequence.
+
+A VCF file consists of a header section and a data section. The header contains an arbitrary number of meta-information lines, each starting with characters ‘##’, and a TAB delimited field definition line, starting with a single ‘#’ character. The meta-information header lines provide a standardized description of tags and annotations used in the data section. The use of meta-information allows the information stored within a VCF file to be tailored to the dataset in question. It can also be used to provide information about the means of file creation, date of creation, version of the reference sequence, software used and any other information relevant to the history of the file. The field definition line names eight mandatory columns, corresponding to data columns representing the chromosome (CHROM), a 1-based position of the start of the variant (POS), unique identifiers of the variant (ID), the reference allele (REF), a comma separated list of alternate non-reference alleles (ALT), a phred-scaled quality score (QUAL), site filtering information (FILTER) and a semicolon separated list of additional, user extensible annotation (INFO). In addition, if samples are present in the file, the mandatory header columns are followed by a FORMAT column and an arbitrary number of sample IDs that define the samples included in the VCF file. The FORMAT column is used to define the information contained within each subsequent genotype column, which consists of a colon separated list of fields. For example, the FORMAT field GT:GQ:DP in the fourth data entry of Figure 4a indicates that the subsequent entries contain information regarding the genotype, genotype quality and read depth for each sample. All data lines are TAB delimited and the number of fields in each data line must match the number of fields in the header line. It is strongly recommended that all annotation tags used are declared in the VCF header section.
+
+
+
+Figure 4. (a) Example of valid VCF. The header lines ##fileformat and #CHROM are mandatory, the rest is optional but strongly recommended. Each line of the body describes variants present in the sampled population at one genomic position or region. All alternate alleles are listed in the ALT column and referenced from the genotype fields as 1-based indexes to this list; the reference haplotype is designated as 0. For multiploid data, the separator indicates whether the data are phased (|) or unphased (/). Thus, the two alleles C and G at the positions 2 and 5 in this figure occur on the same chromosome in SAMPLE1. The first data line shows an example of a deletion (present in SAMPLE1) and a replacement of two bases by another base (SAMPLE2); the second line shows a SNP and an insertion; the third a SNP; the fourth a large structural variant described by the annotation in the INFO column, the coordinate is that of the base before the variant. (b–f) Alignments and VCF representations of different sequence variants: SNP, insertion, deletion, replacement, and a large deletion. The REF column shows the reference bases replaced by the haplotype in the ALT column. The coordinate refers to the first reference base.
(g) Users are advised to use the simplest representation possible and the lowest coordinate in cases where the position is ambiguous.
+
+### Annotating
+
+Component: HPG-Variant
+
+The functional consequences of every variant found are then annotated using the HPG-Variant software, which extracts from CellBase, the knowledge database, all the information relevant to the predicted pathologic effect of the variants.
+
+VARIANT (VARIant Analysis Tool) (4) reports information on the variants found that includes consequence type and annotations taken from different databases and repositories (SNPs and variants from dbSNP and 1000 genomes, and disease-related variants from the Genome-Wide Association Study (GWAS) catalog, Online Mendelian Inheritance in Man (OMIM), Catalog of Somatic Mutations in Cancer (COSMIC) mutations, etc.). VARIANT also produces a rich variety of annotations that include information on the regulatory (transcription factor or miRNA binding sites, etc.) or structural roles, or on the selective pressures on the sites affected by the variation. This information allows extending the conventional reports beyond the coding regions and expands the knowledge on the contribution of non-coding or synonymous variants to the phenotype studied.
+
+Input: VCF
+
+Output: The output of this step is the Variant Call Format (VCF) file, which contains changes with respect to the reference genome with the corresponding QC and functional annotations.
+
+#### CellBase
+
+CellBase (5) is a relational database that integrates biological information from different sources and includes:
+
+Core features
+
+We took genome sequences, genes, transcripts, exons, cytobands or cross references (xrefs) identifiers (IDs) from Ensembl (6). Protein information including sequences, xrefs or protein features (natural variants, mutagenesis sites, post-translational modifications, etc.) were imported from UniProt (7).
+
+Regulatory
+
+CellBase imports miRNA from miRBase (8); curated and non-curated miRNA targets from miRecords (9), miRTarBase (10), TargetScan (11) and microRNA.org (12); and CpG islands and conserved regions from the UCSC database (13).
+
+Functional annotation
+
+OBO Foundry (14) develops many biomedical ontologies that are implemented in OBO format. We designed a SQL schema to store these OBO ontologies and 30 ontologies were imported. OBO ontology term annotations were taken from Ensembl (6). InterPro (15) annotations were also imported.
+
+Variation
+
+CellBase includes SNPs from dbSNP (16); SNP population frequencies from HapMap (17), the 1000 genomes project (18) and Ensembl (6); phenotypically annotated SNPs were imported from the NHGRI GWAS Catalog (19), HGMD (20), the Open Access GWAS Database (21), UniProt (7) and OMIM (22); mutations from COSMIC (23) and structural variations from Ensembl (6).
+
+Systems biology
+
+We also import systems biology information like interactome information from IntAct (24). Reactome (25) stores pathway and interaction information in BioPAX (26) format. The BioPAX data exchange format enables the integration of diverse pathway resources. We successfully solved the problem of storing data released in BioPAX format into a SQL relational schema, which allowed us to import Reactome into CellBase.
+
+### [Diagnostic Component (TEAM)](diagnostic-component-team/)
+
+### [Priorization Component (BiERApp)](priorization-component-bierapp/)
+
+## Usage
+
+First of all, we should load the ngsPipeline module:
+
+```bash
+ $ module load ngsPipeline
+```
+
+This command will load the python/2.7.5 module and all the required modules (hpg-aligner, gatk, etc.).
+
+If we launch ngsPipeline with ‘-h’, we will get the usage help:
+
+```bash
+ $ ngsPipeline -h
+ Usage: ngsPipeline.py [-h] -i INPUT -o OUTPUT -p PED --project PROJECT --queue
+                       QUEUE [--stages-path STAGES_PATH] [--email EMAIL]
+                       [--prefix PREFIX] [-s START] [-e END] --log
+
+ Python pipeline
+
+ optional arguments:
+   -h, --help            show this help message and exit
+   -i INPUT, --input INPUT
+   -o OUTPUT, --output OUTPUT
+                         Output Data directory
+   -p PED, --ped PED     Ped file with all individuals
+   --project PROJECT     Project Id
+   --queue QUEUE         Queue Id
+   --stages-path STAGES_PATH
+                         Custom Stages path
+   --email EMAIL         Email
+   --prefix PREFIX       Prefix name for Queue Jobs name
+   -s START, --start START
+                         Initial stage
+   -e END, --end END     Final stage
+   --log                 Log to file
+```
+
+Let us see a brief description of the arguments:
+
+```bash
+ -h, --help. Show the help.
+
+ -i, --input. The input data directory. This directory must have a special structure. We have to create one folder per sample (with the same name). These folders will host the fastq files. These fastq files must have the following pattern "sampleName" + "_" + "1 or 2" + ".fq": 1 for the first pair (in paired-end sequences), and 2 for the second one.
+
+ -o, --output. The output folder. This folder will contain all the intermediate and final folders. Once the pipeline has finished, we can remove the intermediate folders and keep only the final one (with the VCF file containing all the variants).
+
+ -p, --ped. The ped file with the pedigree. This file contains all the sample names. These names must coincide with the names of the input folders. If our input folder contains more samples than the .ped file, the pipeline will use only the samples from the .ped file.
+
+ --email. Email for PBS notifications.
+
+ --prefix. Prefix for PBS Job names.
+
+ -s, --start & -e, --end. Initial and final stage. If we want to start the pipeline at a specific stage we must use -s. If we want to end the pipeline at a specific stage we must use -e.
+
+ --log. Using the log argument, ngsPipeline will write all the logs to this file.
+
+ --project. Project ID of your supercomputer allocation.
+
+ --queue. [Queue](../../resources-allocation-policy/) to run the jobs in.
+```
+
+Input, output and ped arguments are mandatory. If the output folder does not exist, the pipeline will create it.
+
+## Examples
+
+This is an example of ngsPipeline usage:
+
+We have a folder with the following structure in /apps/bio/omics/1.0/sample_data/:
+
+```bash
+ /apps/bio/omics/1.0/sample_data
+ └── data
+     ├── file.ped
+     ├── sample1
+     │   ├── sample1_1.fq
+     │   └── sample1_2.fq
+     └── sample2
+         ├── sample2_1.fq
+         └── sample2_2.fq
+```
+
+The ped file (file.ped) contains the following info:
+
+```bash
+ #family_ID sample_ID parental_ID maternal_ID sex phenotype
+ FAM sample_A 0 0 1 1
+ FAM sample_B 0 0 2 2
+```
+
+Now, let's load the ngsPipeline module and copy the sample data to a [scratch directory](../../storage/storage/):
+
+```bash
+ $ module load ngsPipeline
+ $ mkdir -p /scratch/$USER/omics/results
+ $ cp -r /apps/bio/omics/1.0/sample_data /scratch/$USER/omics/
+```
+
+Now, we can launch the pipeline (replace OPEN-0-0 with your Project ID):
+
+```bash
+ $ ngsPipeline -i /scratch/$USER/omics/sample_data/data -o /scratch/$USER/omics/results -p /scratch/$USER/omics/sample_data/data/file.ped --project OPEN-0-0 --queue qprod
+```
+
+This command submits the processing [jobs to the queue](../../job-submission-and-execution/).
+
+If we want to re-launch the pipeline from stage 4 until stage 20 we should use the next command:
+
+```bash
+ $ ngsPipeline -i /scratch/$USER/omics/sample_data/data -o /scratch/$USER/omics/results -p /scratch/$USER/omics/sample_data/data/file.ped -s 4 -e 20 --project OPEN-0-0 --queue qprod
+```
+
+## Details on the Pipeline
+
+The pipeline calls the following tools:
+
+* [fastqc](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/), a quality control tool for high throughput sequence data.
+* [gatk](https://www.broadinstitute.org/gatk/), The Genome Analysis Toolkit or GATK is a software package developed at the Broad Institute to analyze high-throughput sequencing data. The toolkit offers a wide variety of tools, with a primary focus on variant discovery and genotyping as well as strong emphasis on data quality assurance. Its robust architecture, powerful processing engine and high-performance computing features make it capable of taking on projects of any size.
+* [hpg-aligner](https://github.com/opencb-hpg/hpg-aligner), HPG Aligner has been designed to align short and long reads with high sensitivity, therefore any number of mismatches or indels are allowed. HPG Aligner implements and combines two well known algorithms: _Burrows-Wheeler Transform_ (BWT) to speed up mapping of high-quality reads, and _Smith-Waterman_ (SW) to increase sensitivity when reads cannot be mapped using BWT.
+* [hpg-fastq](http://docs.bioinfo.cipf.es/projects/fastqhpc/wiki), a quality control tool for high throughput sequence data.
+* [hpg-variant](http://docs.bioinfo.cipf.es/projects/hpg-variant/wiki), The HPG Variant suite is an ambitious project aimed at providing a complete suite of tools to work with genomic variation data, from VCF tools to variant profiling or genomic statistics. It is being implemented using High Performance Computing technologies to provide the best performance possible.
+* [picard](http://picard.sourceforge.net/), Picard comprises Java-based command-line utilities that manipulate SAM files, and a Java API (HTSJDK) for creating new programs that read and write SAM files. Both SAM text format and SAM binary (BAM) format are supported.
+* [samtools](http://samtools.sourceforge.net/samtools-c.shtml), SAM Tools provide various utilities for manipulating alignments in the SAM format, including sorting, merging, indexing and generating alignments in a per-position format.
+* [snpEff](http://snpeff.sourceforge.net/), a genetic variant annotation and effect prediction toolbox.
+
+This listing shows which tools are used in each step of the pipeline:
+
+ stage-00: fastqc
+ stage-01: hpg_fastq
+ stage-02: fastqc
+ stage-03: hpg_aligner and samtools
+ stage-04: samtools
+ stage-05: samtools
+ stage-06: fastqc
+ stage-07: picard
+ stage-08: fastqc
+ stage-09: picard
+ stage-10: gatk
+ stage-11: gatk
+ stage-12: gatk
+ stage-13: gatk
+ stage-14: gatk
+ stage-15: gatk
+ stage-16: samtools
+ stage-17: samtools
+ stage-18: fastqc
+ stage-19: gatk
+ stage-20: gatk
+ stage-21: gatk
+ stage-22: gatk
+ stage-23: gatk
+ stage-24: hpg-variant
+ stage-25: hpg-variant
+ stage-26: snpEff
+ stage-27: snpEff
+ stage-28: hpg-variant
+
+## Interpretation
+
+The output folder contains all the subfolders with the intermediate data. This folder contains the final VCF with all the variants. This file can be uploaded into [TEAM](diagnostic-component-team/) by using the VCF file button. It is important to note here that the entire management of the VCF file is local: no patient’s sequence data is sent over the Internet, thus avoiding any problem of data privacy or confidentiality.
+
+![TEAM upload panel. Once the file has been uploaded, a panel must be chosen from the Panel list. Then, pressing the Run button the diagnostic process starts.](../../../img/fig7.png)
+
+Figure 7. _TEAM upload panel. Once the file has been uploaded, a panel must be chosen from the Panel list. Then, pressing the Run button the diagnostic process starts._
+
+Once the file has been uploaded and a panel chosen, TEAM searches first for known diagnostic mutation(s) taken from four databases: HGMD-public (20), [HUMSAVAR](http://www.uniprot.org/docs/humsavar), ClinVar (29) and COSMIC (23).
+
+
+
+Figure 7. The panel manager. The elements used to define a panel are (A) disease terms, (B) diagnostic mutations and (C) genes. Arrows represent actions that can be taken in the panel manager. Panels can be defined by using the known mutations and genes of a particular disease. This can be done by dragging them to the Primary Diagnostic box (action D). This action, in addition to defining the diseases in the Primary Diagnostic box, automatically adds the corresponding genes to the Genes box. The panels can be customized by adding new genes (action F) or removing undesired genes (action G). New disease mutations can be added independently or associated to an already existing disease term (action E). Disease terms can be removed by simply dragging them back (action H).
+
+For variant discovery/filtering we should upload the VCF file into BiERApp using the following form:
+
+Figure 8. _BiERApp VCF upload panel._ It is recommended to choose a name for the job as well as a description.
+
+Each prioritization (‘job’) has three associated screens that facilitate the filtering steps. The first one, the ‘Summary’ tab, displays statistics of the data set analyzed: the samples analyzed, the number and types of variants found, and their distribution according to consequence types. The second screen, in the ‘Variants and effect’ tab, is the actual filtering tool, and the third one, the ‘Genome view’ tab, offers a representation of the selected variants within the genomic context provided by an embedded version of the Genome Maps Tool (30).
+
+
+
+Figure 9. This picture shows all the information associated with the variants. If a variant has an associated phenotype, we can see it in the last column. In this case, the variant 7:132481242 C>T is associated with the phenotype: large intestine tumor.
+
+## References
+
+1. Heng Li, Bob Handsaker, Alec Wysoker, Tim Fennell, Jue Ruan, Nils Homer, Gabor Marth, Goncalo Abecasis, Richard Durbin and 1000 Genome Project Data Processing Subgroup: The Sequence Alignment/Map format and SAMtools. Bioinformatics 2009, 25: 2078-2079.
+1. McKenna A, Hanna M, Banks E, Sivachenko A, Cibulskis K, Kernytsky A, Garimella K, Altshuler D, Gabriel S, Daly M, DePristo MA: The Genome Analysis Toolkit: a MapReduce framework for analyzing next-generation DNA sequencing data. Genome Res 2010, 20:1297-1303.
+1. Petr Danecek, Adam Auton, Goncalo Abecasis, Cornelis A. Albers, Eric Banks, Mark A. DePristo, Robert E. Handsaker, Gerton Lunter, Gabor T. Marth, Stephen T. Sherry, Gilean McVean, Richard Durbin, and 1000 Genomes Project Analysis Group. The variant call format and VCFtools. Bioinformatics 2011, 27: 2156-2158.
+1. Medina I, De Maria A, Bleda M, Salavert F, Alonso R, Gonzalez CY, Dopazo J: VARIANT: Command Line, Web service and Web interface for fast and accurate functional characterization of variants found by Next-Generation Sequencing. Nucleic Acids Res 2012, 40:W54-58.
+1. Bleda M, Tarraga J, de Maria A, Salavert F, Garcia-Alonso L, Celma M, Martin A, Dopazo J, Medina I: CellBase, a comprehensive collection of RESTful web services for retrieving relevant biological information from heterogeneous sources. Nucleic Acids Res 2012, 40:W609-614.
+1. Flicek,P., Amode,M.R., Barrell,D., Beal,K., Brent,S., Carvalho-Silva,D., Clapham,P., Coates,G., Fairley,S., Fitzgerald,S. et al. (2012) Ensembl 2012. Nucleic Acids Res., 40, D84–D90.
+1. UniProt Consortium. (2012) Reorganizing the protein space at the Universal Protein Resource (UniProt). Nucleic Acids Res., 40, D71–D75.
+1. Kozomara,A. and Griffiths-Jones,S. (2011) miRBase: integrating microRNA annotation and deep-sequencing data. Nucleic Acids Res., 39, D152–D157.
+1. Xiao,F., Zuo,Z., Cai,G., Kang,S., Gao,X. and Li,T. (2009) miRecords: an integrated resource for microRNA-target interactions. Nucleic Acids Res., 37, D105–D110.
+1. Hsu,S.D., Lin,F.M., Wu,W.Y., Liang,C., Huang,W.C., Chan,W.L., Tsai,W.T., Chen,G.Z., Lee,C.J., Chiu,C.M. et al. (2011) miRTarBase: a database curates experimentally validated microRNA-target interactions. Nucleic Acids Res., 39, D163–D169.
+1. Friedman,R.C., Farh,K.K., Burge,C.B. and Bartel,D.P. (2009) Most mammalian mRNAs are conserved targets of microRNAs. Genome Res., 19, 92–105.
+1. Betel,D., Wilson,M., Gabow,A., Marks,D.S. and Sander,C. (2008) The microRNA.org resource: targets and expression. Nucleic Acids Res., 36, D149–D153.
+1. Dreszer,T.R., Karolchik,D., Zweig,A.S., Hinrichs,A.S., Raney,B.J., Kuhn,R.M., Meyer,L.R., Wong,M., Sloan,C.A., Rosenbloom,K.R. et al. (2012) The UCSC genome browser database: extensions and updates 2011. Nucleic Acids Res., 40, D918–D923.
+1. Smith,B., Ashburner,M., Rosse,C., Bard,J., Bug,W., Ceusters,W., Goldberg,L.J., Eilbeck,K., Ireland,A., Mungall,C.J. et al. (2007) The OBO Foundry: coordinated evolution of ontologies to support biomedical data integration. Nat. Biotechnol., 25, 1251–1255.
+1. Hunter,S., Jones,P., Mitchell,A., Apweiler,R., Attwood,T.K., Bateman,A., Bernard,T., Binns,D., Bork,P., Burge,S. et al. (2012) InterPro in 2011: new developments in the family and domain prediction database.
Nucleic Acids Res., 40, D306–D312.
+1. Sherry,S.T., Ward,M.H., Kholodov,M., Baker,J., Phan,L., Smigielski,E.M. and Sirotkin,K. (2001) dbSNP: the NCBI database of genetic variation. Nucleic Acids Res., 29, 308–311.
+1. Altshuler,D.M., Gibbs,R.A., Peltonen,L., Dermitzakis,E., Schaffner,S.F., Yu,F., Bonnen,P.E., de Bakker,P.I., Deloukas,P., Gabriel,S.B. et al. (2010) Integrating common and rare genetic variation in diverse human populations. Nature, 467, 52–58.
+1. 1000 Genomes Project Consortium. (2010) A map of human genome variation from population-scale sequencing. Nature, 467, 1061–1073.
+1. Hindorff,L.A., Sethupathy,P., Junkins,H.A., Ramos,E.M., Mehta,J.P., Collins,F.S. and Manolio,T.A. (2009) Potential etiologic and functional implications of genome-wide association loci for human diseases and traits. Proc. Natl Acad. Sci. USA, 106, 9362–9367.
+1. Stenson,P.D., Ball,E.V., Mort,M., Phillips,A.D., Shiel,J.A., Thomas,N.S., Abeysinghe,S., Krawczak,M. and Cooper,D.N. (2003) Human gene mutation database (HGMD): 2003 update. Hum. Mutat., 21, 577–581.
+1. Johnson,A.D. and O’Donnell,C.J. (2009) An open access database of genome-wide association results. BMC Med. Genet., 10, 6.
+1. McKusick,V. (1998) A Catalog of Human Genes and Genetic Disorders, 12th edn. Johns Hopkins University Press, Baltimore, MD.
+1. Forbes,S.A., Bindal,N., Bamford,S., Cole,C., Kok,C.Y., Beare,D., Jia,M., Shepherd,R., Leung,K., Menzies,A. et al. (2011) COSMIC: mining complete cancer genomes in the catalogue of somatic mutations in cancer. Nucleic Acids Res., 39, D945–D950.
+1. Kerrien,S., Aranda,B., Breuza,L., Bridge,A., Broackes-Carter,F., Chen,C., Duesbury,M., Dumousseau,M., Feuermann,M., Hinz,U. et al. (2012) The IntAct molecular interaction database in 2012. Nucleic Acids Res., 40, D841–D846.
+1. Croft,D., O’Kelly,G., Wu,G., Haw,R., Gillespie,M., Matthews,L., Caudy,M., Garapati,P., Gopinath,G., Jassal,B. et al. (2011) Reactome: a database of reactions, pathways and biological processes. Nucleic Acids Res., 39, D691–D697.
+1. Demir,E., Cary,M.P., Paley,S., Fukuda,K., Lemer,C., Vastrik,I., Wu,G., D’Eustachio,P., Schaefer,C., Luciano,J. et al. (2010) The BioPAX community standard for pathway data sharing. Nature Biotechnol., 28, 935–942.
+1. Alemán Z, García-García F, Medina I, Dopazo J (2014): A web tool for the design and management of panels of genes for targeted enrichment and massive sequencing for clinical applications. Nucleic Acids Res 42: W83-7.
+1. [Alemán A](http://www.ncbi.nlm.nih.gov/pubmed?term=Alem%C3%A1n%20A%5BAuthor%5D&cauthor=true&cauthor_uid=24803668), [Garcia-Garcia F](http://www.ncbi.nlm.nih.gov/pubmed?term=Garcia-Garcia%20F%5BAuthor%5D&cauthor=true&cauthor_uid=24803668), [Salavert F](http://www.ncbi.nlm.nih.gov/pubmed?term=Salavert%20F%5BAuthor%5D&cauthor=true&cauthor_uid=24803668), [Medina I](http://www.ncbi.nlm.nih.gov/pubmed?term=Medina%20I%5BAuthor%5D&cauthor=true&cauthor_uid=24803668), [Dopazo J](http://www.ncbi.nlm.nih.gov/pubmed?term=Dopazo%20J%5BAuthor%5D&cauthor=true&cauthor_uid=24803668) (2014). A web-based interactive framework to assist in the prioritization of disease candidate genes in whole-exome sequencing studies. [Nucleic Acids Res.](http://www.ncbi.nlm.nih.gov/pubmed/?term=BiERapp "Nucleic acids research.") 42: W88-93.
+1. Landrum,M.J., Lee,J.M., Riley,G.R., Jang,W., Rubinstein,W.S., Church,D.M. and Maglott,D.R. (2014) ClinVar: public archive of relationships among sequence variation and human phenotype. Nucleic Acids Res., 42, D980–D985.
+1. Medina I, Salavert F, Sanchez R, de Maria A, Alonso R, Escobar P, Bleda M, Dopazo J: Genome Maps, a new generation genome browser. Nucleic Acids Res 2013, 41:W41-46.
diff --git a/docs.it4i/anselm-cluster-documentation/software/omics-master/priorization-component-bierapp.md b/docs.it4i/anselm/software/omics-master/priorization-component-bierapp.md
similarity index 62%
rename from docs.it4i/anselm-cluster-documentation/software/omics-master/priorization-component-bierapp.md
rename to docs.it4i/anselm/software/omics-master/priorization-component-bierapp.md
index 8b5cb8cf6ae3cdf00649ea640ec417920d3ad76c..6f88fecc7b92de4bd29a6e022902cb06dbbf1300 100644
--- a/docs.it4i/anselm-cluster-documentation/software/omics-master/priorization-component-bierapp.md
+++ b/docs.it4i/anselm/software/omics-master/priorization-component-bierapp.md
@@ -1,21 +1,19 @@
-Prioritization component (BiERapp)
-================================
+# Prioritization Component (BiERapp)

-### Access
+## Access

-BiERapp is available at the following address: <http://omics.it4i.cz/bierapp/>
+BiERapp is available at the [following address](http://omics.it4i.cz/bierapp/).

-!!! Note "Note"
-    The address is accessible onlyvia VPN.
+!!! note
+    The address is accessible only via VPN.

-###BiERapp
+## BiERapp

-**This tool is aimed to discover new disease genes or variants by studying affected families or cases and controls. It carries out a filtering process to sequentially remove: (i) variants which are not no compatible with the disease because are not expected to have impact on the protein function; (ii) variants that exist at frequencies incompatible with the disease; (iii) variants that do not segregate with the disease. The result is a reduced set of disease gene candidates that should be further validated experimentally.**
+**This tool is aimed at discovering new disease genes or variants by studying affected families or cases and controls. It carries out a filtering process to sequentially remove: (i) variants which are not compatible with the disease because they are not expected to have an impact on the protein function; (ii) variants that exist at frequencies incompatible with the disease; (iii) variants that do not segregate with the disease. The result is a reduced set of disease gene candidates that should be further validated experimentally.**

 BiERapp (28) efficiently helps in the identification of causative variants in family and sporadic genetic diseases. The program reads lists of predicted variants (nucleotide substitutions and indels) in affected individuals or tumor samples and controls. In family studies, different modes of inheritance can easily be defined to filter out variants that do not segregate with the disease along the family. Moreover, BiERapp integrates additional information such as allelic frequencies in the general population and the most popular damaging scores to further narrow down the number of putative variants in successive filtering steps. BiERapp provides an interactive and user-friendly interface that implements the filtering strategy used in the context of a large-scale genomic project carried out by the Spanish Network for Research in Rare Diseases (CIBERER) and the Medical Genome Project, in which more than 800 exomes have been analyzed.

 

-**Figure 6**. Web interface to the prioritization tool. This figure shows the interface of the web tool for candidate gene
+**Figure 6.** Web interface to the prioritization tool.
This figure shows the interface of the web tool for candidate gene prioritization with the filters available. The tool includes a genomic viewer (Genome Maps 30) that enables the representation of the variants in the corresponding genomic coordinates.
-
diff --git a/docs.it4i/anselm-cluster-documentation/software/openfoam.md b/docs.it4i/anselm/software/openfoam.md
similarity index 75%
rename from docs.it4i/anselm-cluster-documentation/software/openfoam.md
rename to docs.it4i/anselm/software/openfoam.md
index 56f9d6985d02b42c09a68cefd8b3a39523955a61..a2c98e3f2d84e11b0e73b3b6c7d9c083422101bb 100644
--- a/docs.it4i/anselm-cluster-documentation/software/openfoam.md
+++ b/docs.it4i/anselm/software/openfoam.md
@@ -1,34 +1,33 @@
-OpenFOAM
-========
+# OpenFOAM

-##A free, open source CFD software package
+A Free, Open Source CFD Software Package
+
+## Introduction

-Introduction
-----------------
 OpenFOAM is a free, open source CFD software package developed by [**OpenCFD Ltd**](http://www.openfoam.com/about) at [**ESI Group**](http://www.esi-group.com/) and distributed by the [**OpenFOAM Foundation**](http://www.openfoam.org/). It has a large user base across most areas of engineering and science, from both commercial and academic organisations.

 Homepage: <http://www.openfoam.com/>

-###Installed version
+### Installed Version

 Currently, several versions compiled by GCC/ICC compilers in single/double precision with several versions of OpenMPI are available on Anselm.

 For example, the syntax of an available OpenFOAM module is:

-< openfoam/2.2.1-icc-openmpi1.6.5-DP >
+\<openfoam\/2.2.1-icc-openmpi1.6.5-DP\>

 This means OpenFOAM version 2.2.1 compiled by the ICC compiler with OpenMPI 1.6.5 in double precision. The naming convention of the installed versions is the following:

-openfoam/<>VERSION>>-<>COMPILER<span>>-<</span><span>openmpiVERSION</span><span>>-<</span><span>PRECISION</span><span>></span>
+openfoam\/\<VERSION\>-\<COMPILER\>-\<openmpiVERSION\>-\<PRECISION\>

-- <VERSION>> - version of openfoam
-- <COMPILER> - version of used compiler
-- <openmpiVERSION> - version of used openmpi/impi
-- <PRECISION> - DP/SP – double/single precision
+* \<VERSION\> - version of openfoam
+* \<COMPILER\> - version of used compiler
+* \<openmpiVERSION\> - version of used openmpi/impi
+* \<PRECISION\> - DP/SP – double/single precision

-###Available OpenFOAM modules
+### Available OpenFOAM Modules

 To check available modules use

@@ -46,10 +45,9 @@ In /opt/modules/modulefiles/engineering you can see installed engineering softwa
 lsdyna/7.x.x openfoam/2.2.1-gcc481-openmpi1.6.5-SP
```

-For information how to use modules please [look here](../environment-and-modules/ "Environment and Modules ").
+For information on how to use modules, please [look here](../environment-and-modules/).

-Getting Started
-------------------
+## Getting Started

 To create the OpenFOAM environment on Anselm, give the commands:

```bash
@@ -59,10 +57,10 @@ To create OpenFOAM environment on ANSELM give the commands:

 $ source $FOAM_BASHRC
```

-!!! Note "Note"
-    Please load correct module with your requirements “compiler - GCC/ICC, precision - DP/SP”.
+!!! note
+    Please load the correct module for your requirements: “compiler - GCC/ICC, precision - DP/SP”.

-Create a project directory within the $HOME/OpenFOAM directory named ><USER>-<OFversion> and create a directory named run within it, e.g.
+Create a project directory within the $HOME/OpenFOAM directory named \<USER\>-\<OFversion\> and create a directory named run within it, e.g.
by typing:

```bash
 $ mkdir -p $FOAM_RUN
@@ -74,7 +72,7 @@ Project directory is now available by typing:

 $ cd /home/<USER>/OpenFOAM/<USER>-<OFversion>/run
```

-<OFversion> - for example <2.2.1>
+\<OFversion\> - for example \<2.2.1\>

 or

@@ -90,10 +88,9 @@ Copy the tutorial examples directory in the OpenFOAM distribution to the run dir

 Now you can run the first case, for example incompressible laminar flow in a cavity.

-Running Serial Applications
-------------------------------
+## Running Serial Applications

-Create a Bash script >test.sh
+Create a Bash script test.sh

```bash
 #!/bin/bash
@@ -114,16 +111,17 @@ Job submission

```bash
 $ qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=16,walltime=03:00:00 test.sh
```
-For information about job submission please [look here](../resource-allocation-and-job-execution/job-submission-and-execution/ "Job submission").

-Running applications in parallel
--------------------------------------------------
+For information about job submission, please [look here](../job-submission-and-execution/).
+
+## Running Applications in Parallel
+
 Run the second case, for example the external incompressible turbulent flow case motorBike.

 First we must run the serial applications blockMesh and decomposePar to prepare the parallel computation.

-!!! Note "Note"
-    Create a Bash scrip test.sh:
+!!! note
+    Create a Bash script test.sh:

```bash
 #!/bin/bash
@@ -147,8 +145,8 @@ Job submission

 This job creates a simple block mesh and performs the domain decomposition. Check your decomposition, and submit the parallel computation:

-!!! Note "Note"
-    Create a PBS script testParallel.pbs:
+!!! note
+    Create a PBS script testParallel.pbs:

```bash
 #!/bin/bash
@@ -179,8 +177,8 @@ Job submission

```bash
 $ qsub testParallel.pbs
```
-Compile your own solver
----------------------------------------
+
+## Compile Your Own Solver

 Initialize the OpenFOAM environment before compiling your solver.

@@ -210,7 +208,7 @@ Rename icoFoam.C to My_icoFOAM.C

 $ mv icoFoam.C My_icoFoam.C
```

-Edit >*files* file in *Make* directory:
+Edit the _files_ file in the _Make_ directory:

```bash
 icoFoam.C
@@ -229,6 +227,3 @@ In directory My_icoFoam give the compilation command:

```bash
 $ wmake
```
-
-------------------------------------------------------------------------
- **Have a fun with OpenFOAM :)**
ParaView users can quickly build visualizations to analyze their data using qualitative and quantitative techniques. The data exploration can be done interactively in 3D or programmatically using ParaView's batch processing capabilities.

@@ -12,15 +10,15 @@ ParaView was developed to analyze extremely large datasets using distributed mem

 Homepage: <http://www.paraview.org/>

-Installed version
-----------------
+## Installed Version
+
 Currently, version 4.0.1 compiled with GCC 4.8.1 against the Bull MPI library and OSMesa 10.0 is installed on Anselm.

-Usage
------
+## Usage
+
 On Anselm, ParaView is to be used in client-server mode. A parallel ParaView server is launched on compute nodes by the user, and the client is launched on your desktop PC to control and view the visualization. Download the ParaView client application for your OS here: <http://paraview.org/paraview/resources/software.php>. Important: **your version must match the version number installed on Anselm**! (currently v4.0.1)

-### Launching server
+### Launching Server

 To launch the server, you must first allocate compute nodes, for example

@@ -28,7 +26,7 @@ To launch the server, you must first allocate compute nodes, for example

 $ qsub -I -q qprod -A OPEN-0-0 -l select=2
```

-to launch an interactive session on 2 nodes. Refer to [Resource Allocation and Job Execution](../resource-allocation-and-job-execution/introduction/) for details.
+to launch an interactive session on 2 nodes. Refer to [Resource Allocation and Job Execution](../job-submission-and-execution/) for details.

 After the interactive session is opened, load the ParaView module:

@@ -47,7 +45,7 @@ Now launch the parallel server, with number of nodes times 16 processes:

 Note that the server is listening on compute node cn77 in this case; we shall use this information later.

-### Client connection
+### Client Connection

 Because a direct connection is not allowed to compute nodes on Anselm, you must establish a SSH tunnel to connect to the server. Choose a port number on your PC to be forwarded to the ParaView server, for example 12345. If your PC is running Linux, use this command to establish the SSH tunnel:

```bash
 ssh -TN -L 12345:cn77:11111 username@anselm.it4i.cz
```

-replace username with your login and cn77 with the name of compute node your ParaView server is running on (see previous step). If you use PuTTY on Windows, load Anselm connection configuration, t>hen go to Connection-> SSH>->Tunnels to set up the port forwarding. Click Remote radio button. Insert 12345 to Source port textbox. Insert cn77:11111. Click Add button, then Open.
+Replace username with your login and cn77 with the name of the compute node your ParaView server is running on (see previous step). If you use PuTTY on Windows, load the Anselm connection configuration, then go to Connection -> SSH -> Tunnels to set up the port forwarding. Click the Remote radio button. Insert 12345 into the Source port textbox. Insert cn77:11111. Click the Add button, then Open.

-Now launch ParaView client installed on your desktop PC. Select File->Connect..., click Add Server. Fill in the following :
+Now launch the ParaView client installed on your desktop PC. Select File -> Connect..., click Add Server. Fill in the following:

 Name : Anselm tunnel
 Server Type : Client/Server
@@ -72,10 +70,10 @@ Click Configure, Save, the configuration is now saved for later use. Now click C

 You can now use Parallel ParaView.
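+
+If you prefer the command line on Windows, the same tunnel can also be opened with plink, the command-line connection tool shipped with PuTTY. A minimal sketch, assuming plink.exe is on your PATH and your server runs on cn77:
+
+```bash
+plink -N -L 12345:cn77:11111 username@anselm.it4i.cz
+```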
-### Close server
+### Close Server

 Remember to close the interactive session after you finish working with the ParaView server, as it will remain launched even after your client is disconnected and will continue to consume resources.

-GPU support
-----------
+## GPU Support
+
 Currently, GPU acceleration is not supported in the server and ParaView will not take advantage of accelerated nodes on Anselm. Support for GPU acceleration might be added in the future.
diff --git a/docs.it4i/anselm-cluster-documentation/software/kvirtualization.md b/docs.it4i/anselm/software/virtualization.md
similarity index 83%
rename from docs.it4i/anselm-cluster-documentation/software/kvirtualization.md
rename to docs.it4i/anselm/software/virtualization.md
index 508fe8bf91326cf468ce398d2e2ae275a02c8ff4..a5c7c95aa5f2c1df601606ecc42ed2c8398fb249 100644
--- a/docs.it4i/anselm-cluster-documentation/software/kvirtualization.md
+++ b/docs.it4i/anselm/software/virtualization.md
@@ -1,24 +1,22 @@
-Virtualization
-==============
+# Virtualization

-##Running virtual machines on compute nodes
+Running virtual machines on compute nodes
+
+## Introduction

-Introduction
-------------
 There are situations when Anselm's environment is not suitable for user needs.

-- Application requires different operating system (e.g Windows), application is not available for Linux
-- Application requires different versions of base system libraries and tools
-- Application requires specific setup (installation, configuration) of complex software stack
-- Application requires privileged access to operating system
-- ... and combinations of above cases
+* Application requires a different operating system (e.g. Windows), or the application is not available for Linux
+* Application requires different versions of base system libraries and tools
+* Application requires a specific setup (installation, configuration) of a complex software stack
+* Application requires privileged access to the operating system
+* ... and combinations of the above cases

-We offer solution for these cases - **virtualization**. Anselm's environment gives the possibility to run virtual machines on compute nodes. Users can create their own images of operating system with specific software stack and run instances of these images as virtual machines on compute nodes. Run of virtual machines is provided by standard mechanism of [Resource Allocation and Job Execution](../../resource-allocation-and-job-execution/introduction/).
+We offer a solution for these cases - **virtualization**. Anselm's environment gives the possibility to run virtual machines on compute nodes. Users can create their own images of an operating system with a specific software stack and run instances of these images as virtual machines on compute nodes. Running virtual machines is provided by the standard mechanism of [Resource Allocation and Job Execution](../job-submission-and-execution/).

 The solution is based on the QEMU-KVM software stack and provides hardware-assisted x86 virtualization.

-Limitations
-----------
+## Limitations

 Anselm's infrastructure was not designed for virtualization. Anselm's environment is not intended primarily for virtualization; compute nodes, storage and all of Anselm's infrastructure are intended and optimized for running HPC jobs. This implies a suboptimal configuration of virtualization, and limitations.
@@ -28,53 +26,52 @@ Virtualization has also some drawbacks, it is not so easy to setup efficient sol

 The solution described in chapter [HOWTO](virtualization/#howto) is suitable for single node tasks; it does not introduce virtual machine clustering.

-!!! Note "Note"
-    Please consider virtualization as last resort solution for your needs.
+!!! note
+    Please consider virtualization as a last resort solution for your needs.

-    Please consult use of virtualization with IT4Innovation's support.
+!!! warning
+    Please consult the use of virtualization with IT4Innovations' support.

-    For running Windows application (when source code and Linux native application are not available) consider use of Wine, Windows compatibility layer. Many Windows applications can be run using Wine with less effort and better performance than when using virtualization.
+For running Windows applications (when source code and a Linux native application are not available), consider the use of Wine, the Windows compatibility layer. Many Windows applications can be run using Wine with less effort and better performance than when using virtualization.

-Licensing
---------
+## Licensing

 IT4Innovations does not provide any licenses for operating systems and software of virtual machines. Users are (in accordance with the [Acceptable use policy document](http://www.it4i.cz/acceptable-use-policy.pdf)) fully responsible for licensing all software running in virtual machines on Anselm. Be aware of the complex conditions of licensing software in virtual environments.

-!!! Note "Note"
-    Users are responsible for licensing OS e.g. MS Windows and all software running in their virtual machines.
+!!! note
+    Users are responsible for licensing the OS (e.g. MS Windows) and all software running in their virtual machines.

- HOWTO
---------
+## Howto

 ### Virtual Machine Job Workflow

 We propose this job workflow:

-
+

 Our recommended solution is that the job script creates a distinct shared job directory, which makes a central point for data exchange between Anselm's environment, the compute node (host) (e.g. HOME, SCRATCH, local scratch and other local or cluster file systems) and the virtual machine (guest). The job script links or copies input data and instructions on what to do (the run script) for the virtual machine to the job directory; the virtual machine then processes the input data according to the instructions in the job directory and stores the output back to the job directory. We recommend that the virtual machine runs in so-called [snapshot mode](virtualization/#snapshot-mode): the image is immutable - it does not change, so one image can be used for many concurrent jobs.

 ### Procedure

-1. Prepare image of your virtual machine
-2. Optimize image of your virtual machine for Anselm's virtualization
-3. Modify your image for running jobs
-4. Create job script for executing virtual machine
-5. Run jobs
+1. Prepare an image of your virtual machine
+1. Optimize the image of your virtual machine for Anselm's virtualization
+1. Modify your image for running jobs
+1. Create a job script for executing the virtual machine
+1. Run jobs

-### Prepare image of your virtual machine
+### Prepare Image of Your Virtual Machine

 You can either use your existing image or create a new image from scratch.

 QEMU currently supports these image types or formats:

-- raw
-- cloop
-- cow
-- qcow
-- qcow2
-- vmdk - VMware 3 & 4, or 6 image format, for exchanging images with that product
-- vdi - VirtualBox 1.1 compatible image format, for exchanging images with VirtualBox.
+* raw
+* cloop
+* cow
+* qcow
+* qcow2
+* vmdk - VMware 3 & 4, or 6 image format, for exchanging images with that product
+* vdi - VirtualBox 1.1 compatible image format, for exchanging images with VirtualBox.

 You can convert your existing image using the qemu-img convert command. Supported formats of this command are: blkdebug blkverify bochs cloop cow dmg file ftp ftps host_cdrom host_device host_floppy http https nbd parallels qcow qcow2 qed raw sheepdog tftp vdi vhdx vmdk vpc vvfat.

 We recommend using the advanced QEMU native image format qcow2.

 [More about QEMU Images](http://en.wikibooks.org/wiki/QEMU/Images)

-### Optimize image of your virtual machine
+### Optimize Image of Your Virtual Machine

 Use virtio devices (for disk/drive and network adapter) and install virtio drivers (paravirtualized drivers) into the virtual machine. There is a significant performance gain when using virtio drivers. For more information see [Virtio Linux](http://www.linux-kvm.org/page/Virtio) and [Virtio Windows](http://www.linux-kvm.org/page/WindowsGuestDrivers/Download_Drivers).

 Remove all paging space, swap files, partitions, etc.

 Shrink your image. (It is recommended to zero all free space and reconvert the image using qemu-img.)

-### Modify your image for running jobs
+### Modify Your Image for Running Jobs

 Your image should run some kind of operating system startup script. The startup script should run the application and, when the application exits, run shutdown or quit the virtual machine.

 We recommend that the startup script

-- maps Job Directory from host (from compute node)
-- runs script (we call it "run script") from Job Directory and waits for application's exit
- - for management purposes if run script does not exist wait for some time period (few minutes)
-- shutdowns/quits OS
+* maps the Job Directory from the host (from the compute node)
+* runs the script (we call it the "run script") from the Job Directory and waits for the application's exit
+  * for management purposes, if the run script does not exist, waits for some time period (a few minutes)
+* shuts down/quits the OS

 For Windows operating systems we suggest using a Local Group Policy Startup script, for Linux operating systems rc.local, a runlevel init script or a similar service.

@@ -149,7 +146,7 @@ Example startup script for Windows virtual machine:

 The example startup script maps the shared job directory as drive z: and looks for the run script called run.bat. If the run script is found, it is run; else the script waits for 5 minutes, then shuts down the virtual machine.

-### Create job script for executing virtual machine
+### Create Job Script for Executing Virtual Machine

 Create the job script according to the recommended

@@ -201,11 +198,11 @@ Example run script (run.bat) for Windows virtual machine:

 call application.bat z:data z:output
```

-Run script runs application from shared job directory (mapped as drive z:), process input data (z:data) from job directory and store output to job directory (z:output).
+The run script runs the application from the shared job directory (mapped as drive z:), processes input data (z:data) from the job directory and stores output to the job directory (z:output).

-### Run jobs
+### Run Jobs

-Run jobs as usual, see [Resource Allocation and Job Execution](../../resource-allocation-and-job-execution/introduction/). Use only full node allocation for virtualization jobs.
+Run jobs as usual, see [Resource Allocation and Job Execution](../job-submission-and-execution/). Use only full node allocation for virtualization jobs.
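+
+To make the workflow concrete, a job script might look like the following minimal sketch. This is an illustration only, not the cluster's reference script (which this diff does not show); the image path, the job directory under local scratch (assumed here to be /lscratch/$PBS_JOBID) and the resource sizes are assumptions you must adapt.
+
+```bash
+#!/bin/bash
+# Illustrative sketch only - adjust image, paths and sizes to your setup.
+JOBDIR=/lscratch/$PBS_JOBID/job        # shared job directory on local scratch
+mkdir -p $JOBDIR
+cp $HOME/vm/run.bat $JOBDIR/           # instructions for the guest (run script)
+cp -r $HOME/vm/data $JOBDIR/           # input data
+
+export TMPDIR=/lscratch/$PBS_JOBID     # temporary files of snapshot mode
+
+# Boot the immutable image in snapshot mode, share JOBDIR with the guest
+# via SMB and forward host port 2222 to the guest's SSH port.
+qemu-system-x86_64 -enable-kvm -cpu host -smp 16 -m 32768 \
+  -drive file=$HOME/vm/image.qcow2,media=disk,if=virtio \
+  -device virtio-net-pci,netdev=net0 \
+  -netdev user,id=net0,smb=$JOBDIR,hostfwd=tcp::2222-:22 \
+  -vnc none -snapshot
+
+cp -r $JOBDIR/output $PBS_O_WORKDIR/   # save results back to the submit directory
+```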
### Running Virtual Machines

@@ -222,6 +219,7 @@ Get help

```bash
 $ man qemu
```
+
 Run virtual machine (simple)

```bash
@@ -250,10 +248,10 @@ Run virtual machine using optimized devices, user network back-end with sharing

 Thanks to port forwarding, you can access the virtual machine via SSH (Linux) or RDP (Windows) by connecting to the IP address of the compute node (and port 2222 for SSH). You must use the VPN network.

-!!! Note "Note"
-    Keep in mind, that if you use virtio devices, you must have virtio drivers installed on your virtual machine.
+!!! note
+    Keep in mind that if you use virtio devices, you must have virtio drivers installed on your virtual machine.

-### Networking and data sharing
+### Networking and Data Sharing

 For networking the virtual machine, we suggest using the (default) user network back-end (sometimes called slirp). This network back-end NATs virtual machines and provides useful services for virtual machines such as DHCP, DNS, SMB sharing and port forwarding.

@@ -279,9 +277,9 @@ Optimized network setup with sharing and port forwarding

```bash
 $ qemu-system-x86_64 ... -device virtio-net-pci,netdev=net0 -netdev user,id=net0,smb=/scratch/$USER/tmp,hostfwd=tcp::2222-:22
```

-### Advanced networking
+### Advanced Networking

-**Internet access**
+#### Internet Access

 Sometimes your virtual machine needs access to the internet (to install software, updates, software activation, etc.). We suggest a solution using Virtual Distributed Ethernet (VDE) enabled QEMU with SLIRP running on the login node, tunneled to the compute node. Be aware that this setup has very low performance, the worst performance of all described solutions.

@@ -323,7 +321,7 @@ Optimized setup

```bash
 $ qemu-system-x86_64 ... -device virtio-net-pci,netdev=net0 -netdev vde,id=net0,sock=/tmp/sw0
```

-**TAP interconnect**
+#### TAP Interconnect

 Both the user and VDE network back-ends have low performance. For fast interconnect (10 Gbit/s and more) of the compute node (host) and the virtual machine (guest) we suggest using the Linux kernel TAP device.

@@ -340,9 +338,9 @@ Interface tap0 has IP address 192.168.1.1 and network mask 255.255.255.0 (/24).

 Redirected ports:

-- DNS udp/53->udp/3053, tcp/53->tcp3053
-- DHCP udp/67->udp3067
-- SMB tcp/139->tcp3139, tcp/445->tcp3445).
+* DNS udp/53->udp/3053, tcp/53->tcp/3053
+* DHCP udp/67->udp/3067
+* SMB tcp/139->tcp/3139, tcp/445->tcp/3445

 You can configure the IP address of the virtual machine statically or dynamically. For dynamic addressing, provide your DHCP server on port 3067 of the tap0 interface; you can also provide your DNS server on port 3053 of the tap0 interface, for example:

@@ -395,7 +393,7 @@ Run SMB services

 A virtual machine can of course have more than one network interface controller, and can use more than one network back-end. So you can, for example, combine the user network back-end and the TAP interconnect.

-### Snapshot mode
+### Snapshot Mode

 In snapshot mode the image is not written; changes are written to a temporary file (and discarded after the virtual machine exits). **It is the strongly recommended mode for running your jobs.** Set the TMPDIR environment variable to the local scratch directory for placement of temporary files.

```bash
 $ qemu-system-x86_64 ... -snapshot
```

-### Windows guests
+### Windows Guests

 For Windows guests we recommend these options; life will be easier:
diff --git a/docs.it4i/anselm-cluster-documentation/storage.md b/docs.it4i/anselm/storage.md
similarity index 63%
rename from docs.it4i/anselm-cluster-documentation/storage.md
rename to docs.it4i/anselm/storage.md
index d67279a8ae66cfc36af743d7db9850758961643f..ad082c0d8486efa67428bae1a327527990fc64f2 100644
--- a/docs.it4i/anselm-cluster-documentation/storage.md
+++ b/docs.it4i/anselm/storage.md
@@ -1,15 +1,12 @@
-Storage
-=======
+# Storage

 There are two main shared file systems on the Anselm cluster, the [HOME](#home) and [SCRATCH](#scratch). All login and compute nodes may access the same data on the shared file systems. Compute nodes are also equipped with local (non-shared) scratch, ramdisk and tmp file systems.

-Archiving
---------
+## Archiving

 Please don't use the shared filesystems as a backup for large amounts of data or as a long-term archiving solution. The academic staff and students of research institutions in the Czech Republic can use the [CESNET storage service](#cesnet-data-storage), which is available via SSHFS.

-Shared Filesystems
------------------
+## Shared Filesystems

 The Anselm computer provides two main shared filesystems, the [HOME filesystem](#home) and the [SCRATCH filesystem](#scratch). Both HOME and SCRATCH filesystems are realized as a parallel Lustre filesystem. Both shared file systems are accessible via the InfiniBand network. Extended ACLs are provided on both Lustre filesystems for the purpose of sharing data with other users using fine-grained control.

@@ -26,17 +23,17 @@ If multiple clients try to read and write the same part of a file at the same ti

 There is a default stripe configuration for Anselm Lustre filesystems. However, users can set the following stripe parameters for their own directories or files to get optimum I/O performance:

 1. stripe_size: the size of the chunk in bytes; specify with k, m, or g to use units of KB, MB, or GB, respectively; the size must be an even multiple of 65,536 bytes; default is 1MB for all Anselm Lustre filesystems
-2. stripe_count the number of OSTs to stripe across; default is 1 for Anselm Lustre filesystems one can specify -1 to use all OSTs in the filesystem.
-3. stripe_offset The index of the OST where the first stripe is to be placed; default is -1 which results in random selection; using a non-default value is NOT recommended.
+1. stripe_count: the number of OSTs to stripe across; default is 1 for Anselm Lustre filesystems; one can specify -1 to use all OSTs in the filesystem.
+1. stripe_offset: the index of the OST where the first stripe is to be placed; default is -1, which results in random selection; using a non-default value is NOT recommended.

-!!! Note "Note"
-    Setting stripe size and stripe count correctly for your needs may significantly impact the I/O performance you experience.
+!!! note
+    Setting stripe size and stripe count correctly for your needs may significantly impact the I/O performance you experience.

-Use the lfs getstripe for getting the stripe parameters. Use the lfs setstripe command for setting the stripe parameters to get optimal I/O performance The correct stripe setting depends on your needs and file access patterns.
+Use the lfs getstripe command to get the stripe parameters. Use the lfs setstripe command to set the stripe parameters for optimal I/O performance. The correct stripe setting depends on your needs and file access patterns.
```bash
$ lfs getstripe dir|filename
-$ lfs setstripe -s stripe_size -c stripe_count -o stripe_offset dir|filename
+$ lfs setstripe -s stripe_size -c stripe_count -o stripe_offset dir|filename
```

Example:

@@ -63,15 +60,15 @@ $ man lfs

### Hints on Lustre Striping

-!!! Note "Note"
-    Increase the stripe_count for parallel I/O to the same file.
+!!! note
+    Increase the stripe_count for parallel I/O to the same file.

When multiple processes are writing blocks of data to the same file in parallel, the I/O performance for large files will improve when the stripe_count is set to a larger value. The stripe count sets the number of OSTs the file will be written to. By default, the stripe count is set to 1. While this default setting provides for efficient access of metadata (for example to support the ls -l command), large files should use stripe counts of greater than 1. This will increase the aggregate I/O bandwidth by using multiple OSTs in parallel instead of just one. A rule of thumb is to use a stripe count approximately equal to the number of gigabytes in the file.

Another good practice is to make the stripe count be an integral factor of the number of processes performing the write in parallel, so that you achieve load balance among the OSTs. For example, set the stripe count to 16 instead of 15 when you have 64 processes performing the writes.

-!!! Note "Note"
-    Using a large stripe size can improve performance when accessing very large files
+!!! note
+    Using a large stripe size can improve performance when accessing very large files.

A large stripe size allows each client to have exclusive access to its own part of a file. However, it can be counterproductive in some cases if it does not match your I/O pattern. The choice of stripe size has no effect on a single-stripe file.

@@ -79,84 +76,84 @@ Read more on <http://doc.lustre.org/lustre_manual.xhtml#managingstripingfreespac

### Lustre on Anselm

-The architecture of Lustre on Anselm is composed of two metadata servers (MDS) and four data/object storage servers (OSS). Two object storage servers are used for file system HOME and another two object storage servers are used for file system SCRATCH.
+The architecture of Lustre on Anselm is composed of two metadata servers (MDS) and four data/object storage servers (OSS). Two object storage servers are used for file system HOME and another two object storage servers are used for file system SCRATCH.

Configuration of the storage:

-- HOME Lustre object storage
-  - One disk array NetApp E5400
-  - 22 OSTs
-  - 227 2TB NL-SAS 7.2krpm disks
-  - 22 groups of 10 disks in RAID6 (8+2)
-  - 7 hot-spare disks
-- SCRATCH Lustre object storage
-  - Two disk arrays NetApp E5400
-  - 10 OSTs
-  - 106 2TB NL-SAS 7.2krpm disks
-  - 10 groups of 10 disks in RAID6 (8+2)
-  - 6 hot-spare disks
-- Lustre metadata storage
-  - One disk array NetApp E2600
-  - 12 300GB SAS 15krpm disks
-  - 2 groups of 5 disks in RAID5
-  - 2 hot-spare disks
-
-###HOME
+* HOME Lustre object storage
+  * One disk array NetApp E5400
+  * 22 OSTs
+  * 227 2TB NL-SAS 7.2krpm disks
+  * 22 groups of 10 disks in RAID6 (8+2)
+  * 7 hot-spare disks
+* SCRATCH Lustre object storage
+  * Two disk arrays NetApp E5400
+  * 10 OSTs
+  * 106 2TB NL-SAS 7.2krpm disks
+  * 10 groups of 10 disks in RAID6 (8+2)
+  * 6 hot-spare disks
+* Lustre metadata storage
+  * One disk array NetApp E2600
+  * 12 300GB SAS 15krpm disks
+  * 2 groups of 5 disks in RAID5
+  * 2 hot-spare disks
+
+### HOME

The HOME filesystem is mounted in the directory /home.
Users' home directories /home/username reside on this filesystem. Accessible capacity is 320TB, shared among all users. Individual users are restricted by filesystem usage quotas, set to 250GB per user. If 250GB should prove insufficient for a particular user, please contact [support](https://support.it4i.cz/rt); the quota may be lifted upon request.

-!!! Note "Note"
-    The HOME filesystem is intended for preparation, evaluation, processing and storage of data generated by active Projects.
+!!! note
+    The HOME filesystem is intended for preparation, evaluation, processing and storage of data generated by active Projects.

The HOME filesystem should not be used to archive data of past Projects or other unrelated data.

-The files on HOME filesystem will not be deleted until end of the [users lifecycle](../get-started-with-it4innovations/obtaining-login-credentials/obtaining-login-credentials/).
+The files on the HOME filesystem will not be deleted until the end of the [user's lifecycle](../general/obtaining-login-credentials/obtaining-login-credentials/).

The filesystem is backed up, such that it can be restored in case of a catastrophic failure resulting in significant data loss. This backup however is not intended to restore old versions of user data or to restore (accidentally) deleted files.

The HOME filesystem is realized as a Lustre parallel filesystem and is available on all login and computational nodes. Default stripe size is 1MB, stripe count is 1. There are 22 OSTs dedicated to the HOME filesystem.

-!!! Note "Note"
-    Setting stripe size and stripe count correctly for your needs may significantly impact the I/O performance you experience.
+!!! note
+    Setting stripe size and stripe count correctly for your needs may significantly impact the I/O performance you experience.

-|HOME filesystem||
-|---|---|
-|Mountpoint|/home|
-|Capacity|320 TB|
-|Throughput|2 GB/s|
-|User quota|250 GB|
-|Default stripe size|1 MB|
-|Default stripe count|1|
-|Number of OSTs|22|
+| HOME filesystem      |        |
+| -------------------- | ------ |
+| Mountpoint           | /home  |
+| Capacity             | 320 TB |
+| Throughput           | 2 GB/s |
+| User quota           | 250 GB |
+| Default stripe size  | 1 MB   |
+| Default stripe count | 1      |
+| Number of OSTs       | 22     |

-###SCRATCH
+### SCRATCH

The SCRATCH filesystem is mounted in the directory /scratch. Users may freely create subdirectories and files on the filesystem. Accessible capacity is 146TB, shared among all users. Individual users are restricted by filesystem usage quotas, set to 100TB per user. The purpose of this quota is to prevent runaway programs from filling the entire filesystem and denying service to other users. If 100TB should prove insufficient for a particular user, please contact [support](https://support.it4i.cz/rt); the quota may be lifted upon request.

-!!! Note "Note"
-    The Scratch filesystem is intended for temporary scratch data generated during the calculation as well as for high performance access to input and output files. All I/O intensive jobs must use the SCRATCH filesystem as their working directory.
+!!! note
+    The Scratch filesystem is intended for temporary scratch data generated during the calculation as well as for high performance access to input and output files. All I/O intensive jobs must use the SCRATCH filesystem as their working directory.

>Users are advised to save the necessary data from the SCRATCH filesystem to the HOME filesystem after the calculations and clean up the scratch files.
- Files on the SCRATCH filesystem that are **not accessed for more than 90 days** will be automatically **deleted**. + Files on the SCRATCH filesystem that are **not accessed for more than 90 days** will be automatically **deleted**. The SCRATCH filesystem is realized as Lustre parallel filesystem and is available from all login and computational nodes. Default stripe size is 1MB, stripe count is 1. There are 10 OSTs dedicated for the SCRATCH filesystem. -!!! Note "Note" - Setting stripe size and stripe count correctly for your needs may significantly impact the I/O performance you experience. +!!! note + Setting stripe size and stripe count correctly for your needs may significantly impact the I/O performance you experience. -|SCRATCH filesystem|| -|---|---| -|Mountpoint|/scratch| -|Capacity|146TB| -|Throughput|6GB/s| -|User quota|100TB| -|Default stripe size|1MB| -|Default stripe count|1| -|Number of OSTs|10| +| SCRATCH filesystem | | +| -------------------- | -------- | +| Mountpoint | /scratch | +| Capacity | 146TB | +| Throughput | 6GB/s | +| User quota | 100TB | +| Default stripe size | 1MB | +| Default stripe count | 1 | +| Number of OSTs | 10 | -### Disk usage and quota commands +### Disk Usage and Quota Commands User quotas on the file systems can be checked and reviewed using following command: @@ -169,11 +166,11 @@ Example for Lustre HOME directory: ```bash $ lfs quota /home Disk quotas for user user001 (uid 1234): - Filesystem kbytes quota limit grace files quota limit grace - /home 300096 0 250000000 - 2102 0 500000 - + Filesystem kbytes quota limit grace files quota limit grace + /home 300096 0 250000000 - 2102 0 500000 - Disk quotas for group user001 (gid 1234): - Filesystem kbytes quota limit grace files quota limit grace - /home 300096 0 0 - 2102 0 0 - + Filesystem kbytes quota limit grace files quota limit grace + /home 300096 0 0 - 2102 0 0 - ``` In this example, we view current quota size limit of 250GB and 300MB currently used by user001. @@ -183,7 +180,7 @@ Example for Lustre SCRATCH directory: ```bash $ lfs quota /scratch Disk quotas for user user001 (uid 1234): - Filesystem kbytes quota limit grace files quota limit grace + Filesystem kbytes quota limit grace files quota limit grace /scratch 8 0 100000000000 - 3 0 0 - Disk quotas for group user001 (gid 1234): Filesystem kbytes quota limit grace files quota limit grace @@ -232,7 +229,7 @@ ACLs on a Lustre file system work exactly like ACLs on any Linux file system. Th [vop999@login1.anselm ~]$ umask 027 [vop999@login1.anselm ~]$ mkdir test [vop999@login1.anselm ~]$ ls -ld test -drwxr-x--- 2 vop999 vop999 4096 Nov 5 14:17 test +drwxr-x--- 2 vop999 vop999 4096 Nov 5 14:17 test [vop999@login1.anselm ~]$ getfacl test # file: test # owner: vop999 @@ -243,7 +240,7 @@ other::--- [vop999@login1.anselm ~]$ setfacl -m user:johnsm:rwx test [vop999@login1.anselm ~]$ ls -ld test -drwxrwx---+ 2 vop999 vop999 4096 Nov 5 14:17 test +drwxrwx---+ 2 vop999 vop999 4096 Nov 5 14:17 test [vop999@login1.anselm ~]$ getfacl test # file: test # owner: vop999 @@ -257,76 +254,74 @@ other::--- Default ACL mechanism can be used to replace setuid/setgid permissions on directories. Setting a default ACL on a directory (-d flag to setfacl) will cause the ACL permissions to be inherited by any newly created file or subdirectory within the directory. 
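
For illustration, a minimal sketch of the default ACL mechanism described above (the directory name is illustrative; johnsm stands in for any collaborator account):

```bash
[vop999@login1.anselm ~]$ mkdir shared
[vop999@login1.anselm ~]$ setfacl -m user:johnsm:rwx shared     # grant access to the directory itself
[vop999@login1.anselm ~]$ setfacl -d -m user:johnsm:rwx shared  # -d: default ACL, inherited by new entries
[vop999@login1.anselm ~]$ touch shared/datafile
[vop999@login1.anselm ~]$ getfacl shared/datafile               # the inherited user:johnsm entry is listed
```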
Refer to this page for more information on Linux ACL:

-[http://www.vanemery.com/Linux/ACL/POSIX_ACL_on_Linux.html ](http://www.vanemery.com/Linux/ACL/POSIX_ACL_on_Linux.html)
+[http://www.vanemery.com/Linux/ACL/POSIX_ACL_on_Linux.html](http://www.vanemery.com/Linux/ACL/POSIX_ACL_on_Linux.html)

-Local Filesystems
-----------------
+## Local Filesystems

### Local Scratch

-!!! Note "Note"
-    Every computational node is equipped with 330GB local scratch disk.
+!!! note
+    Every computational node is equipped with a 330GB local scratch disk.

Use the local scratch in case you need to access a large amount of small files during your calculation.

The local scratch disk is mounted as /lscratch and is accessible to the user at the /lscratch/$PBS_JOBID directory.

-The local scratch filesystem is intended for temporary scratch data generated during the calculation as well as for high performance access to input and output files. All I/O intensive jobs that access large number of small files within the calculation must use the local scratch filesystem as their working directory. This is required for performance reasons, as frequent access to number of small files may overload the metadata servers (MDS) of the Lustre filesystem.
+The local scratch filesystem is intended for temporary scratch data generated during the calculation as well as for high performance access to input and output files. All I/O intensive jobs that access a large number of small files within the calculation must use the local scratch filesystem as their working directory. This is required for performance reasons, as frequent access to a number of small files may overload the metadata servers (MDS) of the Lustre filesystem.

-!!! Note "Note"
-    The local scratch directory /lscratch/$PBS_JOBID will be deleted immediately after the calculation end. Users should take care to save the output data from within the jobscript.
+!!! note
+    The local scratch directory /lscratch/$PBS_JOBID will be deleted immediately after the calculation ends. Users should take care to save the output data from within the jobscript.

-|local SCRATCH filesystem||
-|---|---|
-|Mountpoint|/lscratch|
-|Accesspoint|/lscratch/$PBS_JOBID|
-|Capacity|330GB|
-|Throughput|100MB/s|
-|User quota|none|
+| local SCRATCH filesystem |                      |
+| ------------------------ | -------------------- |
+| Mountpoint               | /lscratch            |
+| Accesspoint              | /lscratch/$PBS_JOBID |
+| Capacity                 | 330GB                |
+| Throughput               | 100MB/s              |
+| User quota               | none                 |

-### RAM disk
+### RAM Disk

Every computational node is equipped with a filesystem realized in memory, the so-called RAM disk.

-!!! Note "Note"
-    Use RAM disk in case you need really fast access to your data of limited size during your calculation. Be very careful, use of RAM disk filesystem is at the expense of operational memory.
+!!! note
+    Use the RAM disk in case you need really fast access to your data of limited size during your calculation. Be very careful, use of the RAM disk filesystem is at the expense of operational memory.

The local RAM disk is mounted as /ramdisk and is accessible to the user at the /ramdisk/$PBS_JOBID directory.

The local RAM disk filesystem is intended for temporary scratch data generated during the calculation as well as for high performance access to input and output files. The size of the RAM disk filesystem is limited. Be very careful, use of the RAM disk filesystem is at the expense of operational memory. It is not recommended to allocate a large amount of memory and use a large amount of data in the RAM disk filesystem at the same time.

-!!!
Note "Note" - The local RAM disk directory /ramdisk/$PBS_JOBID will be deleted immediately after the calculation end. Users should take care to save the output data from within the jobscript. +!!! note + The local RAM disk directory /ramdisk/$PBS_JOBID will be deleted immediately after the calculation end. Users should take care to save the output data from within the jobscript. -|RAM disk|| -|---|---| -|Mountpoint| /ramdisk| -|Accesspoint| /ramdisk/$PBS_JOBID| -|Capacity|60GB at compute nodes without accelerator, 90GB at compute nodes with accelerator, 500GB at fat nodes| -|Throughput|over 1.5 GB/s write, over 5 GB/s read, single thread, over 10 GB/s write, over 50 GB/s read, 16 threads| -|User quota|none| +| RAM disk | | +| ----------- | ------------------------------------------------------------------------------------------------------- | +| Mountpoint | /ramdisk | +| Accesspoint | /ramdisk/$PBS_JOBID | +| Capacity | 60GB at compute nodes without accelerator, 90GB at compute nodes with accelerator, 500GB at fat nodes | +| Throughput | over 1.5 GB/s write, over 5 GB/s read, single thread, over 10 GB/s write, over 50 GB/s read, 16 threads | +| User quota | none | -### tmp +### Tmp Each node is equipped with local /tmp directory of few GB capacity. The /tmp directory should be used to work with small temporary files. Old files in /tmp directory are automatically purged. -Summary ----------- +## Summary -|Mountpoint|Usage|Protocol|Net Capacity|Throughput|Limitations|Access|Services| -|---|---|---|---|---|---|---|---| -|/home|home directory|Lustre|320 TiB|2 GB/s|Quota 250GB|Compute and login nodes|backed up| -|/scratch|cluster shared jobs' data|Lustre|146 TiB|6 GB/s|Quota 100TB|Compute and login nodes|files older 90 days removed| -|/lscratch|node local jobs' data|local|330 GB|100 MB/s|none|Compute nodes|purged after job ends| -|/ramdisk|node local jobs' data|local|60, 90, 500 GB|5-50 GB/s|none|Compute nodes|purged after job ends| -|/tmp|local temporary files|local|9.5 GB|100 MB/s|none|Compute and login nodes|auto| purged +| Mountpoint | Usage | Protocol | Net Capacity | Throughput | Limitations | Access | Services | | +| ---------- | ------------------------- | -------- | -------------- | ---------- | ----------- | ----------------------- | --------------------------- | ------ | +| /home | home directory | Lustre | 320 TiB | 2 GB/s | Quota 250GB | Compute and login nodes | backed up | | +| /scratch | cluster shared jobs' data | Lustre | 146 TiB | 6 GB/s | Quota 100TB | Compute and login nodes | files older 90 days removed | | +| /lscratch | node local jobs' data | local | 330 GB | 100 MB/s | none | Compute nodes | purged after job ends | | +| /ramdisk | node local jobs' data | local | 60, 90, 500 GB | 5-50 GB/s | none | Compute nodes | purged after job ends | | +| /tmp | local temporary files | local | 9.5 GB | 100 MB/s | none | Compute and login nodes | auto | purged | + +## CESNET Data Storage -CESNET Data Storage ------------- Do not use shared filesystems at IT4Innovations as a backup for large amount of data or long-term archiving purposes. -!!! Note "Note" - The IT4Innovations does not provide storage capacity for data archiving. Academic staff and students of research institutions in the Czech Republic can use [CESNET Storage service](https://du.cesnet.cz/). +!!! note + The IT4Innovations does not provide storage capacity for data archiving. Academic staff and students of research institutions in the Czech Republic can use [CESNET Storage service](https://du.cesnet.cz/). 
The CESNET Storage service can be used for research purposes, mainly by academic staff and students of research institutions in the Czech Republic.

@@ -334,27 +329,27 @@ User of data storage CESNET (DU) association can become organizations or an indi

Users may only use the CESNET data storage for data transfer and storage associated with activities in science, research, development, the spread of education, culture and prosperity. For details, see the “Acceptable Use Policy CESNET Large Infrastructure (Acceptable Use Policy, AUP)”.

-The service is documented at <https://du.cesnet.cz/wiki/doku.php/en/start>. For special requirements please contact directly CESNET Storage Department via e-mail [du-support(at)cesnet.cz](mailto:du-support@cesnet.cz).
+The service is documented [here](https://du.cesnet.cz/en/start). For special requirements, please contact the CESNET Storage Department directly via e-mail [du-support(at)cesnet.cz](mailto:du-support@cesnet.cz).

The procedure to obtain CESNET access is quick and trouble-free.

(source [https://du.cesnet.cz/](https://du.cesnet.cz/wiki/doku.php/en/start "CESNET Data Storage"))

-CESNET storage access
------------
-### Understanding CESNET storage
+## CESNET Storage Access
+
+### Understanding CESNET Storage

-!!! Note "Note"
-    It is very important to understand the CESNET storage before uploading data. Please read <https://du.cesnet.cz/en/navody/home-migrace-plzen/start> first.
+!!! note
+    It is very important to understand the CESNET storage before uploading data. Please read <https://du.cesnet.cz/en/navody/home-migrace-plzen/start> first.

Once registered for CESNET Storage, you may [access the storage](https://du.cesnet.cz/en/navody/faq/start) in a number of ways. We recommend the SSHFS and RSYNC methods.

### SSHFS Access

-!!! Note "Note"
-    SSHFS: The storage will be mounted like a local hard drive
+!!! note
+    SSHFS: The storage will be mounted like a local hard drive.

-The SSHFS provides a very convenient way to access the CESNET Storage. The storage will be mounted onto a local directory, exposing the vast CESNET Storage as if it was a local removable hard drive. Files can be than copied in and out in a usual fashion.
+The SSHFS provides a very convenient way to access the CESNET Storage. The storage will be mounted onto a local directory, exposing the vast CESNET Storage as if it was a local removable hard drive. Files can then be copied in and out in the usual fashion.

First, create the mount point

@@ -394,10 +389,10 @@ Once done, please remember to unmount the storage

```bash
$ fusermount -u cesnet
```

-### Rsync access
+### Rsync Access

-!!! Note "Note"
-    Rsync provides delta transfer for best performance, can resume interrupted transfers
+!!! note
+    Rsync provides delta transfer for best performance and can resume interrupted transfers.

Rsync is a fast and extraordinarily versatile file copying tool. It is famous for its delta-transfer algorithm, which reduces the amount of data sent over the network by sending only the differences between the source files and the existing files in the destination. Rsync is widely used for backups and mirroring and as an improved copy command for everyday use.
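
As a hedged sketch of such a transfer (the host ssh.du1.cesnet.cz and the remote path are illustrative assumptions; consult the CESNET documentation for your actual target):

```bash
# send a local directory; only differences against existing remote files are transferred
local $ rsync -av --progress datasets/ username@ssh.du1.cesnet.cz:VO_storage-cache_tape/datasets/

# an interrupted transfer can simply be re-run; rsync resumes by sending the missing deltas
local $ rsync -av --partial datasets/ username@ssh.du1.cesnet.cz:VO_storage-cache_tape/datasets/
```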
diff --git a/docs.it4i/colors.md b/docs.it4i/colors.md deleted file mode 100644 index 568ebd2e4d42ff5ab4514224e469a30c1d07cc6f..0000000000000000000000000000000000000000 --- a/docs.it4i/colors.md +++ /dev/null @@ -1,62 +0,0 @@ -## Primary colors - -Click on a tile to change the primary color of the theme: - -<button data-md-color-primary="red">Red</button> -<button data-md-color-primary="pink">Pink</button> -<button data-md-color-primary="purple">Purple</button> -<button data-md-color-primary="deep-purple">Deep Purple</button> -<button data-md-color-primary="indigo">Indigo</button> -<button data-md-color-primary="blue">Blue</button> -<button data-md-color-primary="light-blue">Light Blue</button> -<button data-md-color-primary="cyan">Cyan</button> -<button data-md-color-primary="teal">Teal</button> -<button data-md-color-primary="green">Green</button> -<button data-md-color-primary="light-green">Light Green</button> -<button data-md-color-primary="lime">Lime</button> -<button data-md-color-primary="yellow">Yellow</button> -<button data-md-color-primary="amber">Amber</button> -<button data-md-color-primary="orange">Orange</button> -<button data-md-color-primary="deep-orange">Deep Orange</button> -<button data-md-color-primary="brown">Brown</button> -<button data-md-color-primary="grey">Grey</button> -<button data-md-color-primary="blue-grey">Blue Grey</button> - -<script> - var buttons = document.querySelectorAll("button[data-md-color-primary]"); - Array.prototype.forEach.call(buttons, function(button) { - button.addEventListener("click", function() { - document.body.dataset.mdColorPrimary = this.dataset.mdColorPrimary; - }) - }) -</script> - -## Accent colors - -Click on a tile to change the accent color of the theme: - -<button data-md-color-accent="red">Red</button> -<button data-md-color-accent="pink">Pink</button> -<button data-md-color-accent="purple">Purple</button> -<button data-md-color-accent="deep-purple">Deep Purple</button> -<button data-md-color-accent="indigo">Indigo</button> -<button data-md-color-accent="blue">Blue</button> -<button data-md-color-accent="light-blue">Light Blue</button> -<button data-md-color-accent="cyan">Cyan</button> -<button data-md-color-accent="teal">Teal</button> -<button data-md-color-accent="green">Green</button> -<button data-md-color-accent="light-green">Light Green</button> -<button data-md-color-accent="lime">Lime</button> -<button data-md-color-accent="yellow">Yellow</button> -<button data-md-color-accent="amber">Amber</button> -<button data-md-color-accent="orange">Orange</button> -<button data-md-color-accent="deep-orange">Deep Orange</button> - -<script> - var buttons = document.querySelectorAll("button[data-md-color-accent]"); - Array.prototype.forEach.call(buttons, function(button) { - button.addEventListener("click", function() { - document.body.dataset.mdColorAccent = this.dataset.mdColorAccent; - }) - }) -</script> diff --git a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/graphical-user-interface.md b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/graphical-user-interface.md similarity index 85% rename from docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/graphical-user-interface.md rename to docs.it4i/general/accessing-the-clusters/graphical-user-interface/graphical-user-interface.md index d392471ef04667686c5226f7dbc6e9055c193b98..f1c3573a84bd0e13a403e0b4b0566120585c1d22 100644 --- 
a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/graphical-user-interface.md
+++ b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/graphical-user-interface.md
@@ -1,17 +1,13 @@
-Graphical User Interface
-========================
+# Graphical User Interface

-X Window System
---------------
+## X Window System

The X Window system is a principal way to get GUI access to the clusters. Read more about configuring the [**X Window System**](x-window-system/).

-VNC
----
+## VNC

The **Virtual Network Computing** (**VNC**) is a graphical [desktop sharing](http://en.wikipedia.org/wiki/Desktop_sharing "Desktop sharing") system that uses the [Remote Frame Buffer protocol (RFB)](http://en.wikipedia.org/wiki/RFB_protocol "RFB protocol") to remotely control another [computer](http://en.wikipedia.org/wiki/Computer "Computer").

Read more about configuring **[VNC](vnc/)**.
-
diff --git a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/vnc.md b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md
similarity index 67%
rename from docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/vnc.md
rename to docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md
index b136cf4a0ace74fe507c3fe6d94169d3cb8fc2eb..f064b2e6a89dc4b2c8290a0b552eac82ca973941 100644
--- a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/vnc.md
+++ b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md
@@ -1,15 +1,13 @@
-VNC
-===
+# VNC

The **Virtual Network Computing** (**VNC**) is a graphical [desktop sharing](http://en.wikipedia.org/wiki/Desktop_sharing "Desktop sharing") system that uses the [Remote Frame Buffer protocol (RFB)](http://en.wikipedia.org/wiki/RFB_protocol "RFB protocol") to remotely control another [computer](http://en.wikipedia.org/wiki/Computer "Computer"). It transmits the [keyboard](http://en.wikipedia.org/wiki/Computer_keyboard "Computer keyboard") and [mouse](http://en.wikipedia.org/wiki/Computer_mouse "Computer mouse") events from one computer to another, relaying the graphical [screen](http://en.wikipedia.org/wiki/Computer_screen "Computer screen") updates back in the other direction, over a [network](http://en.wikipedia.org/wiki/Computer_network "Computer network").

The recommended clients are [TightVNC](http://www.tightvnc.com) or [TigerVNC](http://sourceforge.net/apps/mediawiki/tigervnc/index.php?title=Main_Page) (free, open source, available for almost any platform).

-Create VNC password
-------------------
+## Create VNC Password

-!!! Note "Note"
-    Local VNC password should be set before the first login. Do use a strong password.
+!!! note
+    Local VNC password should be set before the first login. Do use a strong password.

```bash
[username@login2 ~]$ vncpasswd
@@ -17,20 +15,19 @@ Password:
Verify:
```

-Start vncserver
--------------
+## Start Vncserver

-!!! Note "Note"
-    To access VNC a local vncserver must be started first and also a tunnel using SSH port forwarding must be established.
+!!! note
+    To access VNC, a local vncserver must be started first and a tunnel using SSH port forwarding must also be established.

-    [See below](vnc.md#linux-example-of-creating-a-tunnel) for the details on SSH tunnels. In this example we use port 61.
+[See below](vnc.md#linux-example-of-creating-a-tunnel) for the details on SSH tunnels. In this example we use display number 61.
You can find ports which are already occupied. Here you can see that ports " /usr/bin/Xvnc :79" and " /usr/bin/Xvnc :60" are occupied. ```bash [username@login2 ~]$ ps aux | grep Xvnc -username 5971 0.0 0.0 201072 92564 ? SN Sep22 4:19 /usr/bin/Xvnc :79 -desktop login2:79 (username) -auth /home/gre196/.Xauthority -geometry 1024x768 -rfbwait 30000 -rfbauth /home/username/.vnc/passwd -rfbport 5979 -fp catalogue:/etc/X11/fontpath.d -pn -username 10296 0.0 0.0 131772 21076 pts/29 SN 13:01 0:01 /usr/bin/Xvnc :60 -desktop login2:61 (username) -auth /home/username/.Xauthority -geometry 1600x900 -depth 16 -rfbwait 30000 -rfbauth /home/jir13/.vnc/passwd -rfbport 5960 -fp catalogue:/etc/X11/fontpath.d -pn +username 5971 0.0 0.0 201072 92564 ? SN Sep22 4:19 /usr/bin/Xvnc :79 -desktop login2:79 (username) -auth /home/gre196/.Xauthority -geometry 1024x768 -rfbwait 30000 -rfbauth /home/username/.vnc/passwd -rfbport 5979 -fp catalogue:/etc/X11/fontpath.d -pn +username 10296 0.0 0.0 131772 21076 pts/29 SN 13:01 0:01 /usr/bin/Xvnc :60 -desktop login2:61 (username) -auth /home/username/.Xauthority -geometry 1600x900 -depth 16 -rfbwait 30000 -rfbauth /home/jir13/.vnc/passwd -rfbport 5960 -fp catalogue:/etc/X11/fontpath.d -pn ..... ``` @@ -61,16 +58,15 @@ Another command: ```bash [username@login2 .vnc]$ ps aux | grep Xvnc -username 10296 0.0 0.0 131772 21076 pts/29 SN 13:01 0:01 /usr/bin/Xvnc :61 -desktop login2:61 (username) -auth /home/jir13/.Xauthority -geometry 1600x900 -depth 16 -rfbwait 30000 -rfbauth /home/username/.vnc/passwd -rfbport 5961 -fp catalogue:/etc/X11/fontpath.d -pn +username 10296 0.0 0.0 131772 21076 pts/29 SN 13:01 0:01 /usr/bin/Xvnc :61 -desktop login2:61 (username) -auth /home/jir13/.Xauthority -geometry 1600x900 -depth 16 -rfbwait 30000 -rfbauth /home/username/.vnc/passwd -rfbport 5961 -fp catalogue:/etc/X11/fontpath.d -pn ``` To access the VNC server you have to create a tunnel between the login node using TCP **port 5961** and your machine using a free TCP port (for simplicity the very same, in this case). -!!! Note "Note" - The tunnel must point to the same login node where you launched the VNC server, eg. login2. If you use just cluster-name.it4i.cz, the tunnel might point to a different node due to DNS round robin. +!!! note + The tunnel must point to the same login node where you launched the VNC server, eg. login2. If you use just cluster-name.it4i.cz, the tunnel might point to a different node due to DNS round robin. -Linux/Mac OS example of creating a tunnel ------------------------------------------ +## Linux/Mac OS Example of Creating a Tunnel At your machine, create the tunnel: @@ -109,8 +105,7 @@ You have to destroy the SSH tunnel which is still running at the background afte kill 2022 ``` -Windows example of creating a tunnel ------------------------------------- +## Windows Example of Creating a Tunnel Use PuTTY to log in on cluster. @@ -125,7 +120,7 @@ Search for the localhost and port number (in this case 127.0.0.1:5961). tcp 0 0 127.0.0.1:5961 0.0.0.0:* LISTEN 24031/Xvnc ``` -On the PuTTY Configuration screen go to Connection->SSH->Tunnels to set up the tunnel. +On the PuTTY Configuration screen go to Connection->SSH->Tunnels to set up the tunnel. Fill the Source port and Destination fields. **Do not forget to click the Add button**. @@ -133,29 +128,25 @@ Fill the Source port and Destination fields. **Do not forget to click the Add bu Run the VNC client of your choice, select VNC server 127.0.0.1, port 5961 and connect using VNC password. 
-Example of starting TigerVNC viewer
-----------------------------------
+## Example of Starting TigerVNC Viewer



In this example, we connect to the VNC server on port 5961, via the SSH tunnel, using the TigerVNC viewer. The connection is encrypted and secured. The VNC server listening on port 5961 provides a screen of 1600x900 pixels.

-Example of starting TightVNC Viewer
-----------------------------------
+## Example of Starting TightVNC Viewer

Use your VNC password to log in using the TightVNC Viewer and start a Gnome Session on the login node.



-Gnome session
------------
+## Gnome Session

You should see the following screen after the successful login.



-Disable your Gnome session screensaver
-------------------------------------
+## Disable Your Gnome Session Screensaver

Open the Screensaver preferences dialog:



@@ -165,21 +156,19 @@ Uncheck both options below the slider:



-Kill screensaver if locked screen
--------------------------------
+## Kill Screensaver if Locked Screen

If the screen gets locked you have to kill the screensaver. Do not forget to disable the screensaver then.

```bash
[username@login2 .vnc]$ ps aux | grep screen
-username 1503 0.0 0.0 103244 892 pts/4 S+ 14:37 0:00 grep screen
-username 24316 0.0 0.0 270564 3528 ? Ss 14:12 0:00 gnome-screensaver
+username    1503  0.0  0.0 103244   892 pts/4    S+   14:37   0:00 grep screen
+username   24316  0.0  0.0 270564  3528 ?        Ss   14:12   0:00 gnome-screensaver

[username@login2 .vnc]$ kill 24316
```

-Kill vncserver after finished work
--------------------------------
+## Kill Vncserver After Finished Work

You should kill your VNC server using the command:

@@ -195,12 +184,11 @@ Or this way:

[username@login2 .vnc]$ pkill vnc
```

-GUI applications on compute nodes over VNC
-----------------------------------------
+## GUI Applications on Compute Nodes Over VNC

The very same methods as described above may be used to run the GUI applications on compute nodes. However, for maximum performance, proceed with the following steps:

-Open a Terminal (Applications -> System Tools -> Terminal). Run all the next commands in the terminal.
+Open a Terminal (Applications -> System Tools -> Terminal). Run all the next commands in the terminal.



@@ -210,7 +198,7 @@ Allow incoming X11 graphics from the compute nodes at the login node:

$ xhost +
```

-Get an interactive session on a compute node (for more detailed info [look here](../../../anselm-cluster-documentation/resource-allocation-and-job-execution/job-submission-and-execution/)). Use the **-v DISPLAY** option to propagate the DISPLAY on the compute node. In this example, we want a complete node (24 cores in this example) from the production queue:
+Get an interactive session on a compute node (for more detailed info [look here](../../../anselm/job-submission-and-execution/)). Use the **-v DISPLAY** option to propagate the DISPLAY on the compute node.
In this example, we want a complete node (24 cores) from the production queue:

```bash
$ qsub -I -v DISPLAY=$(uname -n):$(echo $DISPLAY | cut -d ':' -f 2) -A PROJECT_ID -q qprod -l select=1:ncpus=24
diff --git a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system.md b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/x-window-system.md
similarity index 64%
rename from docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system.md
rename to docs.it4i/general/accessing-the-clusters/graphical-user-interface/x-window-system.md
index 9952f60e130976f0e16cedc61accfd84958fdd01..b9c6951295a6b4d96fceb53c6d383464bee6d5c1 100644
--- a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system.md
+++ b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/x-window-system.md
@@ -1,13 +1,11 @@
-X Window System
-===============
+# X Window System

The X Window system is a principal way to get GUI access to the clusters. The **X Window System** (commonly known as **X11**, based on its current major version being 11, or shortened to simply **X**, and sometimes informally **X-Windows**) is a computer software system and network [protocol](http://en.wikipedia.org/wiki/Protocol_%28computing%29 "Protocol (computing)") that provides a basis for [graphical user interfaces](http://en.wikipedia.org/wiki/Graphical_user_interface "Graphical user interface") (GUIs) and rich input device capability for [networked computers](http://en.wikipedia.org/wiki/Computer_network "Computer network").

-!!! Note "Note"
-    The X display forwarding must be activated and the X server running on client side
+!!! tip
+    The X display forwarding must be activated and the X server running on the client side.

-X display
---------
+## X Display

In order to display the graphical user interface (GUI) of various software tools, you need to enable the X display forwarding. On Linux and Mac, log in using the -X option of the ssh client:

@@ -15,10 +13,9 @@ In order to display graphical user interface GUI of various software tools, you
local $ ssh -X username@cluster-name.it4i.cz
```

-X Display Forwarding on Windows
-------------------------------
+## X Display Forwarding on Windows

-On Windows use the PuTTY client to enable X11 forwarding. In PuTTY menu, go to Connection->SSH->X11, mark the Enable X11 forwarding checkbox before logging in. Then log in as usual.
+On Windows, use the PuTTY client to enable X11 forwarding. In the PuTTY menu, go to Connection-SSH-X11, mark the Enable X11 forwarding checkbox before logging in. Then log in as usual.

To verify the forwarding, type

@@ -34,33 +31,29 @@ localhost:10.0

then the X11 forwarding is enabled.

-X Server
-------
+## X Server

In order to display the graphical user interface (GUI) of various software tools, you need a running X server on your desktop computer. For Linux users, no action is required as the X server is the default GUI environment on most Linux distributions. Mac and Windows users need to install and run the X server on their workstations.

-X Server on OS X
---------------
+## X Server on OS X

-Mac OS users need to install [XQuartz server](http://xquartz.macosforge.org/landing/).
+Mac OS users need to install [XQuartz server](https://www.xquartz.org).

-X Server on Windows
-----------------
+## X Server on Windows

There are a variety of X servers available for the Windows environment.
The commercial Xwin32 is very stable and feature-rich. The Cygwin environment provides a fully featured open-source XWin X server. For simplicity, we recommend the open-source X server by the [Xming project](http://sourceforge.net/projects/xming/). For stability and full features, we recommend the [XWin](http://x.cygwin.com/) X server by Cygwin.

- |How to use Xwin |How to use Xming |
- | --- | --- |
- |[Install Cygwin](http://x.cygwin.com/) Find and execute XWin.exeto start the X server on Windows desktop computer.[If no able to forward X11 using PuTTY to CygwinX](cygwin-and-x11-forwarding/) |<p>Use Xlaunch to configure the Xming.<p>Run Xmingto start the X server on Windows desktop computer.|
+| How to use Xwin | How to use Xming |
+| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ |
+| [Install Cygwin](http://x.cygwin.com/) Find and execute XWin.exe to start the X server on Windows desktop computer. [If not able to forward X11 using PuTTY to CygwinX](#if-not-able-to-forward-x11-using-putty-to-cygwinx) | Use Xlaunch to configure the Xming. Run Xming to start the X server on Windows desktop computer. |

Read more on [http://www.math.umn.edu/systems_guide/putty_xwin32.shtml](http://www.math.umn.edu/systems_guide/putty_xwin32.shtml)

-Running GUI Enabled Applications
-------------------------------
+## Running GUI Enabled Applications

-!!! Note "Note"
-    Make sure that X forwarding is activated and the X server is running.
+!!! note
+    Make sure that X forwarding is activated and the X server is running.

Then launch the application as usual. Use the & to run the application in the background.

@@ -75,8 +68,7 @@ $ xterm

In this example, we activate the Intel programming environment tools, then start the graphical gvim editor.

-GUI Applications on Compute Nodes
-------------------------------
+## GUI Applications on Compute Nodes

Allocate the compute nodes using the -X option with the qsub command

@@ -94,13 +86,11 @@ $ ssh -X r24u35n680

In this example, we log in on the r24u35n680 compute node, with the X11 forwarding enabled.

-The Gnome GUI Environment
-----------------------
+## Gnome GUI Environment

The Gnome 2.28 GUI environment is available on the clusters. We recommend using a separate X server window for displaying the Gnome environment.

-Gnome on Linux and OS X
---------------------
+## Gnome on Linux and OS X

To run the remote Gnome session in a window on a Linux/OS X computer, you need to install Xephyr. The Ubuntu package is xserver-xephyr; on OS X it is part of [XQuartz](http://xquartz.macosforge.org/landing/). First, launch Xephyr on the local machine:

@@ -109,7 +99,7 @@ xserver-xephyr, on OS X it is part of [XQuartz](http://xquartz.macosforge.org/la
local $ Xephyr -ac -screen 1024x768 -br -reset -terminate :1 &
```

-This will open a new X window with size 1024 x 768 at DISPLAY :1. Next, ssh to the cluster with DISPLAY environment variable set and launch gnome-session
+This will open a new X window with size 1024 x 768 at DISPLAY :1.
Next, ssh to the cluster with the DISPLAY environment variable set and launch gnome-session:

```bash
local $ DISPLAY=:1.0 ssh -XC yourname@cluster-name.it4i.cz -i ~/.ssh/path_to_your_key
@@ -126,8 +116,7 @@ xinit /usr/bin/ssh -XT -i .ssh/path_to_your_key yourname@cluster-namen.it4i.cz g

However, this method does not seem to work with recent Linux distributions and you will need to manually source /etc/profile to properly set environment variables for PBS.

-Gnome on Windows
---------------
+## Gnome on Windows

Use Xlaunch to start the Xming server or run the XWin.exe. Select the "One window" mode.

@@ -139,4 +128,29 @@ $ gnome-session &

In this way, we run a remote gnome session on the cluster, displaying it in the local X server.

-Use System->Log Out to close the gnome-session
+Use System-Log Out to close the gnome-session
+
+### If Not Able to Forward X11 Using PuTTY to CygwinX
+
+```bash
+[username@login1.anselm ~]$ gnome-session &
+[1] 23691
+[username@login1.anselm ~]$ PuTTY X11 proxy: unable to connect to forwarded X server: Network error: Connection refused
+PuTTY X11 proxy: unable to connect to forwarded X server: Network error: Connection refused
+
+ (gnome-session:23691): WARNING **: Cannot open display:**
+```
+
+1. Locate and modify the Cygwin shortcut that uses [startxwin](http://x.cygwin.com/docs/man1/startxwin.1.html):
+   locate
+   C:\cygwin64\bin\XWin.exe
+   and change it
+   to
+   C:\cygwin64\bin\XWin.exe -listen tcp
+
+ 
+
+1. Check PuTTY settings:
+   Enable X11 forwarding
+
+ 
diff --git a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/introduction.md b/docs.it4i/general/accessing-the-clusters/introduction.md
similarity index 53%
rename from docs.it4i/get-started-with-it4innovations/accessing-the-clusters/introduction.md
rename to docs.it4i/general/accessing-the-clusters/introduction.md
index 6ebeac33092d3ae6ce76ca7cc9aac6ba2c1cf825..5ca0d7689130ec6d00649f4ba692e8e8a2742647 100644
--- a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/introduction.md
+++ b/docs.it4i/general/accessing-the-clusters/introduction.md
@@ -1,24 +1,24 @@
-Accessing the Clusters
-======================
+# Accessing the Clusters

The IT4Innovations clusters are accessed by the SSH protocol via login nodes.

-!!! Note "Note"
-    Read more on [Accessing the Salomon Cluster](../../salomon/shell-and-data-access.md) or [Accessing the Anselm Cluster](../../anselm-cluster-documentation/shell-and-data-access.md) pages.
+!!! note
+    Read more on the [Accessing the Salomon Cluster](../../salomon/shell-and-data-access.md) or [Accessing the Anselm Cluster](../../anselm/shell-and-data-access.md) pages.

-PuTTY
-----
+## PuTTY

On **Windows**, use the [PuTTY ssh client](shell-access-and-data-transfer/putty/).

-SSH keys
-------
+## SSH Keys

Read more about [SSH keys management](shell-access-and-data-transfer/ssh-keys/).

-Graphical User Interface
-----------------------
+## Graphical User Interface

Read more about the [X Window System](./graphical-user-interface/x-window-system/).

Read more about [Virtual Network Computing (VNC)](./graphical-user-interface/vnc/).
+
+## Accessing IT4Innovations Internal Resources via VPN
+
+Read more about [VPN Access](vpn-access/).
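+
+For quick reference, a minimal login sketch (the hostname is illustrative; see the cluster-specific pages above for the actual login addresses):
+
+```bash
+local $ ssh username@salomon.it4i.cz
+```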
diff --git a/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/putty.md b/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/putty.md
new file mode 100644
index 0000000000000000000000000000000000000000..7a4d63ed99a12aa345a37d6afbe65a1e8d1f459d
--- /dev/null
+++ b/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/putty.md
@@ -0,0 +1,107 @@
+# PuTTY (Windows)
+
+## Windows PuTTY Installer
+
+We recommend downloading "**A Windows installer for everything except PuTTYtel**" with **Pageant** (SSH authentication agent) and **PuTTYgen** (PuTTY key generator), which is available [here](http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html).
+
+!!! note
+    After installation you can proceed directly to private key authentication using ["Putty"](putty#putty).
+
+    "Change Password for Existing Private Key" is optional.
+
+    "Generate a New Public/Private key pair" is intended for users without a Public/Private key in the initial email containing login credentials.
+
+    "Pageant" is optional.
+
+## PuTTY - How to Connect to the IT4Innovations Cluster
+
+* Run PuTTY.
+* Enter the Host name and Save session fields with the [Login address](../../../salomon/shell-and-data-access.md) and browse the Connection - SSH - Auth menu. The _Host Name_ input may be in the format **"username@clustername.it4i.cz"** so you don't have to type your login each time. In this example we will connect to the Salomon cluster using **"salomon.it4i.cz"**.
+
+
+
+* Category - Connection - SSH - Auth:
+  Select Attempt authentication using Pageant.
+  Select Allow agent forwarding.
+  Browse and select your [private key](ssh-keys/) file.
+
+
+
+* Return to the Session page and save the selected configuration with the _Save_ button.
+
+
+
+* Now you can log in using the _Open_ button.
+
+
+
+* Enter your username if the _Host Name_ input is not in the format "username@salomon.it4i.cz".
+* Enter the passphrase for the selected [private key](ssh-keys/) file if the Pageant **SSH authentication agent is not used**.
+
+## Other PuTTY Settings
+
+* Category - Windows - Translation - Remote character set and select **UTF-8**.
+* Category - Terminal - Features and select **Disable application keypad mode** (enables numpad).
+* Save your configuration on the Session page into Default Settings with the _Save_ button.
+
+## Pageant SSH Agent
+
+Pageant holds your private key in memory without needing to retype a passphrase on every login.
+
+* Run Pageant.
+* On the Pageant Key List, press _Add key_ and select your private key (id_rsa.ppk).
+* Enter your passphrase.
+* Now you have your private key in memory without needing to retype a passphrase on every login.
+
+
+
+## PuTTY Key Generator
+
+PuTTYgen is the PuTTY key generator. You can load in an existing private key and change your passphrase or generate a new public/private key pair.
+
+### Change Password for Existing Private Key
+
+You can change the password of your SSH key with "PuTTY Key Generator". Make sure to back up the key.
+
+* Load your [private key](../shell-access-and-data-transfer/ssh-keys/) file with the _Load_ button.
+* Enter your current passphrase.
+* Change the key passphrase.
+* Confirm the key passphrase.
+* Save your private key with the _Save private key_ button.
+
+
+
+### Generate a New Public/Private Key
+
+You can generate an additional public/private key pair and insert the public key into the authorized_keys file for authentication with your own private key.
+
+* Start with the _Generate_ button.
+
+
+
+* Generate some randomness.
+
+
+
+* Wait.
+
+
+
+* Enter a _comment_ for your key using the format 'username@organization.example.com'.
+  Enter the key passphrase.
+  Confirm the key passphrase.
+  Save your new private key in "*.ppk" format with the _Save private key_ button.
+
+
+
+* Save the public key with the _Save public key_ button.
+  You can copy the public key out of the ‘Public key for pasting into authorized_keys file’ box.
+
+
+
+* Export the private key in the OpenSSH format "id_rsa" using Conversion - Export OpenSSH key.
+
+
+
+* Now you can insert the additional public key into the authorized_keys file for authentication with your own private key.
+  You must log in using the ssh key received after registration. Then proceed to [How to add your own key](../shell-access-and-data-transfer/ssh-keys/).
diff --git a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md b/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md
similarity index 73%
rename from docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md
rename to docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md
index ba5c29fdbe097438b4cc4eb158422c8fb8ec4137..a2a4d429fc06d4943a0ab89df247f410ccdc4bd2 100644
--- a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md
+++ b/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md
@@ -1,8 +1,6 @@
-SSH keys
-========
+# OpenSSH Keys (UN\*X)

-Key management
--------------
+## Key Management

After logging in, you can see the .ssh/ directory with SSH keys and the authorized_keys file:

@@ -12,21 +10,20 @@ After logging in, you can see .ssh/ directory with SSH keys and authorized_keys
 total 24
 drwx------ 2 username username 4096 May 13 15:12 .
 drwxr-x---22 username username 4096 May 13 07:22 ..
- -rw-r--r-- 1 username username 392 May 21 2014 authorized_keys
- -rw------- 1 username username 1675 May 21 2014 id_rsa
- -rw------- 1 username username 1460 May 21 2014 id_rsa.ppk
- -rw-r--r-- 1 username username 392 May 21 2014 id_rsa.pub
+ -rw-r--r-- 1 username username  392 May 21  2014 authorized_keys
+ -rw------- 1 username username 1675 May 21  2014 id_rsa
+ -rw------- 1 username username 1460 May 21  2014 id_rsa.ppk
+ -rw-r--r-- 1 username username  392 May 21  2014 id_rsa.pub
```

-!!! Note "Note"
-    Please note that private keys in .ssh directory are without passphrase and allow you to connect within the cluster.
+!!! hint
+    Private keys in the .ssh directory are without a passphrase and allow you to connect within the cluster.

-Access privileges on .ssh folder
-------------------------------
+## Access Privileges on .ssh Folder

-- .ssh directory: 700 (drwx------)
-- Authorized_keys, known_hosts and public key (.pub file): 644 (-rw-r--r--)
-- Private key (id_rsa/id_rsa.ppk): 600 (-rw-------)
+* .ssh directory: 700 (drwx------)
+* Authorized_keys, known_hosts and public key (.pub file): 644 (-rw-r--r--)
+* Private key (id_rsa/id_rsa.ppk): 600 (-rw-------)

```bash
 cd /home/username/
@@ -38,13 +35,12 @@ Access privileges on .ssh folder
 chmod 600 .ssh/id_rsa.ppk
```

-Private key
----------
+## Private Key

-!!! Note "Note"
+!!! note
    The path to a private key is usually /home/username/.ssh/

-Private key file in "id_rsa" or `"*.ppk" `format is used to authenticate with the servers. Private key is present locally on local side and used for example in SSH agent Pageant (for Windows users). The private key should always be kept in a safe place.
+A private key file in "id_rsa" or `*.ppk` format is used to authenticate with the servers. The private key is present on the local side and used for example in the SSH agent Pageant (for Windows users). The private key should always be kept in a safe place.

An example of private key format:

@@ -78,10 +74,9 @@ An example of private key format:
 -----END RSA PRIVATE KEY-----
 ```

-Public key
---------
+## Public Key

-Public key file in "*.pub" format is used to verify a digital signature. Public key is present on the remote side and allows access to the owner of the matching private key.
+A public key file in "\*.pub" format is used to verify a digital signature. The public key is present on the remote side and allows access to the owner of the matching private key.

An example of public key format:

@@ -89,8 +84,7 @@ An example of public key format:
 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpujuOiTKCcGkbbBhrk0Hjmezr5QpM0swscXQE7fOZG0oQSURoapd9tjC9eVy5FvZ339jl1WkJkdXSRtjc2G1U5wQh77VE5qJT0ESxQCEw0S+CItWBKqXhC9E7gFY+UyP5YBZcOneh6gGHyCVfK6H215vzKr3x+/WvWl5gZGtbf+zhX6o4RJDRdjZPutYJhEsg/qtMxcCtMjfm/dZTnXeafuebV8nug3RCBUflvRb1XUrJuiX28gsd4xfG/P6L/mNMR8s4kmJEZhlhxpj8Th0iIc+XciVtXuGWQrbddcVRLxAmvkYAPGnVVOQeNj69pqAR/GXaFAhvjYkseEowQao1 username@organization.example.com
```

-How to add your own key
-----------------------
+## How to Add Your Own Key

First, generate a new keypair of your public and private key:

@@ -98,8 +92,8 @@ First, generate a new keypair of your public and private key:
 local $ ssh-keygen -C 'username@organization.example.com' -f additional_key
```

-!!! Note "Note"
-    Please, enter **strong** **passphrase** for securing your private key.
+!!! note
+    Please enter a **strong passphrase** to secure your private key.

You can insert an additional public key into the authorized_keys file for authentication with your own private key. Additional records in the authorized_keys file must be delimited by a new line. Users are not advised to remove the default public key from the authorized_keys file.

Example:

In this example, we add an additional public key, stored in the file additional_key.pub, into the authorized_keys. Next time we log in, we will be able to use the private additional_key key to log in.

-How to remove your own key
-------------------------
+## How to Remove Your Own Key

-Removing your key from authorized_keys can be done simply by deleting the corresponding public key which can be identified by a comment at the end of line (eg. *username@organization.example.com*).
+Removing your key from authorized_keys can be done simply by deleting the corresponding public key, which can be identified by a comment at the end of the line (e.g. _username@organization.example.com_).
diff --git a/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md b/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md
new file mode 100644
index 0000000000000000000000000000000000000000..01123953847eefae3965c86e4896e6573f5514a5
--- /dev/null
+++ b/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md
@@ -0,0 +1,17 @@
+# VPN - Connection Fail in Win 8.1
+
+## Failed to Initialize Connection Subsystem Win 8.1 - 02-10-15 MS Patch
+
+AnyConnect users on Windows 8.1 will receive a "Failed to initialize connection subsystem" error after installing the Windows 8.1 02/10/15 security patch.
This OS defect, introduced with the 02/10/15 patch update, will also impact Windows 7 users with IE11. Windows Server 2008/2012 are also impacted by this defect, but neither is a supported OS for AnyConnect.
+
+## Workaround
+
+* Close the Cisco AnyConnect window and the taskbar mini-icon.
+* Right-click vpnui.exe in the 'Cisco AnyConnect Secure Mobility Client' folder (C:\Program Files (x86)\Cisco\Cisco AnyConnect Secure Mobility Client).
+* Click on the 'Run compatibility troubleshooter' button.
+* Choose 'Try recommended settings'.
+* The wizard suggests Windows 8 compatibility.
+* Click 'Test Program'. This will open the program.
+* Close.
+
+
diff --git a/docs.it4i/get-started-with-it4innovations/vpn-access.md b/docs.it4i/general/accessing-the-clusters/vpn-access.md
similarity index 69%
rename from docs.it4i/get-started-with-it4innovations/vpn-access.md
rename to docs.it4i/general/accessing-the-clusters/vpn-access.md
index 8c51a6149fc636474279e4a86c3375ac2adc3e19..8f24a21f54aa37624035cf8aa42806af9d09c4a8 100644
--- a/docs.it4i/get-started-with-it4innovations/vpn-access.md
+++ b/docs.it4i/general/accessing-the-clusters/vpn-access.md
@@ -1,76 +1,74 @@
-VPN Access
-==========
+# VPN Access
+
+## Accessing IT4Innovations Internal Resources via VPN

-Accessing IT4Innovations internal resources via VPN
---------------------------------------------------
To use resources and licenses which are located on the IT4Innovations local network, it is necessary to connect to this network via VPN. We use the Cisco AnyConnect Secure Mobility Client, which is supported on the following operating systems:

-- Windows XP
-- Windows Vista
-- Windows 7
-- Windows 8
-- Linux
-- MacOS
+* Windows XP
+* Windows Vista
+* Windows 7
+* Windows 8
+* Linux
+* MacOS

It is impossible to connect to the VPN from other operating systems.

-VPN client installation
-----------------------------------
+## VPN Client Installation
+
You can install the VPN client from the web interface after a successful login with LDAP credentials at <https://vpn.it4i.cz/user>

- 
+

Depending on your Java settings, after login the client either installs automatically or downloads an installation file for your operating system. It is necessary to allow the installation tool to start for automatic installation.

- 
- 
- 
+
+
+

After a successful installation, the VPN connection will be established and you can use the available resources from the IT4I network.

- 
+

If your Java settings don't allow automatic installation, you can download the installation file and install the VPN client manually.

- 
+

After you click on the link, the download of the installation file will start.

- 
+

After a successful download of the installation file, you have to execute this tool with administrator's rights and install the VPN client manually.

-Working with VPN client
----------------------
+## Working With VPN Client

You can use the graphical user interface or the command line interface to run the VPN client on all supported operating systems. We suggest using the GUI.

-Before the first login to VPN, you have to fill URL **[https://vpn.it4i.cz/user](https://vpn.it4i.cz/user)** into the text field.
+Before the first login to the VPN, you have to fill in the URL **<https://vpn.it4i.cz/user>** into the text field.

- 
+

After you click on the Connect button, you must fill in your login credentials.

- 
+

After a successful login, the client will minimize to the system tray. If everything works, you can see a lock in the Cisco tray icon.

- 
+

If you right-click on this icon, you will see a context menu in which you can control the VPN connection.
- +

When you connect to the VPN for the first time, the client downloads the profile and creates a new item "IT4I cluster" in the connection list. For subsequent connections, it is not necessary to re-enter the URL address; just select the corresponding item.

- +

Then AnyConnect automatically proceeds as in the case of the first logon.

- +

After a successful logon, you can see a green circle with a tick mark on the lock icon.

- +

For disconnecting, right-click on the AnyConnect client icon in the system tray and select **VPN Disconnect**.
diff --git a/docs.it4i/get-started-with-it4innovations/vpn1-access.md b/docs.it4i/general/accessing-the-clusters/vpn1-access.md
similarity index 78%
rename from docs.it4i/get-started-with-it4innovations/vpn1-access.md
rename to docs.it4i/general/accessing-the-clusters/vpn1-access.md
index dc7f0c5ce628953259a709c6a035d8ff6fb6b17f..b7cacfd2425b18c51abe54e82912854c1967c07d 100644
--- a/docs.it4i/get-started-with-it4innovations/vpn1-access.md
+++ b/docs.it4i/general/accessing-the-clusters/vpn1-access.md
@@ -1,27 +1,24 @@
-VPN Access
-==========
+# VPN Access

-Accessing IT4Innovations internal resources via VPN
-------------------------------------------------- 
+## Accessing IT4Innovations Internal Resources via VPN

-!!! Note "Note"
-    **Failed to initialize connection subsystem Win 8.1 - 02-10-15 MS patch**
+!!! note
+    **Failed to initialize connection subsystem Win 8.1 - 02-10-15 MS patch**

-    Workaround can be found at [vpn-connection-fail-in-win-8.1](../../get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.html)
+A workaround can be found at [vpn-connection-fail-in-win-8.1](../../general/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1/)

To use resources and licenses located on the IT4Innovations local network, it is necessary to connect to this network via VPN. We use the Cisco AnyConnect Secure Mobility Client, which is supported on the following operating systems:

-- Windows XP
-- Windows Vista
-- Windows 7
-- Windows 8
-- Linux
-- MacOS
+* Windows XP
+* Windows Vista
+* Windows 7
+* Windows 8
+* Linux
+* MacOS

It is impossible to connect to the VPN from other operating systems.

-VPN client installation
------------------------------------- 
+## VPN Client Installation

You can install the VPN client from the web interface after a successful login with LDAP credentials at <https://vpn1.it4i.cz/anselm>

@@ -49,12 +46,11 @@ After you click on the link, download of installation file will start.

After the installation file has downloaded, you have to execute it with administrator's rights and install the VPN client manually.

-Working with VPN client
----------------------- 
+## Working With VPN Client

You can use the graphical user interface or the command line interface to run the VPN client on all supported operating systems. We suggest using the GUI.

-Before the first login to VPN, you have to fill URL **https://vpn1.it4i.cz/anselm** into the text field.
+Before the first login to VPN, you have to fill in the URL [**https://vpn1.it4i.cz/anselm**](https://vpn1.it4i.cz/anselm) in the text field.

 
diff --git a/docs.it4i/get-started-with-it4innovations/applying-for-resources.md b/docs.it4i/general/applying-for-resources.md
similarity index 97%
rename from docs.it4i/get-started-with-it4innovations/applying-for-resources.md
rename to docs.it4i/general/applying-for-resources.md
index 0467551d3541af5e1c97ac4106ea8eae92c45714..8875ec91b415637f2b90fb7218c980849e1f48f0 100644
--- a/docs.it4i/get-started-with-it4innovations/applying-for-resources.md
+++ b/docs.it4i/general/applying-for-resources.md
@@ -1,5 +1,4 @@
-Applying for Resources
-======================
+# Applying for Resources

Computational resources may be allocated by any of the following [Computing resources allocation](http://www.it4i.cz/computing-resources-allocation/?lang=en) mechanisms.
diff --git a/docs.it4i/get-started-with-it4innovations/obtaining-login-credentials/certificates-faq.md b/docs.it4i/general/obtaining-login-credentials/certificates-faq.md
similarity index 61%
rename from docs.it4i/get-started-with-it4innovations/obtaining-login-credentials/certificates-faq.md
rename to docs.it4i/general/obtaining-login-credentials/certificates-faq.md
index e94a3676c78067b2e5234430add287505b940758..bf0b5c5acc85d611237908cfecf5f8e73b07afd5 100644
--- a/docs.it4i/get-started-with-it4innovations/obtaining-login-credentials/certificates-faq.md
+++ b/docs.it4i/general/obtaining-login-credentials/certificates-faq.md
@@ -1,65 +1,57 @@
-Certificates FAQ
-================
+# Certificates FAQ

FAQ about certificates in general

-Q: What are certificates?
------------------------- 
+## Q: What Are Certificates?

IT4Innovations employs X.509 certificates for secure communication (e.g. credentials exchange) and for grid services related to PRACE, as they present a single method of authentication for all PRACE services, where only one password is required. There are different kinds of certificates, each with a different scope of use. We mention here:

-- User (Private) certificates
-- Certificate Authority (CA) certificates
-- Host certificates
-- Service certificates
+* User (Private) certificates
+* Certificate Authority (CA) certificates
+* Host certificates
+* Service certificates

However, users need only manage User and CA certificates. Note that your user certificate is protected by an associated private key, and this **private key must never be disclosed**.

-Q: Which X.509 certificates are recognised by IT4Innovations?
------------------------------------------------------------ 
+## Q: Which X.509 Certificates Are Recognised by IT4Innovations?

-Any certificate that has been issued by a Certification Authority (CA) from a member of the IGTF ([http:www.igtf.net](http://www.igtf.net/)) is recognised by IT4Innovations: European certificates are issued by members of the EUGridPMA ([https://www.eugridmpa.org](https://www.eugridpma.org/)), which is part if the IGTF and coordinates the trust fabric for e-Science Grid authentication within Europe. Further the Czech *"Qualified certificate" (Kvalifikovaný certifikát)* (provided by <http://www.postsignum.cz/> or <http://www.ica.cz/Kvalifikovany-certifikat.aspx>), that is used in electronic contact with Czech public authorities is accepted.
+See [The Certificates for Digital Signatures](obtaining-login-credentials/#the-certificates-for-digital-signatures).

-Q: How do I get a User Certificate that can be used with IT4Innovations?
------------------------------------------------------------------------ 
+## Q: How Do I Get a User Certificate That Can Be Used With IT4Innovations?
-To get a certificate, you must make a request to your local, IGTF approved, Certificate Authority (CA). Usually you then must visit, in person, your nearest Registration Authority (RA) to verify your affiliation and identity (photo identification is required). Usually, you will then be emailed details on how to retrieve your certificate, although procedures can vary between CAs. If you are in Europe, you can locate your trusted CA via <http://www.eugridpma.org/members/worldmap>.
+To get a certificate, you must make a request to your local, IGTF approved, Certificate Authority (CA). Usually you then must visit, in person, your nearest Registration Authority (RA) to verify your affiliation and identity (photo identification is required). Usually, you will then be emailed details on how to retrieve your certificate, although procedures can vary between CAs. If you are in Europe, you can locate [your trusted CA](http://www.eugridpma.org/members/worldmap).

In some countries certificates can also be retrieved using the TERENA Certificate Service; see the FAQ below for the link.

-Q: Does IT4Innovations support short lived certificates (SLCS)?
-------------------------------------------------------------- 
+## Q: Does IT4Innovations Support Short Lived Certificates (SLCS)?

Yes, provided that the CA which provides this service is also a member of IGTF.

-Q: Does IT4Innovations support the TERENA certificate service?
------------------------------------------------------------- 
+## Q: Does IT4Innovations Support the TERENA Certificate Service?

- Yes, ITInnovations supports TERENA eScience personal certificates. For more information, please visit [https://tcs-escience-portal.terena.org](https://tcs-escience-portal.terena.org/), where you also can find if your organisation/country can use this service
+Yes, IT4Innovations supports TERENA eScience personal certificates. For more information, please visit [TCS - Trusted Certificate Service](https://tcs-escience-portal.terena.org/), where you can also find out if your organisation/country can use this service.

-Q: What format should my certificate take?
------------------------------------------ 
+## Q: What Format Should My Certificate Take?

User Certificates come in many formats, the three most common being the ’PKCS12’, ’PEM’ and the JKS formats.

The PKCS12 (often abbreviated to ’p12’) format stores your user certificate, along with your associated private key, in a single file. This form of your certificate is typically employed by web browsers, mail clients, and grid services like UNICORE, DART, gsissh-term and Globus toolkit (GSI-SSH, GridFTP and GRAM5).

-The PEM format (*.pem) stores your user certificate and your associated private key in two separate files. This form of your certificate can be used by PRACE’s gsissh-term and with the grid related services like Globus toolkit (GSI-SSH, GridFTP and GRAM5).
+The PEM format (`*.pem`) stores your user certificate and your associated private key in two separate files. This form of your certificate can be used by PRACE’s gsissh-term and with the grid related services like Globus toolkit (GSI-SSH, GridFTP and GRAM5).

-To convert your Certificate from PEM to p12 formats, and *vice versa*, IT4Innovations recommends using the openssl tool (see separate FAQ entry).
+To convert your Certificate from PEM to p12 formats, and _vice versa_, IT4Innovations recommends using the openssl tool (see separate FAQ entry).
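For instance, the PEM to p12 direction typically looks like this (a sketch; the file names are placeholders for your own certificate and key):

```bash
local $ openssl pkcs12 -export -in usercert.pem -inkey userkey.pem -out username.p12
```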
JKS is the Java KeyStore and may contain your personal certificate along with your private key, and a list of your trusted CA certificates. This form of your certificate can be used by grid services like DART and UNICORE6.

To convert your Certificate from p12 to JKS, IT4Innovations recommends using the keytool utility (see separate FAQ entry).

-Q: What are CA certificates?
--------------------------- 
+## Q: What Are CA Certificates?

Certification Authority (CA) certificates are used to verify the link between your user certificate and the authority which issued it. They are also used to verify the link between the host certificate of an IT4Innovations server and the CA which issued that certificate. In essence they establish a chain of trust between you and the target server. Thus, for some grid services, users must have a copy of all the CA certificates.

-To assist users, SURFsara (a member of PRACE) provides a complete and up-to-date bundle of all the CA certificates that any PRACE user (or IT4Innovations grid services user) will require. Bundle of certificates, in either p12, PEM or JKS formats, are available from <http://winnetou.sara.nl/prace/certs/>.
+To assist users, SURFsara (a member of PRACE) provides a complete and up-to-date bundle of all the CA certificates that any PRACE user (or IT4Innovations grid services user) will require. Bundles of certificates, in p12, PEM or JKS formats, are [available here](https://winnetou.surfsara.nl/prace/certs/).

It is worth noting that gsissh-term and DART automatically update their CA certificates from this SURFsara website. In other cases, if you receive a warning that a server’s certificate can not be validated (not trusted), then please update your CA certificates via the SURFsara website. If this fails, then please contact the IT4Innovations helpdesk.

@@ -69,19 +61,17 @@ Lastly, if you need the CA certificates for a personal Globus 5 installation, th
 myproxy-get-trustroots -s myproxy-prace.lrz.de
 ```

-If you run this command as ’root’, then it will install the certificates into /etc/grid-security/certificates. If you run this not as ’root’, then the certificates will be installed into $HOME/.globus/certificates. For Globus, you can download the globuscerts.tar.gz packet from <http://winnetou.sara.nl/prace/certs/>.
+If you run this command as ’root’, then it will install the certificates into /etc/grid-security/certificates. If you do not run it as ’root’, then the certificates will be installed into $HOME/.globus/certificates. For Globus, you can download the globuscerts.tar.gz package [available here](https://winnetou.surfsara.nl/prace/certs/).

-Q: What is a DN and how do I find mine?
-------------------------------------- 
+## Q: What Is a DN and How Do I Find Mine?

DN stands for Distinguished Name and is part of your user certificate. IT4Innovations needs to know your DN to enable your account to use the grid services. You may use openssl (see below) to determine your DN or, if your browser contains your user certificate, you can extract your DN from your browser.

-For Internet Explorer users, the DN is referred to as the "subject" of your certificate. Tools->Internet Options->Content->Certificates->View->Details->Subject.
+For Internet Explorer users, the DN is referred to as the "subject" of your certificate. Tools > Internet Options > Content > Certificates > View > Details > Subject.

-For users running Firefox under Windows, the DN is referred to as the "subject" of your certificate. Tools->Options->Advanced->Encryption->View Certificates. Highlight your name and then Click View->Details->Subject.
+For users running Firefox under Windows, the DN is referred to as the "subject" of your certificate. Tools > Options > Advanced > Encryption > View Certificates. Highlight your name and then click View > Details > Subject.
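On the command line, the DN can also be printed directly with openssl (a sketch; assumes your certificate is stored in usercert.pem):

```bash
local $ openssl x509 -in usercert.pem -noout -subject
```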
-Q: How do I use the openssl tool?
-------------------------------- 
+## Q: How Do I Use the Openssl Tool?

The following examples are for Unix/Linux operating systems only.

@@ -92,7 +82,7 @@ To convert from PEM to p12, enter the following command:
 username.p12
 ```

-To convert from p12 to PEM, type the following *four* commands:
+To convert from p12 to PEM, type the following _four_ commands:

```bash
openssl pkcs12 -in username.p12 -out usercert.pem -clcerts -nokeys
@@ -114,10 +104,9 @@ To check your certificate (e.g., DN, validity, issuer, public key algorithm, etc
 openssl x509 -in usercert.pem -text -noout
 ```

-To download openssl for both Linux and Windows, please visit <http://www.openssl.org/related/binaries.html>. On Macintosh Mac OS X computers openssl is already pre-installed and can be used immediately.
+If openssl is not pre-installed on your system, you can download it from [the OpenSSL source page](https://www.openssl.org/source/). On Mac OS X computers, openssl is pre-installed and can be used immediately.

-Q: How do I create and then manage a keystore?
--------------------------------------------- 
+## Q: How Do I Create and Then Manage a Keystore?

IT4Innovations recommends the Java-based keytool utility to create and manage keystores, which themselves are stores of keys and certificates. For example, if you want to convert your pkcs12 formatted key pair into a Java keystore, you can use the following command.

@@ -137,39 +126,34 @@ You also can import CA certificates into your java keystore with the tool, e.g.:

where $mydomain.crt is the certificate of a trusted signing authority (CA) and $mydomain is the alias name that you give to the entry.

-More information on the tool can be found at:<http://docs.oracle.com/javase/7/docs/technotes/tools/solaris/keytool.html>
+More information on the tool can be found [here](http://docs.oracle.com/javase/7/docs/technotes/tools/solaris/keytool.html).
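The p12-to-JKS conversion mentioned above typically takes this shape (a sketch; the keystore and file names are placeholders):

```bash
local $ keytool -importkeystore -srckeystore username.p12 -srcstoretype pkcs12 \
          -destkeystore keystore.jks -deststoretype jks
```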
-Q: How do I use my certificate to access the different grid Services?
--------------------------------------------------------------------- 
+## Q: How Do I Use My Certificate to Access the Different Grid Services?

Most grid services require the use of your certificate; however, the format of your certificate depends on the grid Service you wish to employ. If employing the PRACE version of GSISSH-term (also a Java Web Start Application), you may use either the PEM or p12 formats. Note that this service automatically installs up-to-date PRACE CA certificates.

-If the grid service is UNICORE, then you bind your certificate, in either the p12 format or JKS, to UNICORE during the installation of the client on your local machine. For more information, please visit [UNICORE6 in PRACE](http://www.prace-ri.eu/UNICORE6-in-PRACE)
+If the grid service is UNICORE, then you bind your certificate, in either the p12 format or JKS, to UNICORE during the installation of the client on your local machine. For more information, please visit [UNICORE6 in PRACE](http://www.prace-ri.eu/UNICORE6-in-PRACE).

If the grid service is part of Globus, such as GSI-SSH, GridFTP or GRAM5, then the certificates can be in either p12 or PEM format and must reside in the "$HOME/.globus" directory for Linux and Mac users or `%HOMEPATH%\.globus` for Windows users. (Windows users will have to use the DOS command ’cmd’ to create a directory which starts with a ’.’.) Further, user certificates should be named either "usercred.p12" or "usercert.pem" and "userkey.pem", and the CA certificates must be kept in a pre-specified directory as follows. For Linux and Mac users, this directory is either $HOME/.globus/certificates or /etc/grid-security/certificates. For Windows users, this directory is `%HOMEPATH%\.globus\certificates`. (If you are using GSISSH-Term from prace-ri.eu, then you do not have to create the .globus directory nor install CA certificates to use this tool alone.)

-Q: How do I manually import my certificate into my browser?
---------------------------------------------------------- 
+## Q: How Do I Manually Import My Certificate Into My Browser?

-If you employ the Firefox browser, then you can import your certificate by first choosing the "Preferences" window. For Windows, this is Tools->Options. For Linux, this is Edit->Preferences. For Mac, this is Firefox->Preferences. Then, choose the "Advanced" button; followed by the "Encryption" tab. Then, choose the "Certificates" panel; select the option "Select one automatically" if you have only one certificate, or "Ask me every time" if you have more then one. Then click on the "View Certificates" button to open the "Certificate Manager" window. You can then select the "Your Certificates" tab and click on button "Import". Then locate the PKCS12 (.p12) certificate you wish to import, and employ its associated password.
+If you employ the Firefox browser, then you can import your certificate by first choosing the "Preferences" window. For Windows, this is Tools > Options. For Linux, this is Edit > Preferences. For Mac, this is Firefox > Preferences. Then, choose the "Advanced" button, followed by the "Encryption" tab. Then, choose the "Certificates" panel; select the option "Select one automatically" if you have only one certificate, or "Ask me every time" if you have more than one. Then click on the "View Certificates" button to open the "Certificate Manager" window. You can then select the "Your Certificates" tab and click on the "Import" button. Then locate the PKCS12 (.p12) certificate you wish to import, and employ its associated password.

-If you are a Safari user, then simply open the "Keychain Access" application and follow "File->Import items".
+If you are a Safari user, then simply open the "Keychain Access" application and follow "File > Import items".

-If you are an Internet Explorer user, click Start->Settings->Control Panel and then double-click on Internet. On the Content tab, click Personal, and then click Import. In the Password box, type your password. NB you may be prompted multiple times for your password. In the "Certificate File To Import" box, type the filename of the certificate you wish to import, and then click OK. Click Close, and then click OK.
+If you are an Internet Explorer user, click Start > Settings > Control Panel and then double-click on Internet. On the Content tab, click Personal, and then click Import. In the Password box, type your password. NB you may be prompted multiple times for your password. In the "Certificate File To Import" box, type the filename of the certificate you wish to import, and then click OK. Click Close, and then click OK.

-Q: What is a proxy certificate?
------------------------------ 
+## Q: What Is a Proxy Certificate?

-A proxy certificate is a short-lived certificate which may be employed by UNICORE and the Globus services. The proxy certificate consists of a new user certificate and a newly generated proxy private key. This proxy typically has a rather short lifetime (normally 12 hours) and often only allows a limited delegation of rights. Its default location, for Unix/Linux, is /tmp/x509_u*uid* but can be set via the $X509_USER_PROXY environment variable.
+A proxy certificate is a short-lived certificate which may be employed by UNICORE and the Globus services. The proxy certificate consists of a new user certificate and a newly generated proxy private key. This proxy typically has a rather short lifetime (normally 12 hours) and often only allows a limited delegation of rights. Its default location, for Unix/Linux, is `/tmp/x509_u<uid>`, but it can be set via the $X509_USER_PROXY environment variable.
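With the Globus tools installed, a proxy is typically created and inspected like this (a sketch; grid-proxy-init and grid-proxy-info come with the Globus toolkit):

```bash
$ grid-proxy-init -valid 12:00   # create a proxy valid for 12 hours
$ grid-proxy-info                # show its subject, issuer and remaining lifetime
```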
-Q: What is the MyProxy service?
------------------------------ 
+## Q: What Is the MyProxy Service?

[The MyProxy Service](http://grid.ncsa.illinois.edu/myproxy/), which can be employed by gsissh-term and Globus tools, is an online repository that allows users to store long lived proxy certificates remotely, which can then be retrieved for use at a later date. Each proxy is protected by a password provided by the user at the time of storage. This is beneficial to Globus users as they do not have to carry their private keys and certificates when travelling; nor do users have to install private keys and certificates on possibly insecure computers.

-Q: Someone may have copied or had access to the private key of my certificate either in a separate file or in the browser. What should I do?
-------------------------------------------------------------------------------------------------------------------------------------------- 
+## Q: Someone May Have Copied or Had Access to the Private Key of My Certificate Either in a Separate File or in the Browser. What Should I Do?

Please ask the CA that issued your certificate to revoke this certificate and to supply you with a new one. In addition, please report this to IT4Innovations by contacting [the support team](https://support.it4i.cz/rt).
diff --git a/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md b/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md
new file mode 100644
index 0000000000000000000000000000000000000000..7fb2cb4ef8b4fae3b024efa250eeb53ad6b312fa
--- /dev/null
+++ b/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md
@@ -0,0 +1,171 @@
+# Obtaining Login Credentials
+
+## Obtaining Authorization
+
+The computational resources of IT4I are allocated by the Allocation Committee to a [Project](/), investigated by a Primary Investigator. By allocating the computational resources, the Allocation Committee authorizes the PI to access and use the clusters. The PI may decide to authorize a number of her/his Collaborators to access and use the clusters, to consume the resources allocated to her/his Project. These collaborators will be associated with the Project. The figure below depicts the authorization chain:
+
+
+
+!!! note
+    You need to either [become the PI](../applying-for-resources/) or [be named as a collaborator](obtaining-login-credentials/#authorization-by-web) by a PI in order to access and use the clusters.
+
+The Head of Supercomputing Services acts as the PI of project DD-13-5. Joining this project, you may **access and explore the clusters**, use software, the development environment and computers via the qexp and qfree queues.
You may use these resources for your own education/research; no paperwork is required. All IT4I employees may contact the Head of Supercomputing Services in order to obtain **free access to the clusters**.
+
+## Authorization of PI by Allocation Committee
+
+The PI is authorized to use the clusters by the allocation decision issued by the Allocation Committee. The PI will be informed by IT4I about the Allocation Committee decision.
+
+## Authorization by Web
+
+!!! warning
+    **Only** for those who already have their IT4I HPC account. This is the preferred way of granting access to project resources; please use this method whenever possible.
+
+Log in to the [IT4I Extranet portal](https://extranet.it4i.cz) using IT4I credentials and go to the **Projects** section.
+
+* **Users:** Please submit your requests for becoming a project member.
+* **Primary Investigators:** Please approve or deny users' requests in the same section.
+
+## Authorization by E-Mail (An Alternative Approach)
+
+In order to authorize a Collaborator to utilize the allocated resources, the PI should contact the [IT4I support](https://support.it4i.cz/rt/) (E-mail: [support\[at\]it4i.cz](mailto:support@it4i.cz)) and provide the following information:
+
+1. Identify your project by project ID.
+1. Provide a list of people, including yourself, who are authorized to use the resources allocated to the project. The list must include full name, e-mail and affiliation. Provide usernames as well if collaborator login access already exists on the IT4I systems.
+1. Include "Authorization to IT4Innovations" in the subject line.
+
+!!! warning
+    Should the above information be provided by e-mail, the e-mail **must be** digitally signed. Read more on [digital signatures](#certificates-for-digital-signatures) below.
+
+Example (except for the subject line, which must be in English, you may use the Czech or Slovak language for communication with us):
+
+```bash
+    Subject: Authorization to IT4Innovations
+
+    Dear support,
+
+    Please include my collaborators to project OPEN-0-0.
+
+    John Smith, john.smith@myemail.com, Department of Chemistry, MIT, US
+    Jonas Johansson, jjohansson@otheremail.se, Department of Physics, Royal Institute of Technology, Sweden
+    Luisa Fibonacci, lf@emailitalia.it, Department of Mathematics, National Research Council, Italy
+
+    Thank you,
+    PI
+    (Digitally signed)
+```
+
+## Login Credentials
+
+Once authorized by the PI, every person (PI or Collaborator) wishing to access the clusters should contact the [IT4I support](https://support.it4i.cz/rt/) (E-mail: [support\[at\]it4i.cz](mailto:support@it4i.cz)), providing the following information:
+
+1. Project ID.
+1. Full name and affiliation.
+1. Statement that you have read and accepted the [Acceptable use policy document](http://www.it4i.cz/acceptable-use-policy.pdf) (AUP).
+1. Attach the AUP file.
+1. Your preferred username, max 8 characters long. The preferred username must be associated with your surname and name or be otherwise derived from them. Only alphanumeric sequences, dashes and underscores are allowed.
+1. In case you choose the [Alternative way to personal certificate](#alternative-way-to-personal-certificate), a **scan of photo ID** (personal ID or passport or driver's license) is required.
+
+!!! warning
+    Should the above information be provided by e-mail, the e-mail **must be** digitally signed. Read more on [digital signatures](#certificates-for-digital-signatures) below.
+
+Example (except for the subject line, which must be in English, you may use the Czech or Slovak language for communication with us):
+
+```bash
+    Subject: Access to IT4Innovations
+
+    Dear support,
+
+    Please open the user account for me and attach the account to OPEN-0-0
+    Name and affiliation: John Smith, john.smith@myemail.com, Department of Chemistry, MIT, US
+    I have read and accept the Acceptable use policy document (attached)
+
+    Preferred username: johnsm
+
+    Thank you,
+    John Smith
+    (Digitally signed)
+```
+
+You will receive your personal login credentials by protected e-mail. The login credentials include:
+
+1. username
+1. ssh private key and private key passphrase
+1. system password
+
+The clusters are accessed by the [private key](../accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) and username. The username and password are used for login to the [information systems](http://support.it4i.cz/).
+
+## Change Passphrase
+
+On Linux, use
+
+```bash
+local $ ssh-keygen -f id_rsa -p
+```
+
+On Windows, use [PuTTY Key Generator](../accessing-the-clusters/shell-access-and-data-transfer/putty/#putty-key-generator).
+
+## Change Password
+
+Change your password in [your user profile](https://extranet.it4i.cz/user/).
+
+## Certificates for Digital Signatures
+
+We accept personal certificates issued by any widely respected certification authority (CA). This includes certificates by CAs organized in the [International Grid Trust Federation](http://www.igtf.net/), its European branch [EUGridPMA](https://www.eugridpma.org/) and its member organizations, e.g. the [CESNET certification authority](https://tcs.cesnet.cz). The Czech _"Qualified certificate" (Kvalifikovaný certifikát)_ provided by [PostSignum](http://www.postsignum.cz/) or [I.CA](http://www.ica.cz/Kvalifikovany-certifikat.aspx), which is used in electronic contact with Czech authorities, is accepted as well.
+
+The certificate generation process for academic purposes, utilizing the CESNET certification authority, is well described here:
+
+* [How to generate a personal TCS certificate in Mozilla Firefox web browser (in Czech)](http://idoc.vsb.cz/xwiki/wiki/infra/view/uzivatel/moz-cert-gen)
+
+If you are not able to obtain a certificate from any of the respected certification authorities, please follow the Alternative Way below.
+
+A FAQ about certificates can be found here: [Certificates FAQ](certificates-faq/).
+
+## Alternative Way to Personal Certificate
+
+Follow these steps **only** if you cannot obtain your certificate in a standard way. In case you choose this procedure, please attach a **scan of photo ID** (personal ID or passport or driver's license) when applying for login credentials.
+
+!!! warning
+    Please use Firefox (or a Firefox clone) for the following steps. Other browsers, like Chrome, are not compatible.
+
+* Go to [COMODO Application for Secure Email Certificate](https://secure.comodo.com/products/frontpage?area=SecureEmailCertificate).
+* Fill in the form, accept the Subscriber Agreement and submit it with the _Next_ button.
+  * Type in the e-mail address which you intend to use for communication with us.
+  * Don't forget your chosen _Revocation password_.
+* You will receive an e-mail with a link to collect your certificate. Be sure to open the link in the same browser in which you submitted the application.
+* Your browser should notify you that the certificate has been correctly installed in it. Now you will need to save it as a file.
+* In Firefox, navigate to _Options > Advanced > Certificates > View Certificates_.
+* Choose the _Your Certificates_ tab and find the fresh certificate with today's date.
+* Select it and hit the _Backup..._ button.
+* A standard save dialog should appear, where you can choose the name of your certificate file for easy identification in the future.
+* You will be prompted to choose a passphrase for your new certificate. This passphrase will be needed for installation into your favourite email client.
+
+!!! note
+    The certificate file can now be installed into your email client. Web-based email interfaces cannot be used for secure communication; an external application, such as Thunderbird or Outlook, must be used (instructions below). This way, your new credentials will be visible only in applications that have access to your certificate.
+
+## Installation of the Certificate Into Your Mail Client
+
+The procedure is similar to the following guides:
+
+MS Outlook 2010
+
+* [How to Remove, Import, and Export Digital certificates](http://support.microsoft.com/kb/179380)
+* [Importing a PKCS #12 certificate (in Czech)](http://idoc.vsb.cz/xwiki/wiki/infra/view/uzivatel/outl-cert-imp)
+
+Mozilla Thunderbird
+
+* [Installing an SMIME certificate](https://support.globalsign.com/customer/portal/articles/1214955-install-certificate---mozilla-thunderbird)
+* [Importing a PKCS #12 certificate (in Czech)](http://idoc.vsb.cz/xwiki/wiki/infra/view/uzivatel/moz-cert-imp)
+
+## End of User Account Lifecycle
+
+User accounts are supported by membership in active Project(s) or by affiliation with IT4Innovations. User accounts that lose this support (meaning they are not attached to an active project and are not affiliated with IT4I) will be deleted 1 year after the last project to which they were attached expires.
+
+The user will get 3 automatically generated warning e-mail messages about the pending removal:
+
+* The first message will be sent 3 months before the removal.
+* The second message will be sent 1 month before the removal.
+* The third message will be sent 1 week before the removal.
+
+The messages will inform the user about the projected removal date and will challenge the user to migrate her/his data.
diff --git a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/cygwin-and-x11-forwarding.md b/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/cygwin-and-x11-forwarding.md
deleted file mode 100644
index e98bf9f04e64354e965d02ac4cbc15ae531636c0..0000000000000000000000000000000000000000
--- a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/cygwin-and-x11-forwarding.md
+++ /dev/null
@@ -1,27 +0,0 @@
-Cygwin and X11 forwarding
-=========================
-
-**If no able to forward X11 using PuTTY to CygwinX**
-
-```bash
-[usename@login1.anselm ~]$ gnome-session &
-[1] 23691
-[usename@login1.anselm ~]$ PuTTY X11 proxy: unable to connect to forwarded X server: Network error: Connection refused
-PuTTY X11 proxy: unable to connect to forwarded X server: Network error: Connection refused
-
- (gnome-session:23691): WARNING **: Cannot open display:**
-```
-
-1. Locate and modify Cygwin shortcut that uses [startxwin](http://x.cygwin.com/docs/man1/startxwin.1.html) - locate - C:cygwin64binXWin.exe - change it - to - C:*cygwin64binXWin.exe -listen tcp*
-
-
-2.
Check Putty settings: - Enable X11 forwarding - -  diff --git a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/pageant.md b/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/pageant.md deleted file mode 100644 index e064dad2cb7d8519c5e78e22121df27d71007be9..0000000000000000000000000000000000000000 --- a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/pageant.md +++ /dev/null @@ -1,12 +0,0 @@ -Pageant SSH agent -================= - -Pageant holds your private key in memory without needing to retype a passphrase on every login. - -- Run Pageant. -- On Pageant Key List press *Add key* and select your private key (id_rsa.ppk). -- Enter your passphrase. -- Now you have your private key in memory without needing to retype a passphrase on every login. - - - diff --git a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/putty.md b/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/putty.md deleted file mode 100644 index a6128d1da60bd86a1b35e8c12558e38f95a2f448..0000000000000000000000000000000000000000 --- a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/putty.md +++ /dev/null @@ -1,62 +0,0 @@ -PuTTY -===== - -!!! Note "Note" - PuTTY - before we start SSH connection - -Windows PuTTY Installer ------------------------ - -We recommned you to download "**A Windows installer for everything except PuTTYtel**" with **Pageant** (SSH authentication agent) and **PuTTYgen** (PuTTY key generator) which is available [here](http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html). - -!!! Note "Note" - After installation you can proceed directly to private keys authentication using ["Putty"](putty#putty). - - "Change Password for Existing Private Key" is optional. - - "Generate a New Public/Private key pair" is intended for users without Public/Private key in the initial email containing login credentials. - - "Pageant" is optional. - -PuTTYgen --------- - -PuTTYgen is the PuTTY key generator. Read more how to load in an existing private key and change your passphrase or generate a new public/private key pair using [PuTTYgen](puttygen) if needed. - -Pageant SSH agent ------------------ - -[Pageant](pageant) holds your private key in memory without needing to retype a passphrase on every login. We recommend its usage. - -PuTTY - how to connect to the IT4Innovations cluster ----------------------------------------------------- - -- Run PuTTY -- Enter Host name and Save session fields with [Login address](../../../salomon/shell-and-data-access.md) and browse Connection - > SSH -> Auth menu. The *Host Name* input may be in the format **"username@clustername.it4i.cz"** so you don't have to type your login each time.In this example we will connect to the Salomon cluster using **"salomon.it4i.cz"**. - - - -- Category -> Connection - > SSH -> Auth: - Select Attempt authentication using Pageant. - Select Allow agent forwarding. - Browse and select your [private key](ssh-keys/) file. - - - -- Return to Session page and Save selected configuration with *Save* button. - - - -- Now you can log in using *Open* button. - - - -- Enter your username if the *Host Name* input is not in the format "username@salomon.it4i.cz". 
-- Enter passphrase for selected [private key](ssh-keys/) file if Pageant **SSH authentication agent is not used.** - -Another PuTTY Settings ----------------------- - -- Category -> Windows -> Translation -> Remote character set and select **UTF-8**. -- Category -> Terminal -> Features and select **Disable application keypad mode** (enable numpad) -- Save your configuration on Session page in to Default Settings with *Save* button. diff --git a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/puttygen.md b/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/puttygen.md deleted file mode 100644 index 74848055b7f8a00d0a1c53b3a8a76497992c6416..0000000000000000000000000000000000000000 --- a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/puttygen.md +++ /dev/null @@ -1,52 +0,0 @@ -PuTTY key generator -=================== - -PuTTYgen is the PuTTY key generator. You can load in an existing private key and change your passphrase or generate a new public/private key pair. - -### Change Password for Existing Private Key - -You can change the password of your SSH key with "PuTTY Key Generator". Make sure to backup the key. - -- Load your [private key](../shell-access-and-data-transfer/ssh-keys/) file with *Load* button. -- Enter your current passphrase. -- Change key passphrase. -- Confirm key passphrase. -- Save your private key with *Save private key* button. - - - -Generate a New Public/Private key ---------------------------------- - -You can generate an additional public/private key pair and insert public key into authorized_keys file for authentication with your own private key. - -- Start with *Generate* button. - - - -- Generate some randomness. - - - -- Wait. - - - -- Enter a *comment* for your key using format 'username@organization.example.com'. - Enter key passphrase. - Confirm key passphrase. - Save your new private key `in "*.ppk" `format with *Save private key* button. - - - -- Save the public key with *Save public key* button. - You can copy public key out of the ‘Public key for pasting into authorized_keys file’ box. - - - -- Export private key in OpenSSH format "id_rsa" using Conversion -> Export OpenSSH key - - - -- Now you can insert additional public key into authorized_keys file for authentication with your own private key. - You must log in using ssh key received after registration. Then proceed to [How to add your own key](../shell-access-and-data-transfer/ssh-keys/). diff --git a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md b/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md deleted file mode 100644 index 03e8702677432f344a816a28378c2a9780007ece..0000000000000000000000000000000000000000 --- a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md +++ /dev/null @@ -1,19 +0,0 @@ -VPN - Connection fail in Win 8.1 -================================ - -**Failed to initialize connection subsystem Win 8.1 - 02-10-15 MS patch** - -AnyConnect users on Windows 8.1 will receive a "Failed to initialize connection subsystem" error after installing the Windows 8.1 02/10/15 security patch. This OS defect introduced with the 02/10/15 patch update will also impact WIndows 7 users with IE11. 
Windows Server 2008/2012 are also impacted by this defect, but neither is a supported OS for AnyConnect. - -**Workaround:** - -- Close the Cisco AnyConnect Window and the taskbar mini-icon -- Right click vpnui.exe in the 'Cisco AnyConnect Secure Mobility Client' folder. (C:Program Files (x86)CiscoCisco AnyConnect Secure Mobility Client) -- Click on the 'Run compatibility troubleshooter' button -- Choose 'Try recommended settings' -- The wizard suggests Windows 8 compatibility. -- Click 'Test Program'. This will open the program. -- Close - - - diff --git a/docs.it4i/get-started-with-it4innovations/obtaining-login-credentials/obtaining-login-credentials.md b/docs.it4i/get-started-with-it4innovations/obtaining-login-credentials/obtaining-login-credentials.md deleted file mode 100644 index b01ecc776582103c96ca34b8f70082f77135145d..0000000000000000000000000000000000000000 --- a/docs.it4i/get-started-with-it4innovations/obtaining-login-credentials/obtaining-login-credentials.md +++ /dev/null @@ -1,180 +0,0 @@ -Obtaining Login Credentials -=========================== - -Obtaining Authorization ------------------------ -The computational resources of IT4I are allocated by the Allocation Committee to a [Project](/), investigated by a Primary Investigator. By allocating the computational resources, the Allocation Committee is authorizing the PI to access and use the clusters. The PI may decide to authorize a number of her/his Collaborators to access and use the clusters, to consume the resources allocated to her/his Project. These collaborators will be associated to the Project. The Figure below is depicting the authorization chain: - - - -!!! Note "Note" - You need to either [become the PI](../applying-for-resources/) or [be named as a collaborator](obtaining-login-credentials/#authorization-by-web) by a PI in order to access and use the clusters. - -Head of Supercomputing Services acts as a PI of a project DD-13-5. Joining this project, you may **access and explore the clusters**, use software, development environment and computers via the qexp and qfree queues. You may use these resources for own education/research, no paperwork is required. All IT4I employees may contact the Head of Supercomputing Services in order to obtain **free access to the clusters**. - -Authorization of PI by Allocation Committee -------------------------------------------- - -The PI is authorized to use the clusters by the allocation decision issued by the Allocation Committee.The PI will be informed by IT4I about the Allocation Committee decision. - -Authorization by web --------------------- - -!!! Note "Note" - **Only** for those who already have their IT4I HPC account. This is a preferred way of granting access to project resources. Please, use this method whenever it's possible. - -This is a preferred way of granting access to project resources. Please, use this method whenever it's possible. - -Log in to the [IT4I Extranet portal](https://extranet.it4i.cz) using IT4I credentials and go to the **Projects** section. - -- **Users:** Please, submit your requests for becoming a project member. -- **Primary Investigators:** Please, approve or deny users' requests in the same section. 
- -Authorization by e-mail (an alternative approach) -------------------------------------------------- - -In order to authorize a Collaborator to utilize the allocated resources, the PI should contact the [IT4I support](https://support.it4i.cz/rt/) (E-mail: [support[at]it4i.cz](mailto:support@it4i.cz)) and provide following information: - -1. Identify your project by project ID -2. Provide list of people, including himself, who are authorized to use the resources allocated to the project. The list must include full name, e-mail and affiliation. Provide usernames as well, if collaborator login access already exists on the IT4I systems. -3. Include "Authorization to IT4Innovations" into the subject line. - -Example (except the subject line which must be in English, you may use Czech or Slovak language for communication with us): - -```bash - Subject: Authorization to IT4Innovations - - Dear support, - - Please include my collaborators to project OPEN-0-0. - - John Smith, john.smith@myemail.com, Department of Chemistry, MIT, US - Jonas Johansson, jjohansson@otheremail.se, Department of Physics, Royal Institute of Technology, Sweden - Luisa Fibonacci, lf@emailitalia.it, Department of Mathematics, National Research Council, Italy - - Thank you, - PI - (Digitally signed) -``` - -Should the above information be provided by e-mail, the e-mail **must be** digitally signed. Read more on [digital signatures](obtaining-login-credentials/#the-certificates-for-digital-signatures) below. - -The Login Credentials ---------------------- - -Once authorized by PI, every person (PI or Collaborator) wishing to access the clusters, should contact the [IT4I support](https://support.it4i.cz/rt/) (E-mail: [support[at]it4i.cz](mailto:support@it4i.cz)) providing following information: - -1. Project ID -2. Full name and affiliation -3. Statement that you have read and accepted the [Acceptable use policy document](http://www.it4i.cz/acceptable-use-policy.pdf) (AUP). -4. Attach the AUP file. -5. Your preferred username, max 8 characters long. The preferred username must associate your surname and name or be otherwise derived from it. Only alphanumeric sequences, dash and underscore signs are allowed. -6. In case you choose [Alternative way to personal certificate](obtaining-login-credentials/#alternative-way-of-getting-personal-certificate), a **scan of photo ID** (personal ID or passport or driver license) is required - -Example (except the subject line which must be in English, you may use Czech or Slovak language for communication with us): - -```bash - Subject: Access to IT4Innovations - - Dear support, - - Please open the user account for me and attach the account to OPEN-0-0 - Name and affiliation: John Smith, john.smith@myemail.com, Department of Chemistry, MIT, US - I have read and accept the Acceptable use policy document (attached) - - Preferred username: johnsm - - Thank you, - John Smith - (Digitally signed) -``` - -Should the above information be provided by e-mail, the e-mail **must be** digitally signed. To sign an e-mail, you need digital certificate. Read more on [digital signatures](obtaining-login-credentials/#the-certificates-for-digital-signatures) below. - -Digital signature allows us to confirm your identity in remote electronic communication and provides an encrypted channel to exchange sensitive information such as login credentials. 
After receiving your signed e-mail with the requested information, we will send you your login credentials (user name, key, passphrase and password) to access the IT4I systems. - -We accept certificates issued by any widely respected certification authority. - -For various reasons we do not accept PGP keys.** Please, use only X.509 PKI certificates for communication with us.** - -You will receive your personal login credentials by protected e-mail. The login credentials include: - -1. username -2. ssh private key and private key passphrase -3. system password - -The clusters are accessed by the [private key](../accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) and username. Username and password is used for login to the information systems listed on <http://support.it4i.cz/>. - -Change Passphrase ------------------ - -On Linux, use - -```bash -local $ ssh-keygen -f id_rsa -p -``` - -On Windows, use [PuTTY Key Generator](../accessing-the-clusters/shell-access-and-data-transfer/puttygen/). - -Change Password ---------------- - -Change password in your user profile at <https://extranet.it4i.cz/user/> - -The Certificates for Digital Signatures ---------------------------------------- - -We accept personal certificates issued by any widely respected certification authority (CA). This includes certificates by CAs organized in International Grid Trust Federation (<http://www.igtf.net/>), its European branch EUGridPMA - <https://www.eugridpma.org/> and its member organizations, e.g. the CESNET certification authority - <https://tcs-p.cesnet.cz/confusa/>. The Czech *"Qualified certificate" (Kvalifikovaný certifikát)* (provided by <http://www.postsignum.cz/> or <http://www.ica.cz/Kvalifikovany-certifikat.aspx>), that is used in electronic contact with Czech authorities is accepted as well. - -Certificate generation process is well-described here: - -- [How to generate a personal TCS certificate in Mozilla Firefox web browser (in Czech)](http://idoc.vsb.cz/xwiki/wiki/infra/view/uzivatel/moz-cert-gen) - -A FAQ about certificates can be found here: [Certificates FAQ](certificates-faq/). - -Alternative Way to Personal Certificate ---------------------------------------- - -Follow these steps **only** if you can not obtain your certificate in a standard way. In case you choose this procedure, please attach a **scan of photo ID** (personal ID or passport or drivers license) when applying for [login credentials](obtaining-login-credentials/#the-login-credentials). - -1. Go to <https://www.cacert.org/>. - - If there's a security warning, just acknowledge it. -2. Click *Join*. -3. Fill in the form and submit it by the *Next* button. - - Type in the e-mail address which you use for communication with us. - - Don't forget your chosen *Pass Phrase*. -4. You will receive an e-mail verification link. Follow it. -5. After verifying, go to the CAcert's homepage and login using *Password Login*. -6. Go to *Client Certificates* -> *New*. -7. Tick *Add* for your e-mail address and click the *Next* button. -8. Click the *Create Certificate Request* button. -9. You'll be redirected to a page from where you can download/install your certificate. - - Simultaneously you'll get an e-mail with a link to the certificate. 
- -Installation of the Certificate Into Your Mail Client ------------------------------------------------------ - -The procedure is similar to the following guides: - -MS Outlook 2010 - -- [How to Remove, Import, and Export Digital certificates](http://support.microsoft.com/kb/179380) -- [Importing a PKCS #12 certificate (in Czech)](http://idoc.vsb.cz/xwiki/wiki/infra/view/uzivatel/outl-cert-imp) - -Mozilla Thudnerbird - -- [Installing an SMIME certificate](http://kb.mozillazine.org/Installing_an_SMIME_certificate) -- [Importing a PKCS #12 certificate (in Czech)](http://idoc.vsb.cz/xwiki/wiki/infra/view/uzivatel/moz-cert-imp) - -End of User Account Lifecycle ------------------------------ - -User accounts are supported by membership in active Project(s) or by affiliation to IT4Innovations. User accounts, that loose the support (meaning, are not attached to an active project and are not affiliated with IT4I), will be deleted 1 year after the last project to which they were attached expires. - -User will get 3 automatically generated warning e-mail messages of the pending removal:. - -- First message will be sent 3 months before the removal -- Second message will be sent 1 month before the removal -- Third message will be sent 1 week before the removal. - -The messages will inform about the projected removal date and will challenge the user to migrate her/his data diff --git a/docs.it4i/img/cn_m_cell b/docs.it4i/img/cn_m_cell.jpg similarity index 100% rename from docs.it4i/img/cn_m_cell rename to docs.it4i/img/cn_m_cell.jpg diff --git a/docs.it4i/img/cn_mic b/docs.it4i/img/cn_mic deleted file mode 100644 index 4e895b76c3c7b94ae1062eb6e06392ba15242338..0000000000000000000000000000000000000000 Binary files a/docs.it4i/img/cn_mic and /dev/null differ diff --git a/docs.it4i/img/cn_mic-1 b/docs.it4i/img/cn_mic-1.jpg similarity index 100% rename from docs.it4i/img/cn_mic-1 rename to docs.it4i/img/cn_mic-1.jpg diff --git a/docs.it4i/img/cn_mic.jpg b/docs.it4i/img/cn_mic.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7b9f2f8326ca9c0c0e093aad12a0c80d8a28444f Binary files /dev/null and b/docs.it4i/img/cn_mic.jpg differ diff --git a/docs.it4i/img/salomon-2 b/docs.it4i/img/salomon-2.jpg similarity index 100% rename from docs.it4i/img/salomon-2 rename to docs.it4i/img/salomon-2.jpg diff --git a/docs.it4i/img/salomon b/docs.it4i/img/salomon.jpg similarity index 100% rename from docs.it4i/img/salomon rename to docs.it4i/img/salomon.jpg diff --git a/docs.it4i/img/virtualization-job-workflow b/docs.it4i/img/virtualization-job-workflow.png similarity index 100% rename from docs.it4i/img/virtualization-job-workflow rename to docs.it4i/img/virtualization-job-workflow.png diff --git a/docs.it4i/index.md b/docs.it4i/index.md index e962135248a6e396e1fcc27ca6787d8da3ec533c..7e97161c12a16c0a8bec4540a77760cebf122063 100644 --- a/docs.it4i/index.md +++ b/docs.it4i/index.md @@ -1,51 +1,48 @@ -Documentation -============= +# Documentation -Welcome to IT4Innovations documentation pages. The IT4Innovations national supercomputing center operates supercomputers [Salomon](/salomon/introduction/) and [Anselm](/anselm-cluster-documentation/introduction/). The supercomputers are [available](get-started-with-it4innovations/applying-for-resources/) to academic community within the Czech Republic and Europe and industrial community worldwide. The purpose of these pages is to provide a comprehensive documentation on hardware, software and usage of the computers. 
+Welcome to IT4Innovations documentation pages. The IT4Innovations national supercomputing center operates the supercomputers [Salomon](/salomon/introduction/) and [Anselm](/anselm/introduction/). The supercomputers are [available](general/applying-for-resources/) to the academic community within the Czech Republic and Europe, and to the industrial community worldwide. The purpose of these pages is to provide comprehensive documentation on the hardware, software and usage of the computers.

- How to read the documentation
------------------------------- 
+## How to Read the Documentation

1. Read the list in the left column. Select the subject of interest. Alternatively, use the Search in the upper right corner.
1. Scan for all the notes and reminders on the page.
1. Read the details if still more information is needed. **Look for examples** illustrating the concepts.

-Getting Help and Support
------------------------
-!!! Note "Note"
-    Contact [support [at] it4i.cz](mailto:support%20%5Bat%5D%20it4i.cz) for help and support regarding the cluster technology at IT4Innovations. Please use **Czech**, **Slovak** or **English** language for communication with us. Follow the status of your request to IT4Innovations at [support.it4i.cz/rt](http://support.it4i.cz/rt).
+## Getting Help and Support
+
+!!! note
+    Contact [support\[at\]it4i.cz](mailto:support@it4i.cz) for help and support regarding the cluster technology at IT4Innovations. Please use the **Czech**, **Slovak** or **English** language for communication with us. Follow the status of your request to IT4Innovations at [support.it4i.cz/rt](http://support.it4i.cz/rt).

Use your IT4Innovations username and password to log in to the [support](http://support.it4i.cz/) portal.

-Required Proficiency
--------------------
-!!! Note "Note"
-    You need basic proficiency in Linux environment.
+## Required Proficiency
+
+!!! note
+    You need basic proficiency in the Linux environment.

In order to use the system for your calculations, you need basic proficiency in the Linux environment. To gain this proficiency, we recommend reading the [introduction to Linux](http://www.tldp.org/LDP/intro-linux/html/) operating system environment and installing a Linux distribution on your personal computer. A good choice might be the [CentOS](http://www.centos.org/) distribution, as it is similar to the systems on the clusters at IT4Innovations. It's easy to install and use. In fact, any distribution would do.

-!!! Note "Note"
-    Learn how to parallelize your code!
+!!! note
+    Learn how to parallelize your code!

In many cases, you will run your own code on the cluster. In order to fully exploit the cluster, you will need to carefully consider how to utilize all the cores available on the node and how to use multiple nodes at the same time. You need to **parallelize** your code. Proficiency in MPI, OpenMP, CUDA, UPC or GPI2 programming may be gained via the [training provided by IT4Innovations](http://prace.it4i.cz).
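As a minimal illustration of the idea, a sketch of launching a parallel code on one node (the module name and core count are assumptions; consult the cluster documentation for the exact values):

```bash
$ module load OpenMPI    # make an MPI implementation available
$ mpirun -np 16 ./mycode # run 16 MPI processes, one per core of the node
```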
-- **job:** a calculation running on the supercomputer - The job allocates and utilizes resources of the supercomputer for certain time. -- **HPC:** High Performance Computing -- **HPC (computational) resources:** corehours, storage capacity, software licences -- **code:** a program -- **primary investigator (PI):** a person responsible for execution of computational project and utilization of computational resources allocated to that project -- **collaborator:** a person participating on execution of computational project and utilization of computational resources allocated to that project -- **project:** a computational project under investigation by the PI - The project is identified by the project ID. The computational resources are allocated and charged per project. -- **jobscript:** a script to be executed by the PBS Professional workload manager - -Conventions ------------ +## Terminology Frequently Used on These Pages + +* **node:** a computer, interconnected by network to other computers - Computational nodes are powerful computers, designed and dedicated for executing demanding scientific computations. +* **core:** processor core, a unit of processor, executing computations +* **corehours:** wall clock hours of processor core time - Each node is equipped with **X** processor cores, provides **X** corehours per 1 wall clock hour. +* **job:** a calculation running on the supercomputer - The job allocates and utilizes resources of the supercomputer for certain time. +* **HPC:** High Performance Computing +* **HPC (computational) resources:** corehours, storage capacity, software licences +* **code:** a program +* **primary investigator (PI):** a person responsible for execution of computational project and utilization of computational resources allocated to that project +* **collaborator:** a person participating on execution of computational project and utilization of computational resources allocated to that project +* **project:** a computational project under investigation by the PI - The project is identified by the project ID. The computational resources are allocated and charged per project. +* **jobscript:** a script to be executed by the PBS Professional workload manager + +## Conventions + In this documentation, you will find a number of pages containing examples. We use the following conventions: Cluster command prompt @@ -60,6 +57,6 @@ Your local linux host command prompt local $ ``` -Errata -------- -Although we have taken every care to ensure the accuracy of our content, mistakes do happen. If you find a mistake in the text or the code we would be grateful if you would report this to us. By doing so, you can save other readers from frustration and help us improve subsequent versions of this documentation. If you find any errata, please report them by visiting [http://support.it4i.cz/rt](http://support.it4i.cz/rt), creating a new ticket, and entering the details of your errata. Once your errata are verified, your submission will be accepted and the errata will be uploaded on our website. \ No newline at end of file +## Errata + +Although we have taken every care to ensure the accuracy of our content, mistakes do happen. If you find a mistake in the text or the code we would be grateful if you would report this to us. By doing so, you can save other readers from frustration and help us improve subsequent versions of this documentation. 
If you find any errata, please report them by visiting <http://support.it4i.cz/rt>, creating a new ticket, and entering the details of your errata. Once your errata are verified, your submission will be accepted and the errata will be uploaded on our website. diff --git a/docs.it4i/modules-anselm.md b/docs.it4i/modules-anselm.md index d65c3e72baf16323ead6357ec6455542e63da446..d2e04c5900981acf1344800a46a80a3252c77588 100644 --- a/docs.it4i/modules-anselm.md +++ b/docs.it4i/modules-anselm.md @@ -1,430 +1,430 @@ -# List of Available Modules +# Available Modules ## Core -|Module|Description|Available versions| -|--|--|--| -|**lmod**| |<nobr>7.2.2.lua</nobr>| -|**settarg**| |<nobr>7.2.2.lua</nobr>| +| Module | Description | +| ------ | ----------- | +| lmod | | +| settarg | | ## Bio -|Module|Description|Available versions| -|--|--|--| -|**[almost](http://www-almost.ch.cam.ac.uk/site)**|all atom molecular simulation toolkit - is a fast and flexible molecular modeling environment that provides powerful and efficient algorithms for molecular simulation, homology modeling, de novo design and ab-initio calculations.|<nobr>2.1.0-foss-2015g</br>2.1.0-foss-2016a</nobr>| -|**bowtie2**| |<nobr>2.2.3</nobr>| -|**[GROMACS](http://www.gromacs.org)**|GROMACS is a versatile package to perform molecular dynamics, i.e. simulate the Newtonian equations of motion for systems with hundreds to millions of particles.|<nobr>5.1.2-intel-2015b-hybrid-single-cuda</br>5.1.2-intel-2016a-hybrid</br>5.1.2-intel-2015b-hybrid-single-CUDA-7.5-PLUMED-2.2.1</br>5.1.2-intel-2015b-hybrid-single-CUDA-7.5-PLUMED-2.2.1-test</nobr>| -|**[PLUMED](http://www.plumed-code.org)**|PLUMED is an open source library for free energy calculations in molecular systems which works together with some of the most popular molecular dynamics engines. Free energy calculations can be performed as a function of many order parameters with a particular focus on biological problems, using state of the art methods such as metadynamics, umbrella sampling and Jarzynski-equation based steered MD. The software, written in C++, can be easily interfaced with both fortran and C/C++ codes.|<nobr>2.3b-foss-2016a</nobr>| +| Module | Description | +| ------ | ----------- | +| [almost](http://www-almost.ch.cam.ac.uk/site) | all atom molecular simulation toolkit - is a fast and flexible molecular modeling environment that provides powerful and efficient algorithms for molecular simulation, homology modeling, de novo design and ab-initio calculations. | +| bowtie2 | | +| [GROMACS](http://www.gromacs.org) | GROMACS is a versatile package to perform molecular dynamics, i.e. simulate the Newtonian equations of motion for systems with hundreds to millions of particles. | +| [PLUMED](http://www.plumed-code.org) | PLUMED is an open source library for free energy calculations in molecular systems which works together with some of the most popular molecular dynamics engines. Free energy calculations can be performed as a function of many order parameters with a particular focus on biological problems, using state of the art methods such as metadynamics, umbrella sampling and Jarzynski-equation based steered MD. The software, written in C++, can be easily interfaced with both fortran and C/C++ codes. 
| ## Bullxde -|Module|Description|Available versions| -|--|--|--| -|**bullxde**| |<nobr>2.0</nobr>| +| Module | Description | +| ------ | ----------- | +| bullxde | | ## Bullxmpi -|Module|Description|Available versions| -|--|--|--| -|**bullxmpi**| |<nobr>bullxmpi-1.2.4.3</nobr>| +| Module | Description | +| ------ | ----------- | +| bullxmpi | | ## Chem -|Module|Description|Available versions| -|--|--|--| -|**abinit**| |<nobr>7.10.1-icc-impi</br>7.10.1-gcc-openmpi</br>7.6.2</nobr>| -|**cp2k-mpi**| |<nobr>2.5.1-gcc</nobr>| -|**lammps**| |<nobr>28Jun14</nobr>| -|**molpro**| |<nobr>2010.1-p45-intel</nobr>| -|**namd**| |<nobr>2.8</nobr>| -|**nwchem**| |<nobr>6.1.1</br>6.3-rev2-patch1-openmpi</br>6.3-rev2-patch1-venus</br>6.3-rev2-patch1</nobr>| -|**[ORCA](http://cec.mpg.de/forum/)**|ORCA is a flexible, efficient and easy-to-use general purpose tool for quantum chemistry with specific emphasis on spectroscopic properties of open-shell molecules. It features a wide variety of standard quantum chemical methods ranging from semiempirical methods to DFT to single- and multireference correlated ab initio methods. It can also treat environmental and relativistic effects.|<nobr>3_0_3-linux_x86-64</nobr>| -|**[PLUMED](http://www.plumed-code.org)**|PLUMED is an open source library for free energy calculations in molecular systems which works together with some of the most popular molecular dynamics engines. Free energy calculations can be performed as a function of many order parameters with a particular focus on biological problems, using state of the art methods such as metadynamics, umbrella sampling and Jarzynski-equation based steered MD. The software, written in C++, can be easily interfaced with both fortran and C/C++ codes.|<nobr>2.2.1-intel-2015b</nobr>| -|**[QuantumESPRESSO](http://www.pwscf.org/)**|Quantum ESPRESSO is an integrated suite of computer codes for electronic-structure calculations and materials modeling at the nanoscale. It is based on density-functional theory, plane waves, and pseudopotentials (both norm-conserving and ultrasoft).|<nobr>5.4.0-intel-2017.00</nobr>| -|**[xdrfile](http://www.gromacs.org/Developer_Zone/Programming_Guide/XTC_Library)**|XTC library|<nobr>1.1.4-foss-2016a</br>1.1.4-foss-2015g</br>1.1.4-intel-2015b</nobr>| +| Module | Description | +| ------ | ----------- | +| abinit | | +| cp2k-mpi | | +| lammps | | +| molpro | | +| namd | | +| nwchem | | +| [ORCA](http://cec.mpg.de/forum/) | ORCA is a flexible, efficient and easy-to-use general purpose tool for quantum chemistry with specific emphasis on spectroscopic properties of open-shell molecules. It features a wide variety of standard quantum chemical methods ranging from semiempirical methods to DFT to single- and multireference correlated ab initio methods. It can also treat environmental and relativistic effects. | +| [PLUMED](http://www.plumed-code.org) | PLUMED is an open source library for free energy calculations in molecular systems which works together with some of the most popular molecular dynamics engines. Free energy calculations can be performed as a function of many order parameters with a particular focus on biological problems, using state of the art methods such as metadynamics, umbrella sampling and Jarzynski-equation based steered MD. The software, written in C++, can be easily interfaced with both fortran and C/C++ codes. 
| +| [QuantumESPRESSO](http://www.pwscf.org/) | Quantum ESPRESSO is an integrated suite of computer codes for electronic-structure calculations and materials modeling at the nanoscale. It is based on density-functional theory, plane waves, and pseudopotentials (both norm-conserving and ultrasoft). | +| [xdrfile](http://www.gromacs.org/Developer_Zone/Programming_Guide/XTC_Library) | XTC library | ## Compilers -|Module|Description|Available versions| -|--|--|--| -|**bupc**| |<nobr>2.16.2</nobr>| -|**chicken**| |<nobr>4.8.0.6</nobr>| -|**gcc**| |<nobr>4.9.0</br>5.4.0</br>4.8.1</nobr>| -|**[GCC](http://gcc.gnu.org/)**|The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Java, and Ada, as well as libraries for these languages (libstdc++, libgcj,...).|<nobr>4.9.3-binutils-2.25</br>5.1.0-binutils-2.25</br>5.3.0-2.26</br>4.9.3</br>4.9.3-2.25</br>5.3.0-binutils-2.25</nobr>| -|**[GCCcore](http://gcc.gnu.org/)**|The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Java, and Ada, as well as libraries for these languages (libstdc++, libgcj,...).|<nobr>5.3.0</br>4.9.3</br>5.4.0</nobr>| -|**gupc**| |<nobr>4.8.0.3</nobr>| -|**[icc](http://software.intel.com/en-us/intel-compilers/)**|C and C++ compiler from Intel|<nobr>2015.3.187-GNU-5.1.0-2.25</br>2017.0.098-GCC-5.4.0-2.26</br>2016.1.150-GCC-4.9.3-2.25</br>2015.3.187-GNU-4.9.3-2.25</nobr>| -|**[ifort](http://software.intel.com/en-us/intel-compilers/)**|Fortran compiler from Intel|<nobr>2015.3.187-GNU-5.1.0-2.25</br>2017.0.098-GCC-5.4.0-2.26</br>2016.1.150-GCC-4.9.3-2.25</br>2015.3.187-GNU-4.9.3-2.25</nobr>| -|**java**| |<nobr>1.7</nobr>| -|**llvm**| |<nobr>3.6.0</nobr>| +| Module | Description | +| ------ | ----------- | +| bupc | | +| chicken | | +| gcc | | +| [GCC](http://gcc.gnu.org/) | The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Java, and Ada, as well as libraries for these languages (libstdc++, libgcj,...). | +| [GCCcore](http://gcc.gnu.org/) | The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Java, and Ada, as well as libraries for these languages (libstdc++, libgcj,...). | +| gupc | | +| [icc](http://software.intel.com/en-us/intel-compilers/) | C and C++ compiler from Intel | +| [ifort](http://software.intel.com/en-us/intel-compilers/) | Fortran compiler from Intel | +| java | | +| llvm | | ## Data -|Module|Description|Available versions| -|--|--|--| -|**[GDAL](http://www.gdal.org/)**|GDAL is a translator library for raster geospatial data formats that is released under an X/MIT style Open Source license by the Open Source Geospatial Foundation. As a library, it presents a single abstract data model to the calling application for all supported formats. It also comes with a variety of useful commandline utilities for data translation and processing.|<nobr>2.1.0-foss-2015g</br>1.9.2-foss-2015g</nobr>| -|**[HDF5](http://www.hdfgroup.org/HDF5/)**|HDF5 is a unique technology suite that makes possible the management of extremely large and complex data collections.|<nobr>1.8.16-foss-2016a</br>1.8.16-intel-2016.01</br>1.8.16-intel-2015b</nobr>| +| Module | Description | +| ------ | ----------- | +| [GDAL](http://www.gdal.org/) | GDAL is a translator library for raster geospatial data formats that is released under an X/MIT style Open Source license by the Open Source Geospatial Foundation. As a library, it presents a single abstract data model to the calling application for all supported formats. 
It also comes with a variety of useful commandline utilities for data translation and processing. | +| [HDF5](http://www.hdfgroup.org/HDF5/) | HDF5 is a unique technology suite that makes possible the management of extremely large and complex data collections. | ## Debugger -|Module|Description|Available versions| -|--|--|--| -|**[Forge](http://www.allinea.com/products/develop-allinea-forge)**|Allinea Forge is the complete toolsuite for software development - with everything needed to debug, profile, optimize, edit and build C, C++ and FORTRAN applications on Linux for high performance - from single threads through to complex parallel HPC codes with MPI, OpenMP, threads or CUDA.|<nobr>6.0.6</br>5.7</br>6.1.2.lua</br>6.0.5</br>5.1-43967</nobr>| -|**[PerformanceReports](http://www.allinea.com/products/allinea-performance-reports)**|Allinea Performance Reports are the most effective way to characterize and understand the performance of HPC application runs. One single-page HTML report elegantly answers a range of vital questions for any HPC site. - Is this application well-optimized for the system and the processors it is running on? - Does it benefit from running at this scale? - Are there I/O, networking or threading bottlenecks affecting performance? - Which hardware, software or configuration changes can we make to improve performance further. - How much energy did this application use?|<nobr>6.0.6</nobr>| +| Module | Description | +| ------ | ----------- | +| [Forge](http://www.allinea.com/products/develop-allinea-forge) | Allinea Forge is the complete toolsuite for software development - with everything needed to debug, profile, optimize, edit and build C, C++ and FORTRAN applications on Linux for high performance - from single threads through to complex parallel HPC codes with MPI, OpenMP, threads or CUDA. | +| [PerformanceReports](http://www.allinea.com/products/allinea-performance-reports) | Allinea Performance Reports are the most effective way to characterize and understand the performance of HPC application runs. One single-page HTML report elegantly answers a range of vital questions for any HPC site. - Is this application well-optimized for the system and the processors it is running on? - Does it benefit from running at this scale? - Are there I/O, networking or threading bottlenecks affecting performance? - Which hardware, software or configuration changes can we make to improve performance further. - How much energy did this application use? | ## Devel -|Module|Description|Available versions| -|--|--|--| -|**[Autoconf](http://www.gnu.org/software/autoconf/)**|Autoconf is an extensible package of M4 macros that produce shell scripts to automatically configure software source code packages. These scripts can adapt the packages to many kinds of UNIX-like systems without manual user intervention. 
Autoconf creates a configuration script for a package from a template file that lists the operating system features that the package can use, in the form of M4 macro calls.|<nobr>2.69</br>2.69-GNU-4.9.3-2.25</br>2.69-intel-2015b</br>2.69-intel-2017.00</br>2.69-foss-2016a</br>2.69-GNU-5.1.0-2.25</nobr>| -|**[Automake](http://www.gnu.org/software/automake/automake.html)**|Automake: GNU Standards-compliant Makefile generator|<nobr>1.15-GNU-5.1.0-2.25</br>1.15-foss-2016a</br>1.15-GNU-4.9.3-2.25</br>1.15-intel-2015b</br>1.15</br>1.15-intel-2017.00</nobr>| -|**[Autotools](http://autotools.io)**|This bundle collect the standard GNU build tools: Autoconf, Automake and libtool|<nobr>20150215-GNU-4.9.3-2.25</br>20150215-intel-2017.00</br>20150215-GNU-5.1.0-2.25</br>20150215-intel-2015b</br>20150215-foss-2016a</br>20150215</nobr>| -|**[Boost](http://www.boost.org/)**|Boost provides free peer-reviewed portable C++ source libraries.|<nobr>1.58.0-foss-2015g-Python-2.7.9</br>1.60.0-intel-2016a</br>1.59.0-intel-2015b</br>1.60.0-foss-2015g-Python-2.7.9</br>1.61.0-foss-2016a-serial</br>1.60.0-foss-2015g-Python-2.7.9.lua</nobr>| -|**[CMake](http://www.cmake.org)**|CMake, the cross-platform, open-source build system. CMake is a family of tools designed to build, test and package software.|<nobr>3.5.2-intel-2016a</br>3.6.2.lua</br>3.4.1-foss-2016a</br>3.5.2-foss-2016a</br>3.3.1-GCC-4.9.3-2.25</br>eb-CMake-3.6.2-gc3pie-job-20165321-UTC-09-53-36.log</br>3.4.1-intel-2015b</br>3.3.1-foss-2016a</br>3.3.1-foss-2015g</br>3.5.2</nobr>| -|**[Doxygen](http://www.doxygen.org)**|Doxygen is a documentation system for C++, C, Java, Objective-C, Python, IDL (Corba and Microsoft flavors), Fortran, VHDL, PHP, C#, and to some extent D.|<nobr>1.8.11</nobr>| -|**[fontsproto](http://www.freedesktop.org/wiki/Software/xlibs)**|X11 font extension wire protocol|<nobr>2.1.3</nobr>| -|**[guile](http://www.gnu.org/software/guile)**|Guile is the GNU Ubiquitous Intelligent Language for Extensions, the official extension language for the GNU operating system.|<nobr>1.8.8</br>1.8.8-intel-2015b</br>1.8.8-foss-2016a</nobr>| -|**[libSM](http://www.freedesktop.org/wiki/Software/xlibs)**|X11 Session Management library, which allows for applications to both manage sessions, and make use of session managers to save and restore their state for later use.|<nobr>1.2.2</nobr>| -|**[M4](http://www.gnu.org/software/m4/m4.html)**|GNU M4 is an implementation of the traditional Unix macro processor. It is mostly SVR4 compatible although it has some extensions (for example, handling more than 9 positional parameters to macros). 
GNU M4 also has built-in functions for including files, running shell commands, doing arithmetic, etc.|<nobr>1.4.17</br>1.4.17-GCCcore-5.3.0</br>1.4.17-GCC-4.9.3</br>1.4.17-GCC-4.9.3-binutils-2.25</br>1.4.17-GCCcore-5.4.0</br>1.4.17-foss-2016a</br>1.4.16-intel-2015b</br>1.4.17-GNU-4.9.3-2.25</br>1.4.17-GCCcore-4.9.3</br>1.4.17-GNU-5.1.0-2.25</br>1.4.17-intel-2015b</br>1.4.17-intel-2017.00</br>1.4.17-GCC-5.1.0-binutils-2.25</nobr>| -|**[make](http://www.gnu.org/software/make/make.html)**|make-3.82: GNU version of make utility|<nobr>3.82</nobr>| -|**[makedepend](http://www.linuxfromscratch.org/blfs/view/svn/x/makedepend.html)**|The makedepend package contains a C-preprocessor like utility to determine build-time dependencies.|<nobr>1.0.4</nobr>| -|**Maven**| |<nobr>3.3.9.lua</br>3.3.9</nobr>| -|**[ncurses](http://www.gnu.org/software/ncurses/)**|The Ncurses (new curses) library is a free software emulation of curses in System V Release 4.0, and more. It uses Terminfo format, supports pads and color and multiple highlights and forms characters and function-key mapping, and has all the other SYSV-curses enhancements over BSD Curses.|<nobr>5.9-intel-2016.01</br>5.9-intel-2015b</br>.6.0</br>5.9-gimkl-2.11.5</br>6.0-foss-2016a</br>5.9-foss-2015g</br>5.9</br>.6.0-intel-2016a.lua</br>5.9-GCC-4.9.3-2.25</br>6.0-intel-2016a</br>6.0-intel-2017.00</br>6.0</br>5.9-GNU-4.9.3-2.25</nobr>| -|**[PCRE](http://www.pcre.org/)**|The PCRE library is a set of functions that implement regular expression pattern matching using the same syntax and semantics as Perl 5.|<nobr>8.37</br>8.37-intel-2016.01</br>8.37-gimkl-2.11.5</br>8.37-foss-2015g</nobr>| -|**[pkg-config](http://www.freedesktop.org/wiki/Software/pkg-config/)**|pkg-config is a helper tool used when compiling applications and libraries. 
It helps you insert the correct compiler options on the command line so an application can use gcc -o test test.c `pkg-config --libs --cflags glib-2.0` for instance, rather than hard-coding values on where to find glib (or other libraries).|<nobr>0.27.1</br>0.27.1-intel-2015b</br>0.29-foss-2016a</nobr>| -|**[Qt](http://qt-project.org/)**|Qt is a comprehensive cross-platform C++ application framework.|<nobr>4.8.6</nobr>| -|**[renderproto](http://www.freedesktop.org/wiki/Software/xlibs)**|Xrender protocol and ancillary headers|<nobr>0.11</nobr>| -|**[SCons](http://www.scons.org/)**|SCons is a software construction tool.|<nobr>2.3.6-foss-2015g-Python-2.7.9</nobr>| -|**[SQLite](http://www.sqlite.org/)**|SQLite: SQL Database Engine in a C Library|<nobr>3.8.8.1-intel-2015b</br>3.8.8.1-foss-2015g</br>3.8.8.1-foss-2016a</br>3.9.2-intel-2017.00</br>3.8.8.1</br>3.9.2-intel-2015b</br>3.8.8.1-intel-2016.01</br>3.9.2-foss-2016a</nobr>| -|**[xbitmaps](http://www.freedesktop.org/wiki/Software/xlibs)**|provides bitmaps for x|<nobr>1.1.1</nobr>| -|**[xcb-proto](http://xcb.freedesktop.org/)**|The X protocol C-language Binding (XCB) is a replacement for Xlib featuring a small footprint, latency hiding, direct access to the protocol, improved threading support, and extensibility.|<nobr>1.11-Python-2.7.9</nobr>| -|**[xextproto](http://www.freedesktop.org/wiki/Software/xlibs)**|XExtProto protocol headers.|<nobr>7.3.0</nobr>| -|**[xineramaproto](http://www.freedesktop.org/wiki/Software/xlibs)**|X protocol and ancillary headers for xinerama|<nobr>1.2.1</nobr>| -|**[xorg-macros](http://cgit.freedesktop.org/xorg/util/macros)**|X.org macros utilities.|<nobr>1.17</nobr>| -|**[xproto](http://www.freedesktop.org/wiki/Software/xlibs)**|X protocol and ancillary headers|<nobr>7.0.26</nobr>| -|**[xtrans](http://www.freedesktop.org/wiki/Software/xlibs)**|xtrans includes a number of routines to make X implementations transport-independent; at time of writing, it includes support for UNIX sockets, IPv4, IPv6, and DECnet.|<nobr>1.3.5</br>1.3.4</nobr>| +| Module | Description | +| ------ | ----------- | +| [Autoconf](http://www.gnu.org/software/autoconf/) | Autoconf is an extensible package of M4 macros that produce shell scripts to automatically configure software source code packages. These scripts can adapt the packages to many kinds of UNIX-like systems without manual user intervention. Autoconf creates a configuration script for a package from a template file that lists the operating system features that the package can use, in the form of M4 macro calls. | +| [Automake](http://www.gnu.org/software/automake/automake.html) | Automake: GNU Standards-compliant Makefile generator | +| [Autotools](http://autotools.io) | This bundle collect the standard GNU build tools: Autoconf, Automake and libtool | +| [Boost](http://www.boost.org/) | Boost provides free peer-reviewed portable C++ source libraries. | +| [CMake](http://www.cmake.org) | CMake, the cross-platform, open-source build system. CMake is a family of tools designed to build, test and package software. | +| [Doxygen](http://www.doxygen.org) | Doxygen is a documentation system for C++, C, Java, Objective-C, Python, IDL (Corba and Microsoft flavors), Fortran, VHDL, PHP, C#, and to some extent D. | +| [fontsproto](http://www.freedesktop.org/wiki/Software/xlibs) | X11 font extension wire protocol | +| [guile](http://www.gnu.org/software/guile) | Guile is the GNU Ubiquitous Intelligent Language for Extensions, the official extension language for the GNU operating system. 
| +| [libSM](http://www.freedesktop.org/wiki/Software/xlibs) | X11 Session Management library, which allows for applications to both manage sessions, and make use of session managers to save and restore their state for later use. | +| [M4](http://www.gnu.org/software/m4/m4.html) | GNU M4 is an implementation of the traditional Unix macro processor. It is mostly SVR4 compatible although it has some extensions (for example, handling more than 9 positional parameters to macros). GNU M4 also has built-in functions for including files, running shell commands, doing arithmetic, etc. | +| [make](http://www.gnu.org/software/make/make.html) | make-3.82: GNU version of make utility | +| [makedepend](http://www.linuxfromscratch.org/blfs/view/svn/x/makedepend.html) | The makedepend package contains a C-preprocessor like utility to determine build-time dependencies. | +| Maven | | +| [ncurses](http://www.gnu.org/software/ncurses/) | The Ncurses (new curses) library is a free software emulation of curses in System V Release 4.0, and more. It uses Terminfo format, supports pads and color and multiple highlights and forms characters and function-key mapping, and has all the other SYSV-curses enhancements over BSD Curses. | +| [PCRE](http://www.pcre.org/) | The PCRE library is a set of functions that implement regular expression pattern matching using the same syntax and semantics as Perl 5. | +| [pkg-config](http://www.freedesktop.org/wiki/Software/pkg-config/) | pkg-config is a helper tool used when compiling applications and libraries. It helps you insert the correct compiler options on the command line so an application can use gcc -o test test.c `pkg-config --libs --cflags glib-2.0` for instance, rather than hard-coding values on where to find glib (or other libraries). | +| [Qt](http://qt-project.org/) | Qt is a comprehensive cross-platform C++ application framework. | +| [renderproto](http://www.freedesktop.org/wiki/Software/xlibs) | Xrender protocol and ancillary headers | +| [SCons](http://www.scons.org/) | SCons is a software construction tool. | +| [SQLite](http://www.sqlite.org/) | SQLite: SQL Database Engine in a C Library | +| [xbitmaps](http://www.freedesktop.org/wiki/Software/xlibs) | provides bitmaps for x | +| [xcb-proto](http://xcb.freedesktop.org/) | The X protocol C-language Binding (XCB) is a replacement for Xlib featuring a small footprint, latency hiding, direct access to the protocol, improved threading support, and extensibility. | +| [xextproto](http://www.freedesktop.org/wiki/Software/xlibs) | XExtProto protocol headers. | +| [xineramaproto](http://www.freedesktop.org/wiki/Software/xlibs) | X protocol and ancillary headers for xinerama | +| [xorg-macros](http://cgit.freedesktop.org/xorg/util/macros) | X.org macros utilities. | +| [xproto](http://www.freedesktop.org/wiki/Software/xlibs) | X protocol and ancillary headers | +| [xtrans](http://www.freedesktop.org/wiki/Software/xlibs) | xtrans includes a number of routines to make X implementations transport-independent; at time of writing, it includes support for UNIX sockets, IPv4, IPv6, and DECnet. 
| ## Engineering -|Module|Description|Available versions| -|--|--|--| -|**adams**| |<nobr>2013.2</nobr>| -|**ansys**| |<nobr>15.0.x</br>16.0.x</br>14.5.x</nobr>| -|**beopest**| |<nobr>12.0.1</br>13.3</br>12.2</nobr>| -|**blender**| |<nobr>2.71</nobr>| -|**Code_Saturne**| |<nobr>3.0.5</nobr>| -|**comsol**| |<nobr>43b-EDU</br>44-EDU</br>50-EDU</br>50-COM</br>44-COM</br>43b-COM</nobr>| -|**digimat**| |<nobr>5.0.1</nobr>| -|**Discovery_Studio**| |<nobr>4.0</nobr>| -|**dytran**| |<nobr>2013.0.1</nobr>| -|**fds**| |<nobr>5.5.3</br>6.svn</br>5.5.3-omp</nobr>| -|**hypermesh**| |<nobr>12.0.110</nobr>| -|**hyperworks**| |<nobr>13.0</nobr>| -|**lsdyna**| |<nobr>7.x.x</nobr>| -|**lsprepost**| |<nobr>4.2</nobr>| -|**lux**| |<nobr>1.3.1</nobr>| -|**marc**| |<nobr>2011</br>2013.1</nobr>| -|**matlab**| |<nobr>R2014a-COM</br>R2013a-COM</br>R2013a-EDU</br>R2014a-EDU</nobr>| -|**maxwell**| |<nobr>3.0</nobr>| -|**modflow-2005**| |<nobr>1.11.00</nobr>| -|**modflow-nwt**| |<nobr>1.0.9-aquaveo</br>1.0.9</nobr>| -|**nastran**| |<nobr>2013.1.1</nobr>| -|**openfoam**| |<nobr>2.2.1-icc-openmpi1.6.5-DP</br>2.2.1-gcc481-openmpi1.6.5-SP</br>2.2.1-gcc481-openmpi1.6.5-DP</br>2.2.2-icc-openmpi1.8.1-DP</br>2.2.1-icc-impi4.1.1.036-DP</nobr>| -|**paraview**| |<nobr>4.0.1-gcc481-bullxmpi1.2.4.1-osmesa10.0</nobr>| -|**pest**| |<nobr>13.0</nobr>| -|**wien2k**| |<nobr>13.1</br>14.2</nobr>| +| Module | Description | +| ------ | ----------- | +| adams | | +| ansys | | +| beopest | | +| blender | | +| Code_Saturne | | +| comsol | | +| digimat | | +| Discovery_Studio | | +| dytran | | +| fds | | +| hypermesh | | +| hyperworks | | +| lsdyna | | +| lsprepost | | +| lux | | +| marc | | +| matlab | | +| maxwell | | +| modflow-2005 | | +| modflow-nwt | | +| nastran | | +| openfoam | | +| paraview | | +| pest | | +| wien2k | | ## Environments -|Module|Description|Available versions| -|--|--|--| -|**bullxde**| |<nobr>2.0</nobr>| -|**PrgEnv-gnu**| |<nobr>4.4.6</br>4.4.6-test</br>4.8.1</nobr>| -|**PrgEnv-intel**| |<nobr>15.0.3</br>13.5.192</br>14.0.1</nobr>| +| Module | Description | +| ------ | ----------- | +| bullxde | | +| PrgEnv-gnu | | +| PrgEnv-intel | | ## Lang -|Module|Description|Available versions| -|--|--|--| -|**[Bison](http://www.gnu.org/software/bison)**|Bison is a general-purpose parser generator that converts an annotated context-free grammar into a deterministic LR or generalized LR (GLR) parser employing LALR(1) parser tables.|<nobr>3.0.4-foss-2016a</br>3.0.4-GCC-4.9.3</br>3.0.4</br>2.7-foss-2015g</br>3.0.2</br>3.0.4-GCCcore-5.4.0</br>2.7</br>3.0.4-intel-2015b</br>2.5-intel-2015b</br>3.0.4-GCC-4.9.3-binutils-2.25</br>3.0.4-GCC-5.1.0-binutils-2.25</br>3.0.4-GCCcore-4.9.3</br>3.0.4-GCCcore-5.3.0</nobr>| -|**[byacc](http://invisible-island.net/byacc/byacc.html)**|Berkeley Yacc (byacc) is generally conceded to be the best yacc variant available. In contrast to bison, it is written to avoid dependencies upon a particular compiler.|<nobr>20150711-intel-2015b</br>20120526</br>20120526-foss-2016a</br>20120526-foss-2015g</br>20120526-intel-2015b</nobr>| -|**[flex](http://flex.sourceforge.net/)**|Flex (Fast Lexical Analyzer) is a tool for generating scanners. 
A scanner, sometimes called a tokenizer, is a program which recognizes lexical patterns in text.|<nobr>2.5.39-foss-2015g</br>2.5.39-GCC-5.1.0-binutils-2.25</br>2.6.0</br>2.5.39-GCC-4.9.3-binutils-2.25</br>2.5.39-GCC-4.9.3</br>2.6.0-GCCcore-5.3.0</br>2.5.35-intel-2015b</br>2.6.0-GCCcore-5.4.0</br>2.5.39</br>2.5.39-foss-2016a</br>2.5.39-GCCcore-4.9.3</br>2.5.39-intel-2015b</nobr>| -|**[Java](http://java.com/)**|Java Platform, Standard Edition (Java SE) lets you develop and deploy Java applications on desktops and servers.|<nobr>1.8.0_51</br>1.7.0_79.lua</br>1.7.0_79</nobr>| -|**[libgdiplus](https://github.com/mono/libgdiplus)**|An Open Source implementation of the GDI+ API.|<nobr>3.12</nobr>| -|**[Lua](http://www.lua.org/)**|Lua is a powerful, fast, lightweight, embeddable scripting language. Lua combines simple procedural syntax with powerful data description constructs based on associative arrays and extensible semantics. Lua is dynamically typed, runs by interpreting bytecode for a register-based virtual machine, and has automatic memory management with incremental garbage collection, making it ideal for configuration, scripting, and rapid prototyping.|<nobr>.5.1.4-8</br>5.1.4-8.lua</nobr>| -|**[NASM](http://www.nasm.us/)**|NASM: General-purpose x86 assembler|<nobr>2.11.05</nobr>| -|**Perl**| |<nobr>5.24.0-GCC-4.9.3-2.25-bare.lua</br>5.20.2-GNU-4.9.3-2.25-bare</nobr>| -|**[Python](http://python.org/)**|Python is a programming language that lets you work more quickly and integrate your systems more effectively.|<nobr>3.5.2-foss-2016a</br>2.7.9-intel-2015b</br>2.7.9-foss-2015g</br>2.7.9-intel-2016.01</br>3.5.2-intel-2017.00</br>2.7.11-intel-2015b</br>2.7.9</nobr>| -|**[Ruby](https://www.ruby-lang.org)**|Ruby is a dynamic, open source programming language with a focus on simplicity and productivity. It has an elegant syntax that is natural to read and easy to write.|<nobr>2.3.1</nobr>| -|**[Tcl](http://www.tcl.tk/)**|Tcl (Tool Command Language) is a very powerful but easy to learn dynamic programming language, suitable for a very wide range of uses, including web and desktop applications, networking, administration, testing and many more.|<nobr>8.6.4</br>8.6.4-intel-2017.00</br>8.6.3-intel-2016.01</br>8.6.3-foss-2016a</br>8.6.3-intel-2015b</br>8.6.4-foss-2016a</br>8.6.4-intel-2015b</br>8.6.3</br>8.6.3-foss-2015g</br>8.5.12</nobr>| +| Module | Description | +| ------ | ----------- | +| [Bison](http://www.gnu.org/software/bison) | Bison is a general-purpose parser generator that converts an annotated context-free grammar into a deterministic LR or generalized LR (GLR) parser employing LALR(1) parser tables. | +| [byacc](http://invisible-island.net/byacc/byacc.html) | Berkeley Yacc (byacc) is generally conceded to be the best yacc variant available. In contrast to bison, it is written to avoid dependencies upon a particular compiler. | +| [flex](http://flex.sourceforge.net/) | Flex (Fast Lexical Analyzer) is a tool for generating scanners. A scanner, sometimes called a tokenizer, is a program which recognizes lexical patterns in text. | +| [Java](http://java.com/) | Java Platform, Standard Edition (Java SE) lets you develop and deploy Java applications on desktops and servers. | +| [libgdiplus](https://github.com/mono/libgdiplus) | An Open Source implementation of the GDI+ API. | +| [Lua](http://www.lua.org/) | Lua is a powerful, fast, lightweight, embeddable scripting language. 
Lua combines simple procedural syntax with powerful data description constructs based on associative arrays and extensible semantics. Lua is dynamically typed, runs by interpreting bytecode for a register-based virtual machine, and has automatic memory management with incremental garbage collection, making it ideal for configuration, scripting, and rapid prototyping. | +| [NASM](http://www.nasm.us/) | NASM: General-purpose x86 assembler | +| Perl | | +| [Python](http://python.org/) | Python is a programming language that lets you work more quickly and integrate your systems more effectively. | +| [Ruby](https://www.ruby-lang.org) | Ruby is a dynamic, open source programming language with a focus on simplicity and productivity. It has an elegant syntax that is natural to read and easy to write. | +| [Tcl](http://www.tcl.tk/) | Tcl (Tool Command Language) is a very powerful but easy to learn dynamic programming language, suitable for a very wide range of uses, including web and desktop applications, networking, administration, testing and many more. | ## Lib -|Module|Description|Available versions| -|--|--|--| -|**[libdrm](http://dri.freedesktop.org)**|Direct Rendering Manager runtime library.|<nobr>2.4.27</nobr>| -|**[libffi](http://sourceware.org/libffi/)**|The libffi library provides a portable, high level programming interface to various calling conventions. This allows a programmer to call any function specified by a call interface description at run-time.|<nobr>3.2.1-foss-2016a</br>3.0.13</br>3.1-intel-2015b</br>3.0.13-intel-2015b</br>3.1-intel-2016.01</nobr>| -|**[libfontenc](http://www.freedesktop.org/wiki/Software/xlibs/)**|X11 font encoding library|<nobr>1.1.3</nobr>| -|**[libjpeg-turbo](http://sourceforge.net/projects/libjpeg-turbo/)**|libjpeg-turbo is a fork of the original IJG libjpeg which uses SIMD to accelerate baseline JPEG compression and decompression. libjpeg is a library that implements JPEG image encoding, decoding and transcoding.|<nobr>1.4.0</nobr>| -|**[libmatheval](http://www.gnu.org/software/libmatheval/)**|GNU libmatheval is a library (callable from C and Fortran) to parse and evaluate symbolic expressions input as text.|<nobr>1.1.8</br>1.1.8-foss-2016a</br>1.1.8-intel-2015b</br>1.1.11-intel-2015b</nobr>| -|**[libpng](http://www.libpng.org/pub/png/libpng.html)**|libpng is the official PNG reference library|<nobr>1.6.12</nobr>| -|**[libpthread-stubs](http://xcb.freedesktop.org/)**|The X protocol C-language Binding (XCB) is a replacement for Xlib featuring a small footprint, latency hiding, direct access to the protocol, improved threading support, and extensibility.|<nobr>0.3</nobr>| -|**[libreadline](http://cnswww.cns.cwru.edu/php/chet/readline/rltop.html)**|The GNU Readline library provides a set of functions for use by applications that allow users to edit command lines as they are typed in. Both Emacs and vi editing modes are available. The Readline library includes additional functions to maintain a list of previously-entered command lines, to recall and perhaps reedit those lines, and perform csh-like history expansion on previous commands.|<nobr>.6.3</br>6.3-intel-2016.01</br>6.3-foss-2015g</br>6.3-foss-2016a</br>6.3-intel-2017.00</br>6.3-gimkl-2.11.5</br>6.3-intel-2015b</br>6.3</nobr>| -|**[LibTIFF](http://www.remotesensing.org/libtiff/)**|tiff: Library and tools for reading and writing TIFF data files|<nobr>4.0.3</nobr>| -|**[libtool](http://www.gnu.org/software/libtool)**|GNU libtool is a generic library support script. 
Libtool hides the complexity of using shared libraries behind a consistent, portable interface.|<nobr>2.4.2</br>2.4.6-foss-2016a</br>2.4.2-foss-2015g</br>2.4.6-intel-2017.00</br>2.4.6-intel-2015b</br>2.4.6-GNU-5.1.0-2.25</br>2.4.6</br>2.4.6-GNU-4.9.3-2.25</nobr>| -|**[libunistring](http://www.gnu.org/software/libunistring/)**|This library provides functions for manipulating Unicode strings and for manipulating C strings according to the Unicode standard.|<nobr>0.9.3</br>0.9.3-foss-2016a</br>0.9.3-intel-2015b</nobr>| -|**[libxcb](http://xcb.freedesktop.org/)**|The X protocol C-language Binding (XCB) is a replacement for Xlib featuring a small footprint, latency hiding, direct access to the protocol, improved threading support, and extensibility.|<nobr>1.11-Python-2.7.9</nobr>| -|**[libxml2](http://xmlsoft.org/)**|Libxml2 is the XML C parser and toolchain developed for the Gnome project (but usable outside of the Gnome platform).|<nobr>2.9.3-intel-2016a</br>2.9.3-foss-2016a</br>2.9.2-intel-2015b</nobr>| -|**[SIONlib](http://www.fz-juelich.de/ias/jsc/EN/Expertise/Support/Software/SIONlib/_node.html)**|SIONlib is a scalable I/O library for parallel access to task-local files. The library not only supports writing and reading binary data to or from several thousands of processors into a single or a small number of physical files, but also provides global open and close functions to access SIONlib files in parallel. This package provides a stripped-down installation of SIONlib for use with performance tools (e.g., Score-P), with renamed symbols to avoid conflicts when an application using SIONlib itself is linked against a tool requiring a different SIONlib version.|<nobr>1.6.1-tools</nobr>| -|**[spGPU](https://github.com/davidebarbieri/spgpu)**|spGPU is a set of custom matrix storages and CUDA kernels for sparse linear algebra computing on GPU. It isn't a replacement for cuBLAS/cuSPARSE that should be used for a full featured linear algebra environment on GPU.|<nobr>master-GCC-4.9.3-2.25</nobr>| -|**tbb**| |<nobr>4.4.2.152.lua</br>4.4.2.152</nobr>| -|**zlib**| |<nobr>.1.2.8-foss-2015g.lua</br>1.2.8-intel-2015b</br>1.2.8-GCC-5.1.0-binutils-2.25</br>1.2.8-foss-2016a</br>1.2.8-intel-2017.00</br>1.2.8-GCC-4.9.3-binutils-2.25</br>1.2.8-GCCcore-4.9.3</br>1.2.8-foss-2015g</br>1.2.8</br>1.2.8-intel-2016a</br>1.2.8-intel-2016.01</br>1.2.8-GCC-4.9.3</br>1.2.8-GCCcore-5.3.0</br>1.2.8-GCCcore-5.4.0</nobr>| +| Module | Description | +| ------ | ----------- | +| [libdrm](http://dri.freedesktop.org) | Direct Rendering Manager runtime library. | +| [libffi](http://sourceware.org/libffi/) | The libffi library provides a portable, high level programming interface to various calling conventions. This allows a programmer to call any function specified by a call interface description at run-time. | +| [libfontenc](http://www.freedesktop.org/wiki/Software/xlibs/) | X11 font encoding library | +| [libjpeg-turbo](http://sourceforge.net/projects/libjpeg-turbo/) | libjpeg-turbo is a fork of the original IJG libjpeg which uses SIMD to accelerate baseline JPEG compression and decompression. libjpeg is a library that implements JPEG image encoding, decoding and transcoding. | +| [libmatheval](http://www.gnu.org/software/libmatheval/) | GNU libmatheval is a library (callable from C and Fortran) to parse and evaluate symbolic expressions input as text. 
| +| [libpng](http://www.libpng.org/pub/png/libpng.html) | libpng is the official PNG reference library | +| [libpthread-stubs](http://xcb.freedesktop.org/) | The X protocol C-language Binding (XCB) is a replacement for Xlib featuring a small footprint, latency hiding, direct access to the protocol, improved threading support, and extensibility. | +| [libreadline](http://cnswww.cns.cwru.edu/php/chet/readline/rltop.html) | The GNU Readline library provides a set of functions for use by applications that allow users to edit command lines as they are typed in. Both Emacs and vi editing modes are available. The Readline library includes additional functions to maintain a list of previously-entered command lines, to recall and perhaps reedit those lines, and perform csh-like history expansion on previous commands. | +| [LibTIFF](http://www.remotesensing.org/libtiff/) | tiff: Library and tools for reading and writing TIFF data files | +| [libtool](http://www.gnu.org/software/libtool) | GNU libtool is a generic library support script. Libtool hides the complexity of using shared libraries behind a consistent, portable interface. | +| [libunistring](http://www.gnu.org/software/libunistring/) | This library provides functions for manipulating Unicode strings and for manipulating C strings according to the Unicode standard. | +| [libxcb](http://xcb.freedesktop.org/) | The X protocol C-language Binding (XCB) is a replacement for Xlib featuring a small footprint, latency hiding, direct access to the protocol, improved threading support, and extensibility. | +| [libxml2](http://xmlsoft.org/) | Libxml2 is the XML C parser and toolchain developed for the Gnome project (but usable outside of the Gnome platform). | +| [SIONlib](http://www.fz-juelich.de/ias/jsc/EN/Expertise/Support/Software/SIONlib/_node.html) | SIONlib is a scalable I/O library for parallel access to task-local files. The library not only supports writing and reading binary data to or from several thousands of processors into a single or a small number of physical files, but also provides global open and close functions to access SIONlib files in parallel. This package provides a stripped-down installation of SIONlib for use with performance tools (e.g., Score-P), with renamed symbols to avoid conflicts when an application using SIONlib itself is linked against a tool requiring a different SIONlib version. | +| [spGPU](https://github.com/davidebarbieri/spgpu) | spGPU is a set of custom matrix storages and CUDA kernels for sparse linear algebra computing on GPU. It isn't a replacement for cuBLAS/cuSPARSE that should be used for a full featured linear algebra environment on GPU. 
| +| tbb | | +| zlib | | ## Libraries -|Module|Description|Available versions| -|--|--|--| -|**adios**| |<nobr>1.8.0</nobr>| -|**boost**| |<nobr>1.56-icc-impi</br>1.56-gcc-openmpi</nobr>| -|**dataspaces**| |<nobr>1.4.0</nobr>| -|**fftw2**| |<nobr>2.1.5-icc</br>2.1.5-gcc</nobr>| -|**fftw2-mpi**| |<nobr>2.1.5-icc</br>2.1.5-gcc</nobr>| -|**fftw3**| |<nobr>3.3.3-gcc</br>3.3.3-icc</nobr>| -|**fftw3-mpi**| |<nobr>3.3.3-gcc</br>3.3.3-icc</nobr>| -|**gpi2**| |<nobr>1.0.2</br>1.1.1</br>1.1.0</nobr>| -|**gsl**| |<nobr>1.16-icc</br>1.16-gcc</nobr>| -|**hdf5**| |<nobr>1.8.13</br>1.8.11</nobr>| -|**hdf5-parallel**| |<nobr>1.8.11-gcc</br>1.8.13-gcc</br>1.8.13</br>1.8.13-gcc49</br>1.8.11</nobr>| -|**ipp**| |<nobr>13.5.192</br>15.3.187</br>14.0.1</br>15.2.164</nobr>| -|**libmesh**| |<nobr>0.9.3-petsc-3.4.4-icc-impi-mkl-dbg</br>0.9.3-petsc-3.4.4-icc-impi-mkl-dbg-2d</br>0.9.3-petsc-3.4.4-icc-impi-mkl-opt</nobr>| -|**magma**| |<nobr>.common-mic</br>1.3.0-mic</br>1.1.0-mic</nobr>| -|**mkl**| |<nobr>13.5.192</br>15.3.187</br>14.0.1</br>15.2.164</nobr>| -|**mxml**| |<nobr>2.9</nobr>| -|**netcdf**| |<nobr>4.2.1.1</br>4.3.0</nobr>| -|**netcdf-cxx**| |<nobr>4.2</nobr>| -|**netcdf-fortran**| |<nobr>4.2</nobr>| -|**netcdf-parallel**| |<nobr>4.3.0</nobr>| -|**opencl-rt**| |<nobr>4.5.0.8</nobr>| -|**opencl-sdk**| |<nobr>4.6.0.92</nobr>| -|**petsc**| |<nobr>3.5.3-icc15-impi-mkl-threads-dbg</br>3.5.3-icc15-impi-mkl-threads-opt</br>3.7.3-icc16-impi5-mkl-dbg</br>3.5.3-icc15-impi-mkl-opt</br>3.7.3-icc16-impi5-mkl-opt</br>3.5.3-icc15-impi-mkl-dbg</br>.common_help</br>.common_petsc_pre</br>.common_petsc_post</br>.common_functions</nobr>| -|**plasma**| |<nobr>2.6.0</nobr>| -|**slepc**| |<nobr>3.7.2-icc16-impi5-mkl-opt</br>3.7.2-icc16-impi5-mkl-dbg</br>3.4.4-icc15-impi-mkl-opt</br>3.4.4-icc15-impi-mkl-dbg</nobr>| -|**szip**| |<nobr>2.1</nobr>| -|**tbb**| |<nobr>13.5.192</br>15.3.187</br>14.0.1</br>15.2.164</nobr>| -|**trilinos**| |<nobr>11.2.3-gcc-openmpi-mkl-dbg</br>11.2.3-icc</br>11.2.3-gcc-openmpi-mkl-opt</nobr>| -|**zlib**| |<nobr>1.2.5</br>1.2.8</nobr>| +| Module | Description | +| ------ | ----------- | +| adios | | +| boost | | +| dataspaces | | +| fftw2 | | +| fftw2-mpi | | +| fftw3 | | +| fftw3-mpi | | +| gpi2 | | +| gsl | | +| hdf5 | | +| hdf5-parallel | | +| ipp | | +| libmesh | | +| magma | | +| mkl | | +| mxml | | +| netcdf | | +| netcdf-cxx | | +| netcdf-fortran | | +| netcdf-parallel | | +| opencl-rt | | +| opencl-sdk | | +| petsc | | +| plasma | | +| slepc | | +| szip | | +| tbb | | +| trilinos | | +| zlib | | ## Math -|Module|Description|Available versions| -|--|--|--| -|**[GMP](http://gmplib.org/)**|GMP is a free library for arbitrary precision arithmetic, operating on signed integers, rational numbers, and floating point numbers.|<nobr>6.1.0-intel-2015b</br>5.0.5</br>6.1.0-foss-2016a</br>6.0.0a</br>6.0.0a-intel-2015b</br>6.1.0-intel-2017.00</br>5.0.5-foss-2015g</nobr>| -|**[ISL](http://isl.gforge.inria.fr/)**|isl is a library for manipulating sets and relations of integer points bounded by linear constraints.|<nobr>0.15</nobr>| -|**[MLD2P4](http://www.mld2p4.it)**|MLD2P4 (Multi-Level Domain Decomposition Parallel Preconditioners Package based on PSBLAS) is a package of parallel algebraic multi-level preconditioners. It implements various versions of one-level additive and of multi-level additive and hybrid Schwarz algorithms. 
In the multi-level case, a purely algebraic approach is applied to generate coarse-level corrections, so that no geometric background is needed concerning the matrix to be preconditioned. The matrix is assumed to be square, real or complex, with a symmetric sparsity pattern.|<nobr>2.0-rc4-GCC-4.9.3-2.25</nobr>| -|**[numpy](http://www.numpy.org)**|NumPy is the fundamental package for scientific computing with Python. It contains among other things: a powerful N-dimensional array object, sophisticated (broadcasting) functions, tools for integrating C/C++ and Fortran code, useful linear algebra, Fourier transform, and random number capabilities. Besides its obvious scientific uses, NumPy can also be used as an efficient multi-dimensional container of generic data. Arbitrary data-types can be defined. This allows NumPy to seamlessly and speedily integrate with a wide variety of databases.|<nobr>1.8.2-intel-2015b-Python-2.7.9</br>1.8.2-intel-2015b-Python-2.7.11</br>1.8.2-intel-2016.01-Python-2.7.9</nobr>| -|**[Octave](http://www.gnu.org/software/octave/)**|GNU Octave is a high-level interpreted language, primarily intended for numerical computations.|<nobr>3.8.2-gimkl-2.11.5</br>4.0.0-foss-2015g</br>4.0.1-gimkl-2.11.5</nobr>| -|**[PSBLAS](http://people.uniroma2.it/salvatore.filippone/psblas/)**|Most computationally intensive applications work on irregular and sparse domains that complicate their implementation on parallel machines. The major goal of the Parallel Sparse Basic Linear Algebra Subroutines (PSBLAS) project is to provide a framework to enable easy, efficient and portable implementations of iterative solvers for linear systems, while shielding the user from most details of their parallelization. The interface is designed keeping in view a Single Program Multiple Data programming model on distributed memory machines.|<nobr>3.3.4-3-GCC-4.9.3-2.25</nobr>| -|**[PSBLAS-ext](http://people.uniroma2.it/salvatore.filippone/psblas/)**|PSBLAS - Extended formats and NVIDIA GPU support|<nobr>1.0-4-GCC-4.9.3-2.25</nobr>| -|**[ScientificPython](https://sourcesup.cru.fr/projects/scientific-py/)**|ScientificPython is a collection of Python modules for scientific computing. It contains support for geometry, mathematical functions, statistics, physical units, IO, visualization, and parallelization.|<nobr>2.9.4-intel-2016.01-Python-2.7.9</br>2.9.4-intel-2015b-Python-2.7.9</br>2.9.4-intel-2015b-Python-2.7.11</nobr>| +| Module | Description | +| ------ | ----------- | +| [GMP](http://gmplib.org/) | GMP is a free library for arbitrary precision arithmetic, operating on signed integers, rational numbers, and floating point numbers. | +| [ISL](http://isl.gforge.inria.fr/) | isl is a library for manipulating sets and relations of integer points bounded by linear constraints. | +| [MLD2P4](http://www.mld2p4.it) | MLD2P4 (Multi-Level Domain Decomposition Parallel Preconditioners Package based on PSBLAS) is a package of parallel algebraic multi-level preconditioners. It implements various versions of one-level additive and of multi-level additive and hybrid Schwarz algorithms. In the multi-level case, a purely algebraic approach is applied to generate coarse-level corrections, so that no geometric background is needed concerning the matrix to be preconditioned. The matrix is assumed to be square, real or complex, with a symmetric sparsity pattern. | +| [numpy](http://www.numpy.org) | NumPy is the fundamental package for scientific computing with Python. 
It contains among other things: a powerful N-dimensional array object, sophisticated (broadcasting) functions, tools for integrating C/C++ and Fortran code, useful linear algebra, Fourier transform, and random number capabilities. Besides its obvious scientific uses, NumPy can also be used as an efficient multi-dimensional container of generic data. Arbitrary data-types can be defined. This allows NumPy to seamlessly and speedily integrate with a wide variety of databases. | +| [Octave](http://www.gnu.org/software/octave/) | GNU Octave is a high-level interpreted language, primarily intended for numerical computations. | +| [PSBLAS](http://people.uniroma2.it/salvatore.filippone/psblas/) | Most computationally intensive applications work on irregular and sparse domains that complicate their implementation on parallel machines. The major goal of the Parallel Sparse Basic Linear Algebra Subroutines (PSBLAS) project is to provide a framework to enable easy, efficient and portable implementations of iterative solvers for linear systems, while shielding the user from most details of their parallelization. The interface is designed keeping in view a Single Program Multiple Data programming model on distributed memory machines. | +| [PSBLAS-ext](http://people.uniroma2.it/salvatore.filippone/psblas/) | PSBLAS - Extended formats and NVIDIA GPU support | +| [ScientificPython](https://sourcesup.cru.fr/projects/scientific-py/) | ScientificPython is a collection of Python modules for scientific computing. It contains support for geometry, mathematical functions, statistics, physical units, IO, visualization, and parallelization. | ## Mpi -|Module|Description|Available versions| -|--|--|--| -|**bullxmpi**| |<nobr>bullxmpi_1.2.4.1</nobr>| -|**[impi](http://software.intel.com/en-us/intel-mpi-library/)**|The Intel(R) MPI Library for Linux* OS is a multi-fabric message passing library based on ANL MPICH2 and OSU MVAPICH2. The Intel MPI Library for Linux OS implements the Message Passing Interface, version 2 (MPI-2) specification.|<nobr>2017.0.098-iccifort-2017.0.098-GCC-5.4.0-2.26</br>5.1.2.150-iccifort-2016.1.150-GCC-4.9.3-2.25</br>5.0.3.048-iccifort-2015.3.187-GNU-5.1.0-2.25</br>5.0.3.048</br>4.1.1.036</br>5.0.3.048-GCC-4.9.3</nobr>| -|**lam**| |<nobr>7.1.4-icc</nobr>| -|**[MPICH](http://www.mpich.org/)**|MPICH v3.x is an open source high-performance MPI 3.0 implementation. It does not support InfiniBand (use MVAPICH2 with InfiniBand devices).|<nobr>3.2-GCC-4.9.3-2.25</nobr>| -|**mvapich2**| |<nobr>1.9-gcc46</br>1.9-icc</br>1.9-gcc</nobr>| -|**[OpenMPI](http://www.open-mpi.org/)**|The Open MPI Project is an open source MPI-2 implementation.|<nobr>1.10.2-GCC-5.3.0-2.26</br>1.8.8-iccifort-2015.3.187-GNU-4.9.3-2.25</br>1.8.8-GNU-4.9.3-2.25</br>1.10.2-GCC-4.9.3-2.25</nobr>| -|**openmpi**| |<nobr>1.8.1-gcc46</br>1.8.1-gcc49</br>1.6.5-gcc</br>1.8.1-gcc</br>1.6.5-icc</br>1.6.5-gcc46</br>1.8.1-icc</nobr>| +| Module | Description | +| ------ | ----------- | +| bullxmpi | | +| [impi](http://software.intel.com/en-us/intel-mpi-library/) | The Intel(R) MPI Library for Linux* OS is a multi-fabric message passing library based on ANL MPICH2 and OSU MVAPICH2. The Intel MPI Library for Linux OS implements the Message Passing Interface, version 2 (MPI-2) specification. | +| lam | | +| [MPICH](http://www.mpich.org/) | MPICH v3.x is an open source high-performance MPI 3.0 implementation. It does not support InfiniBand (use MVAPICH2 with InfiniBand devices). 
| +| mvapich2 | | +| [OpenMPI](http://www.open-mpi.org/) | The Open MPI Project is an open source MPI-2 implementation. | +| openmpi | | ## Numlib -|Module|Description|Available versions| -|--|--|--| -|**[Armadillo](http://arma.sourceforge.net/)**|Armadillo is an open-source C++ linear algebra library (matrix maths) aiming towards a good balance between speed and ease of use. Integer, floating point and complex numbers are supported, as well as a subset of trigonometric and statistics functions.|<nobr>7.500.0-foss-2016a-Python-3.5.2</nobr>| -|**[arpack-ng](http://forge.scilab.org/index.php/p/arpack-ng/)**|ARPACK is a collection of Fortran77 subroutines designed to solve large scale eigenvalue problems.|<nobr>3.3.0-foss-2016a</nobr>| -|**[ATLAS](http://math-atlas.sourceforge.net)**|ATLAS (Automatically Tuned Linear Algebra Software) is the application of the AEOS (Automated Empirical Optimization of Software) paradigm, with the present emphasis on the Basic Linear Algebra Subprograms (BLAS), a widely used, performance-critical, linear algebra kernel library.|<nobr>3.10.1-GCC-4.9.3-2.25-LAPACK-3.4.2</nobr>| -|**[FFTW](http://www.fftw.org)**|FFTW is a C subroutine library for computing the discrete Fourier transform (DFT) in one or more dimensions, of arbitrary input size, and of both real and complex data.|<nobr>3.3.4-intel-2015b</br>3.3.4-gompi-2016.04</br>3.3.4-gompi-2016a</br>3.3.5-intel-2017.00</br>3.3.4-gompi-2015g</nobr>| -|**[GSL](http://www.gnu.org/software/gsl/)**|The GNU Scientific Library (GSL) is a numerical library for C and C++ programmers. The library provides a wide range of mathematical routines such as random number generators, special functions and least-squares fitting.|<nobr>2.1-intel-2015b</br>1.16-intel-2015b</br>1.16-intel-2016.01</nobr>| -|**[imkl](http://software.intel.com/en-us/intel-mkl/)**|Intel Math Kernel Library is a library of highly optimized, extensively threaded math routines for science, engineering, and financial applications that require maximum performance. Core math functions include BLAS, LAPACK, ScaLAPACK, Sparse Solvers, Fast Fourier Transforms, Vector Math, and more.|<nobr>11.2.3.187-gimpi-2.11.5</br>11.3.1.150-iimpi-2016.01-GCC-4.9.3-2.25</br>2017.0.098-iimpi-2017.00-GCC-5.4.0-2.26</br>11.2.3.187-iimpi-7.3.5-GNU-5.1.0-2.25</br>11.3.1.150-iimpi-8.1.5-GCC-4.9.3-2.25</nobr>| -|**[OpenBLAS](http://xianyi.github.com/OpenBLAS/)**|OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version.|<nobr>0.2.14-GNU-4.9.3-2.25-LAPACK-3.5.0</br>0.2.15-GCC-5.1.0-binutils-2.25-LAPACK-3.6.0</br>0.2.15-GCC-5.1.0-binutils-2.25-LAPACK-3.6.0-gompi-2016a</br>0.2.18-GCC-5.3.0-2.26-LAPACK-3.6.0</br>0.2.15-GCC-4.9.3-2.25-LAPACK-3.6.0</br>0.2.14-GNU-5.1.0-2.25-LAPACK-3.5.0</nobr>| -|**[ScaLAPACK](http://www.netlib.org/scalapack/)**|The ScaLAPACK (or Scalable LAPACK) library includes a subset of LAPACK routines redesigned for distributed memory MIMD parallel computers.|<nobr>2.0.2-gompi-2015g-OpenBLAS-0.2.14-LAPACK-3.5.0</br>2.0.2-gompi-2016.04-OpenBLAS-0.2.18-LAPACK-3.6.0</br>2.0.2-gompi-2016a-OpenBLAS-0.2.15-LAPACK-3.6.0</nobr>| +| Module | Description | +| ------ | ----------- | +| [Armadillo](http://arma.sourceforge.net/) | Armadillo is an open-source C++ linear algebra library (matrix maths) aiming towards a good balance between speed and ease of use. Integer, floating point and complex numbers are supported, as well as a subset of trigonometric and statistics functions. 
| +| [arpack-ng](http://forge.scilab.org/index.php/p/arpack-ng/) | ARPACK is a collection of Fortran77 subroutines designed to solve large scale eigenvalue problems. | +| [ATLAS](http://math-atlas.sourceforge.net) | ATLAS (Automatically Tuned Linear Algebra Software) is the application of the AEOS (Automated Empirical Optimization of Software) paradigm, with the present emphasis on the Basic Linear Algebra Subprograms (BLAS), a widely used, performance-critical, linear algebra kernel library. | +| [FFTW](http://www.fftw.org) | FFTW is a C subroutine library for computing the discrete Fourier transform (DFT) in one or more dimensions, of arbitrary input size, and of both real and complex data. | +| [GSL](http://www.gnu.org/software/gsl/) | The GNU Scientific Library (GSL) is a numerical library for C and C++ programmers. The library provides a wide range of mathematical routines such as random number generators, special functions and least-squares fitting. | +| [imkl](http://software.intel.com/en-us/intel-mkl/) | Intel Math Kernel Library is a library of highly optimized, extensively threaded math routines for science, engineering, and financial applications that require maximum performance. Core math functions include BLAS, LAPACK, ScaLAPACK, Sparse Solvers, Fast Fourier Transforms, Vector Math, and more. | +| [OpenBLAS](http://xianyi.github.com/OpenBLAS/) | OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version. | +| [ScaLAPACK](http://www.netlib.org/scalapack/) | The ScaLAPACK (or Scalable LAPACK) library includes a subset of LAPACK routines redesigned for distributed memory MIMD parallel computers. | ## Nvidia -|Module|Description|Available versions| -|--|--|--| -|**cuda**| |<nobr>7.5</br>6.5.14</br>6.0.37</nobr>| +| Module | Description | +| ------ | ----------- | +| cuda | | ## Omics -|Module|Description|Available versions| -|--|--|--| -|**fastqc**| |<nobr>0.11.2</nobr>| -|**gatk**| |<nobr>2.6-4</br>.2.6-4.swp</nobr>| -|**hpg-aligner**| |<nobr>1.0.0</nobr>| -|**hpg-fastq**| |<nobr>1.0.0</nobr>| -|**hpg-variant**| |<nobr>1.0.0</nobr>| -|**ngsPipeline**| |<nobr>.1.0.0.swp</br>1.0.0</nobr>| -|**picard**| |<nobr>1.117</nobr>| -|**samtools**| |<nobr>0.1.19</nobr>| -|**snpEff**| |<nobr>3.6</nobr>| +| Module | Description | +| ------ | ----------- | +| fastqc | | +| gatk | | +| hpg-aligner | | +| hpg-fastq | | +| hpg-variant | | +| ngsPipeline | | +| picard | | +| samtools | | +| snpEff | | ## Oscar-Modulefiles -|Module|Description|Available versions| -|--|--|--| +| Module | Description | +| ------ | ----------- | ## Oscar-Modules -|Module|Description|Available versions| -|--|--|--| -|**oscar-modules**| |<nobr>1.0.3</nobr>| +| Module | Description | +| ------ | ----------- | +| oscar-modules | | ## Perf -|Module|Description|Available versions| -|--|--|--| -|**[OPARI2](http://www.score-p.org)**|OPARI2, the successor of Forschungszentrum Juelich's OPARI, is a source-to-source instrumentation tool for OpenMP and hybrid codes. It surrounds OpenMP directives and runtime library calls with calls to the POMP2 measurement interface.|<nobr>2.0</nobr>| -|**[OTF2](http://www.score-p.org)**|The Open Trace Format 2 is a highly scalable, memory efficient event trace data format plus support library. 
It is the new standard trace format for Scalasca, Vampir, and TAU and is open for other tools.|<nobr>2.0</nobr>| -|**[PAPI](http://icl.cs.utk.edu/projects/papi/)**|PAPI provides the tool designer and application engineer with a consistent interface and methodology for use of the performance counter hardware found in most major microprocessors. PAPI enables software engineers to see, in near real time, the relation between software performance and processor events. In addition Component PAPI provides access to a collection of components that expose performance measurement opportunites across the hardware and software stack.|<nobr>5.4.3</nobr>| -|**[Vampir](http://www.vampir.eu)**|The Vampir software tool provides an easy-to-use framework that enables developers to quickly display and analyze arbitrary program behavior at any level of detail. The tool suite implements optimized event analysis algorithms and customizable displays that enable fast and interactive rendering of very complex performance monitoring data.|<nobr>8.5.0</nobr>| +| Module | Description | +| ------ | ----------- | +| [OPARI2](http://www.score-p.org) | OPARI2, the successor of Forschungszentrum Juelich's OPARI, is a source-to-source instrumentation tool for OpenMP and hybrid codes. It surrounds OpenMP directives and runtime library calls with calls to the POMP2 measurement interface. | +| [OTF2](http://www.score-p.org) | The Open Trace Format 2 is a highly scalable, memory efficient event trace data format plus support library. It is the new standard trace format for Scalasca, Vampir, and TAU and is open for other tools. | +| [PAPI](http://icl.cs.utk.edu/projects/papi/) | PAPI provides the tool designer and application engineer with a consistent interface and methodology for use of the performance counter hardware found in most major microprocessors. PAPI enables software engineers to see, in near real time, the relation between software performance and processor events. In addition Component PAPI provides access to a collection of components that expose performance measurement opportunites across the hardware and software stack. | +| [Vampir](http://www.vampir.eu) | The Vampir software tool provides an easy-to-use framework that enables developers to quickly display and analyze arbitrary program behavior at any level of detail. The tool suite implements optimized event analysis algorithms and customizable displays that enable fast and interactive rendering of very complex performance monitoring data. | ## Phys -|Module|Description|Available versions| -|--|--|--| -|**[phono3py](http://python.org/)**|Python is a programming language that lets you work more quickly and integrate your systems more effectively.|<nobr>1.11.7.8-intel-2015b-Python-2.7.11</nobr>| -|**[phonopy](http://python.org/)**|Python is a programming language that lets you work more quickly and integrate your systems more effectively.|<nobr>1.11.6.7-intel-2015b-Python-2.7.11</nobr>| -|**VASP**| |<nobr>5.4.1-intel-2015b-24Jun15</br>5.4.1-intel-2017.00-24Jun15</nobr>| +| Module | Description | +| ------ | ----------- | +| [phono3py](http://python.org/) | Python is a programming language that lets you work more quickly and integrate your systems more effectively. | +| [phonopy](http://python.org/) | Python is a programming language that lets you work more quickly and integrate your systems more effectively. 
| +| VASP | | ## Prace -|Module|Description|Available versions| -|--|--|--| -|**GLOBUS**| |<nobr>globus</nobr>| -|**PRACE**| |<nobr>prace</nobr>| +| Module | Description | +| ------ | ----------- | +| GLOBUS | | +| PRACE | | ## System -|Module|Description|Available versions| -|--|--|--| -|**[CUDA](https://developer.nvidia.com/cuda-toolkit)**|CUDA (formerly Compute Unified Device Architecture) is a parallel computing platform and programming model created by NVIDIA and implemented by the graphics processing units (GPUs) that they produce. CUDA gives developers access to the virtual instruction set and memory of the parallel computational elements in CUDA GPUs.|<nobr>7.5.18</nobr>| -|**[hwloc](http://www.open-mpi.org/projects/hwloc/)**|The Portable Hardware Locality (hwloc) software package provides a portable abstraction (across OS, versions, architectures, ...) of the hierarchical topology of modern architectures, including NUMA memory nodes, sockets, shared caches, cores and simultaneous multithreading. It also gathers various system attributes such as cache and memory information as well as the locality of I/O devices such as network interfaces, InfiniBand HCAs or GPUs. It primarily aims at helping applications with gathering information about modern computing hardware so as to exploit it accordingly and efficiently.|<nobr>1.11.0</br>1.11.3-GCC-5.3.0-2.26</br>1.11.1-iccifort-2015.3.187-GNU-4.9.3-2.25</br>1.11.0-GNU-4.9.3-2.25</br>1.11.2-GCC-4.9.3-2.25</br>1.11.0-GNU-5.1.0-2.25</nobr>| -|**[libpciaccess](http://cgit.freedesktop.org/xorg/lib/libpciaccess/)**|Generic PCI access library.|<nobr>0.13.1</nobr>| +| Module | Description | +| ------ | ----------- | +| [CUDA](https://developer.nvidia.com/cuda-toolkit) | CUDA (formerly Compute Unified Device Architecture) is a parallel computing platform and programming model created by NVIDIA and implemented by the graphics processing units (GPUs) that they produce. CUDA gives developers access to the virtual instruction set and memory of the parallel computational elements in CUDA GPUs. | +| [hwloc](http://www.open-mpi.org/projects/hwloc/) | The Portable Hardware Locality (hwloc) software package provides a portable abstraction (across OS, versions, architectures, ...) of the hierarchical topology of modern architectures, including NUMA memory nodes, sockets, shared caches, cores and simultaneous multithreading. It also gathers various system attributes such as cache and memory information as well as the locality of I/O devices such as network interfaces, InfiniBand HCAs or GPUs. It primarily aims at helping applications with gathering information about modern computing hardware so as to exploit it accordingly and efficiently. | +| [libpciaccess](http://cgit.freedesktop.org/xorg/lib/libpciaccess/) | Generic PCI access library. 
| ## Toolchain -|Module|Description|Available versions| -|--|--|--| -|**[foss]((none))**|GNU Compiler Collection (GCC) based compiler toolchain, including OpenMPI for MPI support, OpenBLAS (BLAS and LAPACK support), FFTW and ScaLAPACK.|<nobr>2016a</br>2016.04</br>2015g</nobr>| -|**[gimkl]((none))**|GNU Compiler Collection (GCC) based compiler toolchain, next to Intel MPI and Intel MKL (BLAS, (Sca)LAPACK, FFTW).|<nobr>2.11.5</nobr>| -|**[gimpi]((none))**|GNU Compiler Collection (GCC) based compiler toolchain, next to Intel MPI.|<nobr>2.11.5</nobr>| -|**[GNU](http://www.gnu.org/software/)**|Compiler-only toolchain with GCC and binutils.|<nobr>4.9.3-2.25</br>5.1.0-2.25</nobr>| -|**[gompi]((none))**|GNU Compiler Collection (GCC) based compiler toolchain, including OpenMPI for MPI support.|<nobr>2016a</br>2016.04</br>2015g</nobr>| -|**[iccifort](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/)**|Intel C, C++ and Fortran compilers|<nobr>2015.3.187-GNU-5.1.0-2.25</br>2017.0.098-GCC-5.4.0-2.26</br>2016.1.150-GCC-4.9.3-2.25</br>2015.3.187-GNU-4.9.3-2.25</nobr>| -|**[iimpi](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/)**|Intel C/C++ and Fortran compilers, alongside Intel MPI.|<nobr>8.1.5-GCC-4.9.3-2.25</br>7.3.5-GNU-5.1.0-2.25</br>2017.00-GCC-5.4.0-2.26</br>2016.01-GCC-4.9.3-2.25</nobr>| -|**intel**| |<nobr>13.5.192</br>2017.00</br>2015b</br>2016.01</br>2016a</br>15.3.187</br>14.0.1</br>15.2.164</nobr>| +| Module | Description | +| ------ | ----------- | +| [foss]((none)) | GNU Compiler Collection (GCC) based compiler toolchain, including OpenMPI for MPI support, OpenBLAS (BLAS and LAPACK support), FFTW and ScaLAPACK. | +| [gimkl]((none)) | GNU Compiler Collection (GCC) based compiler toolchain, next to Intel MPI and Intel MKL (BLAS, (Sca)LAPACK, FFTW). | +| [gimpi]((none)) | GNU Compiler Collection (GCC) based compiler toolchain, next to Intel MPI. | +| [GNU](http://www.gnu.org/software/) | Compiler-only toolchain with GCC and binutils. | +| [gompi]((none)) | GNU Compiler Collection (GCC) based compiler toolchain, including OpenMPI for MPI support. | +| [iccifort](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/) | Intel C, C++ and Fortran compilers | +| [iimpi](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/) | Intel C/C++ and Fortran compilers, alongside Intel MPI. | +| intel | | ## Tools -|Module|Description|Available versions| -|--|--|--| -|**advisor_xe**| |<nobr>2013.5</br>2015.1.10.380555</nobr>| -|**[APR](http://apr.apache.org/)**|Apache Portable Runtime (APR) libraries.|<nobr>1.5.2-foss-2015g</br>1.5.2</nobr>| -|**[APR-util](http://apr.apache.org/)**|Apache Portable Runtime (APR) util libraries.|<nobr>1.5.4</br>1.5.4-foss-2015g</nobr>| -|**[Bash](http://www.gnu.org/software/bash)**|Bash is an sh-compatible command language interpreter that executes commands read from the standard input or from a file. Bash also incorporates useful features from the Korn and C shells (ksh and csh).|<nobr>4.3</nobr>| -|**[binutils](http://directory.fsf.org/project/binutils/)**|binutils: GNU binary utilities|<nobr>2.25</br>2.26-GCCcore-5.4.0</br>2.26</br>2.25-GCC-5.1.0-binutils-2.25</br>2.25-GCCcore-4.9.3</br>2.26-GCCcore-5.3.0</br>2.25-GCC-4.9.3</br>2.25-GCC-4.9.3-binutils-2.25</nobr>| -|**[bzip2](http://www.bzip.org/)**|bzip2 is a freely available, patent free, high-quality data compressor. 
It typically compresses files to within 10% to 15% of the best available techniques (the PPM family of statistical compressors), whilst being around twice as fast at compression and six times faster at decompression.|<nobr>1.0.6-intel-2017.00</br>1.0.6-intel-2016.01</br>1.0.6</br>1.0.6-foss-2016a</br>1.0.6-intel-2016a</br>1.0.6-intel-2015b</br>.1.0.6-foss-2015g.lua</br>1.0.6-foss-2015g</nobr>| -|**cmake**| |<nobr>2.8.11</br>2.8.11-mic</nobr>| -|**cube**| |<nobr>4.2.3-gcc</br>4.2.3-icc</nobr>| -|**[cURL](http://curl.haxx.se)**|libcurl is a free and easy-to-use client-side URL transfer library, supporting DICT, FILE, FTP, FTPS, Gopher, HTTP, HTTPS, IMAP, IMAPS, LDAP, LDAPS, POP3, POP3S, RTMP, RTSP, SCP, SFTP, SMTP, SMTPS, Telnet and TFTP. libcurl supports SSL certificates, HTTP POST, HTTP PUT, FTP uploading, HTTP form based upload, proxies, cookies, user+password authentication (Basic, Digest, NTLM, Negotiate, Kerberos), file transfer resume, http proxy tunneling and more.|<nobr>7.51.0</br>7.37.1-foss-2015g</br>7.37.1</nobr>| -|**dhi-mike**| |<nobr>2016-SP2</br>2014</br>2016</br>default</nobr>| -|**[EasyBuild](http://hpcugent.github.com/easybuild/)**|EasyBuild is a software build and installation framework written in Python that allows you to install software in a structured, repeatable and robust way.|<nobr>2.6.0</br>2.8.1</br>3.0.0</br>2.7.0</br>3.0.2.lua</br>2.8.0</nobr>| -|**elmer**| |<nobr>7.0-r6695-dbg</br>7.0-r6695-opt</nobr>| -|**[expat](http://expat.sourceforge.net/)**|Expat is an XML parser library written in C. It is a stream-oriented parser in which an application registers handlers for things the parser might find in the XML document (like start tags)|<nobr>2.1.0-foss-2015g</br>2.1.0</nobr>| -|**git**| |<nobr>2.11.0-GNU-4.9.3-2.25.lua</br>2.8.0-GNU-4.9.3-2.25</nobr>| -|**gnuplot**| |<nobr>4.6.5</nobr>| -|**grace**| |<nobr>5.1.23</nobr>| -|**[gzip](http://www.gnu.org/software/gzip/)**|gzip (GNU zip) is a popular data compression program as a replacement for compress|<nobr>1.6-foss-2015g</br>1.6</br>1.6-foss-2016a</br>1.6-intel-2015b</nobr>| -|**inspector_xe**| |<nobr>2015.1.2.379161</br>2013.5</nobr>| -|**intelpcm**| |<nobr>2.6</nobr>| -|**ipm**| |<nobr>0.983-icc-impi</nobr>| -|**itac**| |<nobr>8.1.4.045</br>9.0.3.051</nobr>| -|**[JOE](http://joe-editor.sourceforge.net)**|JOE is a full featured terminal-based screen editor which is distributed under the GNU General Public License (GPL)|<nobr>4.2</nobr>| -|**likwid**| |<nobr>3.1.2-gcc</br>3.1.1-mic</br>3.1.2-icc</br>3.1.1-icc</br>3.1.2-mic</nobr>| -|**Lmod**| |<nobr>7.2.2.lua</br>.7.0.6</br>.6.4.2</nobr>| -|**MATLAB**| |<nobr>2015b-EDU</br>2015b-COM</nobr>| -|**memoryscape**| |<nobr>3.4</nobr>| -|**[Mercurial](http://mercurial.selenic.com/)**|Mercurial is a free, distributed source control management tool. It efficiently handles projects of any size and offers an easy and intuitive interface.|<nobr>3.7.3-foss-2015g-Python-2.7.9</nobr>| -|**mercurial**| |<nobr>2.9.1</nobr>| -|**MIKE**| |<nobr>2016-SP2</br>2014</br>2016</br>default</nobr>| -|**mono**| |<nobr>3.12.1</br>3.2.3</nobr>| -|**mpi.net**| |<nobr>1.0.0-mono-3.12.1</br>1.0.0-impi</br>1.0.0</nobr>| -|**[numactl](http://oss.sgi.com/projects/libnuma/)**|The numactl program allows you to run your application program on specific cpu's and memory nodes. It does this by supplying a NUMA memory policy to the operating system before running your program. 
The libnuma library provides convenient ways for you to add NUMA memory policies into your own program.|<nobr>2.0.10-GNU-5.1.0-2.25</br>2.0.9</br>2.0.11-GCC-5.3.0-2.26</br>2.0.11-GCC-4.9.3-2.25</br>2.0.10-iccifort-2015.3.187-GNU-4.9.3-2.25</br>2.0.11</br>2.0.10-GNU-4.9.3-2.25</br>2.0.10</nobr>| -|**octave**| |<nobr>.hg-20130730</br>.3.6.4</nobr>| -|**opari2**| |<nobr>1.1.2-gcc</br>1.1.2-icc</nobr>| -|**openssh-x509**| |<nobr>6.2p2</nobr>| -|**oscar-modules**| |<nobr>1.0.3</nobr>| -|**otf2**| |<nobr>1.4-icc</br>1.2.1-gcc</br>1.4-gcc</br>1.2.1-icc</nobr>| -|**papi**| |<nobr>5.4.0-mic</br>5.3.0</br>5.3.2-mic</br>5.4.0</br>5.3.2</nobr>| -|**parallel**| |<nobr>20141122</nobr>| -|**python**| |<nobr>2.7.6</br>2.7.5</br>3.3.2</br>3.3.5</br>3.4.2</nobr>| -|**R**| |<nobr>3.1.1</br>.old.3.0.1</br>3.0.1</nobr>| -|**racket**| |<nobr>6.0.1</nobr>| -|**relion**| |<nobr>1.3</br>1.2</nobr>| -|**Rstudio**| |<nobr>0.97</nobr>| -|**ruby**| |<nobr>2.0.0-p247</nobr>| -|**scalasca2**| |<nobr>2.0-icc-impi</br>2.0-gcc-openmpi</nobr>| -|**scite**| |<nobr>3.4.3</nobr>| -|**scorep**| |<nobr>1.2.3-gcc-openmpi</br>1.2.3-icc-impi</nobr>| -|**[Serf](http://serf.apache.org/)**|The serf library is a high performance C-based HTTP client library built upon the Apache Portable Runtime (APR) library|<nobr>1.3.8-foss-2015g</nobr>| -|**[Subversion](http://subversion.apache.org/)**|Subversion is an open source version control system.|<nobr>1.8.16-foss-2015g</nobr>| -|**[Szip](http://www.hdfgroup.org/doc_resource/SZIP/)**|Szip compression software, providing lossless compression of scientific data|<nobr>2.1-intel-2015b</br>2.1-intel-2016.01</br>2.1-foss-2016a</br>2.1</nobr>| -|**tcl**| |<nobr>8.5.15</nobr>| -|**[tcsh](http://www.tcsh.org)**|Tcsh is an enhanced, but completely compatible version of the Berkeley UNIX C shell (csh). It is a command language interpreter usable both as an interactive login shell and a shell script command processor. It includes a command-line editor, programmable word completion, spelling correction, a history mechanism, job control and a C-like syntax.|<nobr>6.19.00</nobr>| -|**tk**| |<nobr>8.5.15</nobr>| -|**totalview**| |<nobr>8.13</br>8.12</nobr>| -|**turbovnc**| |<nobr>1.2.2</nobr>| -|**[util-linux](http://www.kernel.org/pub/linux/utils/util-linux)**|Set of Linux utilities|<nobr>2.26.1</br>2.28-intel-2016a.lua</nobr>| -|**valgrind**| |<nobr>3.9.0-impi</nobr>| -|**vampir**| |<nobr>8.2</nobr>| -|**virtualgl**| |<nobr>2.4</nobr>| -|**[VTune](http://software.intel.com/en-us/intel-vtune-amplifier-xe)**|Intel VTune Amplifier XE 2016 is the premier performance profiler for C, C++, C#, Fortran, Assembly and Java.|<nobr>2016_update1</nobr>| -|**vtune_xe**| |<nobr>2013.15</br>2015.3.0.403110</nobr>| -|**[XZ](http://tukaani.org/xz/)**|xz: XZ utilities|<nobr>5.2.2-foss-2016a</br>5.2.2-intel-2017.00</br>5.2.2.lua</nobr>| +| Module | Description | +| ------ | ----------- | +| advisor_xe | | +| [APR](http://apr.apache.org/) | Apache Portable Runtime (APR) libraries. | +| [APR-util](http://apr.apache.org/) | Apache Portable Runtime (APR) util libraries. | +| [Bash](http://www.gnu.org/software/bash) | Bash is an sh-compatible command language interpreter that executes commands read from the standard input or from a file. Bash also incorporates useful features from the Korn and C shells (ksh and csh). | +| [binutils](http://directory.fsf.org/project/binutils/) | binutils: GNU binary utilities | +| [bzip2](http://www.bzip.org/) | bzip2 is a freely available, patent free, high-quality data compressor. 
It typically compresses files to within 10% to 15% of the best available techniques (the PPM family of statistical compressors), whilst being around twice as fast at compression and six times faster at decompression. | +| cmake | | +| cube | | +| [cURL](http://curl.haxx.se) | libcurl is a free and easy-to-use client-side URL transfer library, supporting DICT, FILE, FTP, FTPS, Gopher, HTTP, HTTPS, IMAP, IMAPS, LDAP, LDAPS, POP3, POP3S, RTMP, RTSP, SCP, SFTP, SMTP, SMTPS, Telnet and TFTP. libcurl supports SSL certificates, HTTP POST, HTTP PUT, FTP uploading, HTTP form based upload, proxies, cookies, user+password authentication (Basic, Digest, NTLM, Negotiate, Kerberos), file transfer resume, http proxy tunneling and more. | +| dhi-mike | | +| [EasyBuild](http://hpcugent.github.com/easybuild/) | EasyBuild is a software build and installation framework written in Python that allows you to install software in a structured, repeatable and robust way. | +| elmer | | +| [expat](http://expat.sourceforge.net/) | Expat is an XML parser library written in C. It is a stream-oriented parser in which an application registers handlers for things the parser might find in the XML document (like start tags) | +| git | | +| gnuplot | | +| grace | | +| [gzip](http://www.gnu.org/software/gzip/) | gzip (GNU zip) is a popular data compression program as a replacement for compress | +| inspector_xe | | +| intelpcm | | +| ipm | | +| itac | | +| [JOE](http://joe-editor.sourceforge.net) | JOE is a full featured terminal-based screen editor which is distributed under the GNU General Public License (GPL) | +| likwid | | +| Lmod | | +| MATLAB | | +| memoryscape | | +| [Mercurial](http://mercurial.selenic.com/) | Mercurial is a free, distributed source control management tool. It efficiently handles projects of any size and offers an easy and intuitive interface. | +| mercurial | | +| MIKE | | +| mono | | +| mpi.net | | +| [numactl](http://oss.sgi.com/projects/libnuma/) | The numactl program allows you to run your application program on specific cpu's and memory nodes. It does this by supplying a NUMA memory policy to the operating system before running your program. The libnuma library provides convenient ways for you to add NUMA memory policies into your own program. | +| octave | | +| opari2 | | +| openssh-x509 | | +| oscar-modules | | +| otf2 | | +| papi | | +| parallel | | +| python | | +| R | | +| racket | | +| relion | | +| Rstudio | | +| ruby | | +| scalasca2 | | +| scite | | +| scorep | | +| [Serf](http://serf.apache.org/) | The serf library is a high performance C-based HTTP client library built upon the Apache Portable Runtime (APR) library | +| [Subversion](http://subversion.apache.org/) | Subversion is an open source version control system. | +| [Szip](http://www.hdfgroup.org/doc_resource/SZIP/) | Szip compression software, providing lossless compression of scientific data | +| tcl | | +| [tcsh](http://www.tcsh.org) | Tcsh is an enhanced, but completely compatible version of the Berkeley UNIX C shell (csh). It is a command language interpreter usable both as an interactive login shell and a shell script command processor. It includes a command-line editor, programmable word completion, spelling correction, a history mechanism, job control and a C-like syntax. 
| +| tk | | +| totalview | | +| turbovnc | | +| [util-linux](http://www.kernel.org/pub/linux/utils/util-linux) | Set of Linux utilities | +| valgrind | | +| vampir | | +| virtualgl | | +| [VTune](http://software.intel.com/en-us/intel-vtune-amplifier-xe) | Intel VTune Amplifier XE 2016 is the premier performance profiler for C, C++, C#, Fortran, Assembly and Java. | +| vtune_xe | | +| [XZ](http://tukaani.org/xz/) | xz: XZ utilities | ## Virtualization -|Module|Description|Available versions| -|--|--|--| -|**qemu**| |<nobr>2.1.2-vde2</br>2.1.2</br>2.1.0</br>2.1.0-vde2</nobr>| -|**vde2**| |<nobr>2.3.2</nobr>| -|**wine**| |<nobr>1.7.29</nobr>| +| Module | Description | +| ------ | ----------- | +| qemu | | +| vde2 | | +| wine | | ## Vis -|Module|Description|Available versions| -|--|--|--| -|**[cairo](http://cairographics.org)**|Cairo is a 2D graphics library with support for multiple output devices. Currently supported output targets include the X Window System (via both Xlib and XCB), Quartz, Win32, image buffers, PostScript, PDF, and SVG file output. Experimental backends include OpenGL, BeOS, OS/2, and DirectFB|<nobr>1.12.18</nobr>| -|**[ffmpeg](https://www.ffmpeg.org/)**|A complete, cross-platform solution to record, convert and stream audio and video.|<nobr>2.4</nobr>| -|**[fixesproto](http://www.freedesktop.org/wiki/Software/xlibs)**|X.org FixesProto protocol headers.|<nobr>5.0</nobr>| -|**[FLTK](http://www.fltk.org)**|FLTK is a cross-platform C++ GUI toolkit for UNIX/Linux (X11), Microsoft Windows, and MacOS X. FLTK provides modern GUI functionality without the bloat and supports 3D graphics via OpenGL and its built-in GLUT emulation.|<nobr>1.3.2</nobr>| -|**[fontconfig](http://www.freedesktop.org/software/fontconfig)**|Fontconfig is a library designed to provide system-wide font configuration, customization and application access.|<nobr>2.11.1</nobr>| -|**[freetype](http://freetype.org)**|FreeType 2 is a software font engine that is designed to be small, efficient, highly customizable, and portable while capable of producing high-quality output (glyph images). It can be used in graphics libraries, display servers, font conversion tools, text image generation tools, and many other products as well.|<nobr>2.5.3</nobr>| -|**gettext**| |<nobr>.0.19.2.lua</br>0.19.6-intel-2017.00</br>0.19.6-foss-2016a</br>0.19.4.lua</br>0.19.2</br>0.19.2-foss-2015g</nobr>| -|**[GLib](http://www.gtk.org/)**|GLib is one of the base libraries of the GTK+ project|<nobr>2.40.0</nobr>| -|**[inputproto](http://www.freedesktop.org/wiki/Software/xlibs)**|X.org InputProto protocol headers.|<nobr>2.3</nobr>| -|**[kbproto](http://www.freedesktop.org/wiki/Software/xlibs)**|X.org KBProto protocol headers.|<nobr>1.0.6</nobr>| -|**[libICE](http://www.freedesktop.org/wiki/Software/xlibs)**|X Inter-Client Exchange library for freedesktop.org|<nobr>1.0.9</nobr>| -|**[libX11](http://www.freedesktop.org/wiki/Software/xlibs)**|X11 client-side library|<nobr>1.6.2-Python-2.7.9</nobr>| -|**[libXau](http://www.freedesktop.org/wiki/Software/xlibs)**|The libXau package contains a library implementing the X11 Authorization Protocol. This is useful for restricting client access to the display.|<nobr>1.0.8</nobr>| -|**[libXdmcp](http://www.freedesktop.org/wiki/Software/xlibs)**|The libXdmcp package contains a library implementing the X Display Manager Control Protocol. 
This is useful for allowing clients to interact with the X Display Manager.|<nobr>1.1.2</nobr>| -|**[libXext](http://www.freedesktop.org/wiki/Software/xlibs)**|Common X Extensions library|<nobr>1.3.3</nobr>| -|**[libXfixes](http://www.freedesktop.org/wiki/Software/xlibs)**|X Fixes extension library|<nobr>5.0.1</nobr>| -|**[libXfont](http://www.freedesktop.org/wiki/Software/xlibs)**|X font libary|<nobr>1.5.1-Python-2.7.9</nobr>| -|**[libXt](http://www.freedesktop.org/wiki/Software/xlibs)**|libXt provides the X Toolkit Intrinsics, an abstract widget library upon which other toolkits are based. Xt is the basis for many toolkits, including the Athena widgets (Xaw), and LessTif (a Motif implementation).|<nobr>1.1.4-libX11-1.6.2</nobr>| -|**[pixman](http://www.pixman.org/)**|Pixman is a low-level software library for pixel manipulation, providing features such as image compositing and trapezoid rasterization. Important users of pixman are the cairo graphics library and the X server.|<nobr>0.32.6</nobr>| -|**[Tk](http://www.tcl.tk/)**|Tk is an open source, cross-platform widget toolchain that provides a library of basic elements for building a graphical user interface (GUI) in many different programming languages.|<nobr>8.6.4-foss-2016a-no-X11</br>8.6.4-intel-2017.00-no-X11</br>8.6.4-intel-2015b-no-X11</br>8.6.4-no-X11</br>8.5.12</nobr>| +| Module | Description | +| ------ | ----------- | +| [cairo](http://cairographics.org) | Cairo is a 2D graphics library with support for multiple output devices. Currently supported output targets include the X Window System (via both Xlib and XCB), Quartz, Win32, image buffers, PostScript, PDF, and SVG file output. Experimental backends include OpenGL, BeOS, OS/2, and DirectFB | +| [ffmpeg](https://www.ffmpeg.org/) | A complete, cross-platform solution to record, convert and stream audio and video. | +| [fixesproto](http://www.freedesktop.org/wiki/Software/xlibs) | X.org FixesProto protocol headers. | +| [FLTK](http://www.fltk.org) | FLTK is a cross-platform C++ GUI toolkit for UNIX/Linux (X11), Microsoft Windows, and MacOS X. FLTK provides modern GUI functionality without the bloat and supports 3D graphics via OpenGL and its built-in GLUT emulation. | +| [fontconfig](http://www.freedesktop.org/software/fontconfig) | Fontconfig is a library designed to provide system-wide font configuration, customization and application access. | +| [freetype](http://freetype.org) | FreeType 2 is a software font engine that is designed to be small, efficient, highly customizable, and portable while capable of producing high-quality output (glyph images). It can be used in graphics libraries, display servers, font conversion tools, text image generation tools, and many other products as well. | +| gettext | | +| [GLib](http://www.gtk.org/) | GLib is one of the base libraries of the GTK+ project | +| [inputproto](http://www.freedesktop.org/wiki/Software/xlibs) | X.org InputProto protocol headers. | +| [kbproto](http://www.freedesktop.org/wiki/Software/xlibs) | X.org KBProto protocol headers. | +| [libICE](http://www.freedesktop.org/wiki/Software/xlibs) | X Inter-Client Exchange library for freedesktop.org | +| [libX11](http://www.freedesktop.org/wiki/Software/xlibs) | X11 client-side library | +| [libXau](http://www.freedesktop.org/wiki/Software/xlibs) | The libXau package contains a library implementing the X11 Authorization Protocol. This is useful for restricting client access to the display. 
| +| [libXdmcp](http://www.freedesktop.org/wiki/Software/xlibs) | The libXdmcp package contains a library implementing the X Display Manager Control Protocol. This is useful for allowing clients to interact with the X Display Manager. | +| [libXext](http://www.freedesktop.org/wiki/Software/xlibs) | Common X Extensions library | +| [libXfixes](http://www.freedesktop.org/wiki/Software/xlibs) | X Fixes extension library | +| [libXfont](http://www.freedesktop.org/wiki/Software/xlibs) | X font libary | +| [libXt](http://www.freedesktop.org/wiki/Software/xlibs) | libXt provides the X Toolkit Intrinsics, an abstract widget library upon which other toolkits are based. Xt is the basis for many toolkits, including the Athena widgets (Xaw), and LessTif (a Motif implementation). | +| [pixman](http://www.pixman.org/) | Pixman is a low-level software library for pixel manipulation, providing features such as image compositing and trapezoid rasterization. Important users of pixman are the cairo graphics library and the X server. | +| [Tk](http://www.tcl.tk/) | Tk is an open source, cross-platform widget toolchain that provides a library of basic elements for building a graphical user interface (GUI) in many different programming languages. | diff --git a/docs.it4i/modules-matrix.md b/docs.it4i/modules-matrix.md index 28c7d9eba75cec4e169ec8f905dbe15f10e4f3ed..fd1c8b86f58262e7c21826dcf7e8d403a59a5682 100644 --- a/docs.it4i/modules-matrix.md +++ b/docs.it4i/modules-matrix.md @@ -1,3 +1,7 @@ +!!! Hint "Cluster Acronyms" + \* A - Anselm + \* S - Salomon + \* U - uv1 at Salomon | Module | Versions | Clusters | | ------ | -------- | -------- | | abinit | 7.10.1-gcc-openmpi</br>7.10.1-icc-impi</br>7.6.2 | `--A`</br>`--A`</br>`--A` | @@ -9,7 +13,7 @@ | advisor_xe | 2015.1.10.380555</br>2013.5 | `--A`</br>`--A` | | aislinn | 20160105-Python-2.7.9-gompi-2015e | `-S-` | | almost | 2.1.0-intel-2015b</br>2.1.0-foss-2015g</br>2.1.0-foss-2016a</br>2.1.0-foss-2015b | `-S-`</br>`-SA`</br>`--A`</br>`-S-` | -| Amber | 14 | `-SA` | +| Amber | 14 | `-S-` | | ANSYS | 17.0</br>16.1 | `US-`</br>`US-` | | ansys | 14.5.x</br>15.0.x</br>16.0.x | `--A`</br>`--A`</br>`--A` | | ant | 1.9.3-Java-1.7.0_79 | `-S-` | @@ -31,7 +35,7 @@ | boost | 1.56-gcc-openmpi</br>1.56-icc-impi | `--A`</br>`--A` | | Boost | 1.60.0-foss-2015g-Python-2.7.9</br>1.58.0-Python-2.7.9</br>1.58.0-ictce-7.3.5-Python-2.7.9</br>1.59.0-intel-2015b</br>1.59.0-intel-2015b-Python-2.7.11</br>1.59.0-intel-2016.01</br>1.60.0-intel-2015b-Python-2.7.11</br>1.61.0-foss-2016a-serial</br>1.58.0-foss-2015g-Python-2.7.9</br>1.58.0-intel-2015b-Python-2.7.9</br>1.60.0-intel-2016a</br>1.61.0-foss-2016a</br>1.58.0-intel-2016.01-Python-2.7.9</br>1.58.0-gompi-2015e-Python-2.7.9 | `USA`</br>`-S-`</br>`-S-`</br>`--A`</br>`-S-`</br>`-S-`</br>`-S-`</br>`-SA`</br>`-SA`</br>`US-`</br>`--A`</br>`-S-`</br>`-S-`</br>`-S-` | | bowtie2 | 2.2.3 | `--A` | -| bullxde | 2.0 | `--A` | +| bullxde | 2.0 | `-S-` | | bullxmpi | bullxmpi-1.2.4.3</br>bullxmpi_1.2.4.1 | `--A`</br>`--A` | | bupc | 2.16.2 | `--A` | | BWA | 0.7.5a-foss-2015g | `-S-` | @@ -195,7 +199,7 @@ | LLVM | 3.8.0-foss-2016a</br>3.7.1-foss-2015g</br>3.8.0-intel-2016a</br>3.7.1-intel-2016a</br>3.9.0-intel-2017.00 | `US-`</br>`-S-`</br>`-S-`</br>`-S-`</br>`US-` | | llvm | 3.6.0 | `--A` | | lmod | 7.2.2 | `USA` | -| Lmod | 7.2.2</br>7.0.6</br>6.4.2 | `-SA`</br>`-SA`</br>`--A` | +| Lmod | 7.2.2</br>7.0.6 | `-SA`</br>`-S-` | | lsdyna | 7.x.x | `--A` | | lsprepost | 4.2 | `--A` | | Lua | 5.1.4-8 | `USA` | @@ -209,7 +213,7 @@ | marc | 
2011</br>2013.1 | `--A`</br>`--A` | | Marc | 2013.1.0 | `-S-` | | matlab | R2013a-EDU</br>R2014a-EDU</br>R2014a-COM</br>R2013a-COM | `--A`</br>`--A`</br>`--A`</br>`--A` | -| MATLAB | 2015b-EDU</br>2015a-EDU</br>2015a-COM</br>2015b-COM | `-SA`</br>`USA`</br>`USA`</br>`-SA` | +| MATLAB | 2015b-EDU</br>2015a-EDU</br>2015a-COM</br>2015b-COM | `-SA`</br>`US-`</br>`US-`</br>`-SA` | | matplotlib | 1.4.3-intel-2015b-Python-2.7.9 | `-S-` | | Maven | 3.3.9 | `USA` | | maxwell | 3.0 | `--A` | @@ -271,7 +275,7 @@ | OpenMPI | 1.8.6-iccifort-2015.3.187-GNU-5.1.0-2.25</br>1.8.6-GCC-4.4.7-system</br>1.10.1-GNU-4.9.3-2.25</br>1.8.8-iccifort-2015.3.187-GNU-4.9.3-2.25</br>1.10.2-GCC-4.9.3-2.25</br>1.8.8-GNU-5.1.0-2.25</br>1.10.2-GCC-5.3.0-2.26</br>1.10.1-GCC-4.9.3-2.25</br>1.8.6-GNU-5.1.0-2.25</br>1.8.8-GNU-4.9.3-2.25 | `-S-`</br>`US-`</br>`US-`</br>`-SA`</br>`USA`</br>`-S-`</br>`-SA`</br>`-S-`</br>`US-`</br>`USA` | | openssh-x509 | 6.2p2 | `--A` | | ORCA | 3_0_3-linux_x86-64 | `-SA` | -| oscar-modules | 1.0.3 | `--A` | +| oscar-modules | 1.0.3 | `-S-` | | OSPRay | 0.9.1 | `-S-` | | OTF2 | 1.4-intel-2015b</br>2.0</br>2.0-intel-2015b-mic | `-S-`</br>`-SA`</br>`-S-` | | otf2 | 1.4-icc</br>1.2.1-gcc</br>1.4-gcc</br>1.2.1-icc | `--A`</br>`--A`</br>`--A`</br>`--A` | diff --git a/docs.it4i/modules-salomon-uv.md b/docs.it4i/modules-salomon-uv.md index 1391cefaf1e12d4023f416e4034484e3c73397aa..a1da8f4a2ad238ca622c8355a4c4ca9cc1e40b6d 100644 --- a/docs.it4i/modules-salomon-uv.md +++ b/docs.it4i/modules-salomon-uv.md @@ -1,165 +1,165 @@ -# List of Available UV Modules +# Available Modules ## Bio -|Module|Description| -|--|--| -|**[FastQC](http://www.bioinformatics.babraham.ac.uk/projects/download.html)**|A quality control application for high throughput sequence data| -|**[GATK](http://www.broadinstitute.org/gatk/)**|The Genome Analysis Toolkit or GATK is a software package developed at the Broad Institute to analyse next-generation resequencing data. The toolkit offers a wide variety of tools, with a primary focus on variant discovery and genotyping as well as strong emphasis on data quality assurance. Its robust architecture, powerful processing engine and high-performance computing features make it capable of taking on projects of any size.| -|**[SnpEff](http://snpeff.sourceforge.net/)**|Genetic variant annotation and effect prediction toolbox.| +| Module | Description | +| -------| ----------- | +| [FastQC](http://www.bioinformatics.babraham.ac.uk/projects/download.html) | A quality control application for high throughput sequence data | +| [GATK](http://www.broadinstitute.org/gatk/) | The Genome Analysis Toolkit or GATK is a software package developed at the Broad Institute to analyse next-generation resequencing data. The toolkit offers a wide variety of tools, with a primary focus on variant discovery and genotyping as well as strong emphasis on data quality assurance. Its robust architecture, powerful processing engine and high-performance computing features make it capable of taking on projects of any size. | +| [SnpEff](http://snpeff.sourceforge.net/) | Genetic variant annotation and effect prediction toolbox. | ## Cae -|Module|Description| -|--|--| -|**ANSYS**| | -|**[OpenFOAM](http://www.openfoam.com/)**|OpenFOAM is a free, open source CFD software package. 
OpenFOAM has an extensive range of features to solve anything from complex fluid flows involving chemical reactions, turbulence and heat transfer, to solid dynamics and electromagnetics.| +| Module | Description | +| -------| ----------- | +| ANSYS | | +| [OpenFOAM](http://www.openfoam.com/) | OpenFOAM is a free, open source CFD software package. OpenFOAM has an extensive range of features to solve anything from complex fluid flows involving chemical reactions, turbulence and heat transfer, to solid dynamics and electromagnetics. | ## Chem -|Module|Description| -|--|--| -|**[ABINIT](http://www.abinit.org/)**|Abinit is a plane wave pseudopotential code for doing condensed phase electronic structure calculations using DFT.| -|**[Libint](https://sourceforge.net/p/libint/)**|Libint library is used to evaluate the traditional (electron repulsion) and certain novel two-body matrix elements (integrals) over Cartesian Gaussian functions used in modern atomic and molecular theory.| -|**[libxc](http://www.tddft.org/programs/octopus/wiki/index.php/Libxc)**|Libxc is a library of exchange-correlation functionals for density-functional theory. The aim is to provide a portable, well tested and reliable set of exchange and correlation functionals.| +| Module | Description | +| -------| ----------- | +| [ABINIT](http://www.abinit.org/) | Abinit is a plane wave pseudopotential code for doing condensed phase electronic structure calculations using DFT. | +| [Libint](https://sourceforge.net/p/libint/) | Libint library is used to evaluate the traditional (electron repulsion) and certain novel two-body matrix elements (integrals) over Cartesian Gaussian functions used in modern atomic and molecular theory. | +| [libxc](http://www.tddft.org/programs/octopus/wiki/index.php/Libxc) | Libxc is a library of exchange-correlation functionals for density-functional theory. The aim is to provide a portable, well tested and reliable set of exchange and correlation functionals. | ## Compiler -|Module|Description| -|--|--| -|**[GCC](http://gcc.gnu.org/)**|The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Java, and Ada, as well as libraries for these languages (libstdc++, libgcj,...).| -|**GCCcore**| | -|**[icc](http://software.intel.com/en-us/intel-compilers/)**|C and C++ compiler from Intel| -|**[ifort](http://software.intel.com/en-us/intel-compilers/)**|Fortran compiler from Intel| -|**LLVM**| | +| Module | Description | +| -------| ----------- | +| [GCC](http://gcc.gnu.org/) | The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Java, and Ada, as well as libraries for these languages (libstdc++, libgcj,...). | +| GCCcore | | +| [icc](http://software.intel.com/en-us/intel-compilers/) | C and C++ compiler from Intel | +| [ifort](http://software.intel.com/en-us/intel-compilers/) | Fortran compiler from Intel | +| LLVM | | ## Data -|Module|Description| -|--|--| -|**[GDAL](http://www.gdal.org/)**|GDAL is a translator library for raster geospatial data formats that is released under an X/MIT style Open Source license by the Open Source Geospatial Foundation. As a library, it presents a single abstract data model to the calling application for all supported formats. 
It also comes with a variety of useful commandline utilities for data translation and processing.| -|**[HDF5](http://www.hdfgroup.org/HDF5/)**|HDF5 is a unique technology suite that makes possible the management of extremely large and complex data collections.| -|**[netCDF](http://www.unidata.ucar.edu/software/netcdf/)**|NetCDF (network Common Data Form) is a set of software libraries and machine-independent data formats that support the creation, access, and sharing of array-oriented scientific data.| +| Module | Description | +| -------| ----------- | +| [GDAL](http://www.gdal.org/) | GDAL is a translator library for raster geospatial data formats that is released under an X/MIT style Open Source license by the Open Source Geospatial Foundation. As a library, it presents a single abstract data model to the calling application for all supported formats. It also comes with a variety of useful commandline utilities for data translation and processing. | +| [HDF5](http://www.hdfgroup.org/HDF5/) | HDF5 is a unique technology suite that makes possible the management of extremely large and complex data collections. | +| [netCDF](http://www.unidata.ucar.edu/software/netcdf/) | NetCDF (network Common Data Form) is a set of software libraries and machine-independent data formats that support the creation, access, and sharing of array-oriented scientific data. | ## Devel -|Module|Description| -|--|--| -|**[Autoconf](http://www.gnu.org/software/autoconf/)**|Autoconf is an extensible package of M4 macros that produce shell scripts to automatically configure software source code packages. These scripts can adapt the packages to many kinds of UNIX-like systems without manual user intervention. Autoconf creates a configuration script for a package from a template file that lists the operating system features that the package can use, in the form of M4 macro calls.| -|**[Automake](http://www.gnu.org/software/automake/automake.html)**|Automake: GNU Standards-compliant Makefile generator| -|**[Autotools](http://autotools.io)**|This bundle collect the standard GNU build tools: Autoconf, Automake and libtool| -|**[Boost](http://www.boost.org/)**|Boost provides free peer-reviewed portable C++ source libraries.| -|**[CMake](http://www.cmake.org)**|CMake, the cross-platform, open-source build system. CMake is a family of tools designed to build, test and package software.| -|**[Doxygen](http://www.doxygen.org)**|Doxygen is a documentation system for C++, C, Java, Objective-C, Python, IDL (Corba and Microsoft flavors), Fortran, VHDL, PHP, C#, and to some extent D.| -|**[M4](http://www.gnu.org/software/m4/m4.html)**|GNU M4 is an implementation of the traditional Unix macro processor. It is mostly SVR4 compatible although it has some extensions (for example, handling more than 9 positional parameters to macros). GNU M4 also has built-in functions for including files, running shell commands, doing arithmetic, etc.| -|**[make](http://www.gnu.org/software/make/make.html)**|make-3.82: GNU version of make utility| -|**[Maven](http://maven.apache.org/index.html)**|Binary maven install, Apache Maven is a software project management and comprehension tool. Based on the concept of a project object model (POM), Maven can manage a project's build, reporting and documentation from a central piece of information.| -|**[ncurses](http://www.gnu.org/software/ncurses/)**|The Ncurses (new curses) library is a free software emulation of curses in System V Release 4.0, and more. 
It uses Terminfo format, supports pads and color and multiple highlights and forms characters and function-key mapping, and has all the other SYSV-curses enhancements over BSD Curses.| -|**[SQLite](http://www.sqlite.org/)**|SQLite: SQL Database Engine in a C Library| +| Module | Description | +| -------| ----------- | +| [Autoconf](http://www.gnu.org/software/autoconf/) | Autoconf is an extensible package of M4 macros that produce shell scripts to automatically configure software source code packages. These scripts can adapt the packages to many kinds of UNIX-like systems without manual user intervention. Autoconf creates a configuration script for a package from a template file that lists the operating system features that the package can use, in the form of M4 macro calls. | +| [Automake](http://www.gnu.org/software/automake/automake.html) | Automake: GNU Standards-compliant Makefile generator | +| [Autotools](http://autotools.io) | This bundle collect the standard GNU build tools: Autoconf, Automake and libtool | +| [Boost](http://www.boost.org/) | Boost provides free peer-reviewed portable C++ source libraries. | +| [CMake](http://www.cmake.org) | CMake, the cross-platform, open-source build system. CMake is a family of tools designed to build, test and package software. | +| [Doxygen](http://www.doxygen.org) | Doxygen is a documentation system for C++, C, Java, Objective-C, Python, IDL (Corba and Microsoft flavors), Fortran, VHDL, PHP, C#, and to some extent D. | +| [M4](http://www.gnu.org/software/m4/m4.html) | GNU M4 is an implementation of the traditional Unix macro processor. It is mostly SVR4 compatible although it has some extensions (for example, handling more than 9 positional parameters to macros). GNU M4 also has built-in functions for including files, running shell commands, doing arithmetic, etc. | +| [make](http://www.gnu.org/software/make/make.html) | make-3.82: GNU version of make utility | +| [Maven](http://maven.apache.org/index.html) | Binary maven install, Apache Maven is a software project management and comprehension tool. Based on the concept of a project object model (POM), Maven can manage a project's build, reporting and documentation from a central piece of information. | +| [ncurses](http://www.gnu.org/software/ncurses/) | The Ncurses (new curses) library is a free software emulation of curses in System V Release 4.0, and more. It uses Terminfo format, supports pads and color and multiple highlights and forms characters and function-key mapping, and has all the other SYSV-curses enhancements over BSD Curses. | +| [SQLite](http://www.sqlite.org/) | SQLite: SQL Database Engine in a C Library | ## Lang -|Module|Description| -|--|--| -|**[Bison](http://www.gnu.org/software/bison)**|Bison is a general-purpose parser generator that converts an annotated context-free grammar into a deterministic LR or generalized LR (GLR) parser employing LALR(1) parser tables.| -|**[flex](http://flex.sourceforge.net/)**|Flex (Fast Lexical Analyzer) is a tool for generating scanners. A scanner, sometimes called a tokenizer, is a program which recognizes lexical patterns in text.| -|**[Java](http://java.com/)**|Java Platform, Standard Edition (Java SE) lets you develop and deploy Java applications on desktops and servers.| -|**[Lua](http://www.lua.org/)**|Lua is a powerful, fast, lightweight, embeddable scripting language. Lua combines simple procedural syntax with powerful data description constructs based on associative arrays and extensible semantics. 
Lua is dynamically typed, runs by interpreting bytecode for a register-based virtual machine, and has automatic memory management with incremental garbage collection, making it ideal for configuration, scripting, and rapid prototyping.| -|**[NASM](http://www.nasm.us/)**|NASM: General-purpose x86 assembler| -|**[Perl](http://www.perl.org/)**|Larry Wall's Practical Extraction and Report Language| -|**[Python](http://python.org/)**|Python is a programming language that lets you work more quickly and integrate your systems more effectively.| -|**[R](http://www.r-project.org/)**|R is a free software environment for statistical computing and graphics.| -|**[Tcl](http://www.tcl.tk/)**|Tcl (Tool Command Language) is a very powerful but easy to learn dynamic programming language, suitable for a very wide range of uses, including web and desktop applications, networking, administration, testing and many more.| +| Module | Description | +| -------| ----------- | +| [Bison](http://www.gnu.org/software/bison) | Bison is a general-purpose parser generator that converts an annotated context-free grammar into a deterministic LR or generalized LR (GLR) parser employing LALR(1) parser tables. | +| [flex](http://flex.sourceforge.net/) | Flex (Fast Lexical Analyzer) is a tool for generating scanners. A scanner, sometimes called a tokenizer, is a program which recognizes lexical patterns in text. | +| [Java](http://java.com/) | Java Platform, Standard Edition (Java SE) lets you develop and deploy Java applications on desktops and servers. | +| [Lua](http://www.lua.org/) | Lua is a powerful, fast, lightweight, embeddable scripting language. Lua combines simple procedural syntax with powerful data description constructs based on associative arrays and extensible semantics. Lua is dynamically typed, runs by interpreting bytecode for a register-based virtual machine, and has automatic memory management with incremental garbage collection, making it ideal for configuration, scripting, and rapid prototyping. | +| [NASM](http://www.nasm.us/) | NASM: General-purpose x86 assembler | +| [Perl](http://www.perl.org/) | Larry Wall's Practical Extraction and Report Language | +| [Python](http://python.org/) | Python is a programming language that lets you work more quickly and integrate your systems more effectively. | +| [R](http://www.r-project.org/) | R is a free software environment for statistical computing and graphics. | +| [Tcl](http://www.tcl.tk/) | Tcl (Tool Command Language) is a very powerful but easy to learn dynamic programming language, suitable for a very wide range of uses, including web and desktop applications, networking, administration, testing and many more. | ## Lib -|Module|Description| -|--|--| -|**[libffi](http://sourceware.org/libffi/)**|The libffi library provides a portable, high level programming interface to various calling conventions. This allows a programmer to call any function specified by a call interface description at run-time.| -|**[libjpeg-turbo](http://sourceforge.net/libjpeg-turbo/)**|libjpeg-turbo is a fork of the original IJG libjpeg which uses SIMD to accelerate baseline JPEG compression and decompression. 
libjpeg is a library that implements JPEG image encoding, decoding and transcoding.| -|**[libpng](http://www.libpng.org/pub/png/libpng.html)**|libpng is the official PNG reference library| -|**[libreadline](http://cnswww.cns.cwru.edu/php/chet/readline/rltop.html)**|The GNU Readline library provides a set of functions for use by applications that allow users to edit command lines as they are typed in. Both Emacs and vi editing modes are available. The Readline library includes additional functions to maintain a list of previously-entered command lines, to recall and perhaps reedit those lines, and perform csh-like history expansion on previous commands.| -|**[libtool](http://www.gnu.org/software/libtool)**|GNU libtool is a generic library support script. Libtool hides the complexity of using shared libraries behind a consistent, portable interface.| -|**[libxml2](http://xmlsoft.org/)**|Libxml2 is the XML C parser and toolchain developed for the Gnome project (but usable outside of the Gnome platform).| -|**[PROJ](http://trac.osgeo.org/proj/)**|Program proj is a standard Unix filter function which converts geographic longitude and latitude coordinates into cartesian coordinates| -|**[tbb](http://software.intel.com/en-us/articles/intel-tbb/)**|Intel Threading Building Blocks 4.0 (Intel TBB) is a widely used, award-winning C++ template library for creating reliable, portable, and scalable parallel applications. Use Intel TBB for a simple and rapid way of developing robust task-based parallel applications that scale to available processor cores, are compatible with multiple environments, and are easier to maintain. Intel TBB is the most proficient way to implement future-proof parallel applications that tap into the power and performance of multicore and manycore hardware platforms.| -|**[zlib](http://www.zlib.net/)**|zlib is designed to be a free, general-purpose, legally unencumbered -- that is, not covered by any patents -- lossless data-compression library for use on virtually any computer hardware and operating system.| +| Module | Description | +| -------| ----------- | +| [libffi](http://sourceware.org/libffi/) | The libffi library provides a portable, high level programming interface to various calling conventions. This allows a programmer to call any function specified by a call interface description at run-time. | +| [libjpeg-turbo](http://sourceforge.net/libjpeg-turbo/) | libjpeg-turbo is a fork of the original IJG libjpeg which uses SIMD to accelerate baseline JPEG compression and decompression. libjpeg is a library that implements JPEG image encoding, decoding and transcoding. | +| [libpng](http://www.libpng.org/pub/png/libpng.html) | libpng is the official PNG reference library | +| [libreadline](http://cnswww.cns.cwru.edu/php/chet/readline/rltop.html) | The GNU Readline library provides a set of functions for use by applications that allow users to edit command lines as they are typed in. Both Emacs and vi editing modes are available. The Readline library includes additional functions to maintain a list of previously-entered command lines, to recall and perhaps reedit those lines, and perform csh-like history expansion on previous commands. | +| [libtool](http://www.gnu.org/software/libtool) | GNU libtool is a generic library support script. Libtool hides the complexity of using shared libraries behind a consistent, portable interface. 
| +| [libxml2](http://xmlsoft.org/) | Libxml2 is the XML C parser and toolchain developed for the Gnome project (but usable outside of the Gnome platform). | +| [PROJ](http://trac.osgeo.org/proj/) | Program proj is a standard Unix filter function which converts geographic longitude and latitude coordinates into cartesian coordinates | +| [tbb](http://software.intel.com/en-us/articles/intel-tbb/) | Intel Threading Building Blocks 4.0 (Intel TBB) is a widely used, award-winning C++ template library for creating reliable, portable, and scalable parallel applications. Use Intel TBB for a simple and rapid way of developing robust task-based parallel applications that scale to available processor cores, are compatible with multiple environments, and are easier to maintain. Intel TBB is the most proficient way to implement future-proof parallel applications that tap into the power and performance of multicore and manycore hardware platforms. | +| [zlib](http://www.zlib.net/) | zlib is designed to be a free, general-purpose, legally unencumbered -- that is, not covered by any patents -- lossless data-compression library for use on virtually any computer hardware and operating system. | ## Math -|Module|Description| -|--|--| -|**GMP**| | -|**[SCOTCH](http://gforge.inria.fr/projects/scotch/)**|Software package and libraries for sequential and parallel graph partitioning, static mapping, and sparse matrix block ordering, and sequential mesh and hypergraph partitioning.| +| Module | Description | +| -------| ----------- | +| GMP | | +| [SCOTCH](http://gforge.inria.fr/projects/scotch/) | Software package and libraries for sequential and parallel graph partitioning, static mapping, and sparse matrix block ordering, and sequential mesh and hypergraph partitioning. | ## Mpi -|Module|Description| -|--|--| -|**[impi](http://software.intel.com/en-us/intel-mpi-library/)**|The Intel(R) MPI Library for Linux* OS is a multi-fabric message passing library based on ANL MPICH2 and OSU MVAPICH2. The Intel MPI Library for Linux OS implements the Message Passing Interface, version 2 (MPI-2) specification.| -|**[OpenMPI](http://www.open-mpi.org/)**|The Open MPI Project is an open source MPI-2 implementation.| +| Module | Description | +| -------| ----------- | +| [impi](http://software.intel.com/en-us/intel-mpi-library/) | The Intel(R) MPI Library for Linux* OS is a multi-fabric message passing library based on ANL MPICH2 and OSU MVAPICH2. The Intel MPI Library for Linux OS implements the Message Passing Interface, version 2 (MPI-2) specification. | +| [OpenMPI](http://www.open-mpi.org/) | The Open MPI Project is an open source MPI-2 implementation. | ## Numlib -|Module|Description| -|--|--| -|**[FFTW](http://www.fftw.org)**|FFTW is a C subroutine library for computing the discrete Fourier transform (DFT) in one or more dimensions, of arbitrary input size, and of both real and complex data.| -|**[imkl](http://software.intel.com/en-us/intel-mkl/)**|Intel Math Kernel Library is a library of highly optimized, extensively threaded math routines for science, engineering, and financial applications that require maximum performance. 
Core math functions include BLAS, LAPACK, ScaLAPACK, Sparse Solvers, Fast Fourier Transforms, Vector Math, and more.| -|**[OpenBLAS](http://xianyi.github.com/OpenBLAS/)**|OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version.| -|**[ScaLAPACK](http://www.netlib.org/scalapack/)**|The ScaLAPACK (or Scalable LAPACK) library includes a subset of LAPACK routines redesigned for distributed memory MIMD parallel computers.| +| Module | Description | +| -------| ----------- | +| [FFTW](http://www.fftw.org) | FFTW is a C subroutine library for computing the discrete Fourier transform (DFT) in one or more dimensions, of arbitrary input size, and of both real and complex data. | +| [imkl](http://software.intel.com/en-us/intel-mkl/) | Intel Math Kernel Library is a library of highly optimized, extensively threaded math routines for science, engineering, and financial applications that require maximum performance. Core math functions include BLAS, LAPACK, ScaLAPACK, Sparse Solvers, Fast Fourier Transforms, Vector Math, and more. | +| [OpenBLAS](http://xianyi.github.com/OpenBLAS/) | OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version. | +| [ScaLAPACK](http://www.netlib.org/scalapack/) | The ScaLAPACK (or Scalable LAPACK) library includes a subset of LAPACK routines redesigned for distributed memory MIMD parallel computers. | ## Phys -|Module|Description| -|--|--| -|**[VASP](http://www.vasp.at)**|The Vienna Ab initio Simulation Package (VASP) is a computer program for atomic scale materials modelling, e.g. electronic structure calculations and quantum-mechanical molecular dynamics, from first principles.| +| Module | Description | +| -------| ----------- | +| [VASP](http://www.vasp.at) | The Vienna Ab initio Simulation Package (VASP) is a computer program for atomic scale materials modelling, e.g. electronic structure calculations and quantum-mechanical molecular dynamics, from first principles. | ## System -|Module|Description| -|--|--| -|**[hwloc](http://www.open-mpi.org/projects/hwloc/)**|The Portable Hardware Locality (hwloc) software package provides a portable abstraction (across OS, versions, architectures, ...) of the hierarchical topology of modern architectures, including NUMA memory nodes, sockets, shared caches, cores and simultaneous multithreading. It also gathers various system attributes such as cache and memory information as well as the locality of I/O devices such as network interfaces, InfiniBand HCAs or GPUs. It primarily aims at helping applications with gathering information about modern computing hardware so as to exploit it accordingly and efficiently.| +| Module | Description | +| -------| ----------- | +| [hwloc](http://www.open-mpi.org/projects/hwloc/) | The Portable Hardware Locality (hwloc) software package provides a portable abstraction (across OS, versions, architectures, ...) of the hierarchical topology of modern architectures, including NUMA memory nodes, sockets, shared caches, cores and simultaneous multithreading. It also gathers various system attributes such as cache and memory information as well as the locality of I/O devices such as network interfaces, InfiniBand HCAs or GPUs. It primarily aims at helping applications with gathering information about modern computing hardware so as to exploit it accordingly and efficiently. 
| ## Toolchain -|Module|Description| -|--|--| -|**[foss]((none))**|GNU Compiler Collection (GCC) based compiler toolchain, including OpenMPI for MPI support, OpenBLAS (BLAS and LAPACK support), FFTW and ScaLAPACK.| -|**[GNU](http://www.gnu.org/software/)**|Compiler-only toolchain with GCC and binutils.| -|**[gompi]((none))**|GNU Compiler Collection (GCC) based compiler toolchain, including OpenMPI for MPI support.| -|**[iccifort](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/)**|Intel C, C++ and Fortran compilers| -|**[iimpi](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/)**|Intel C/C++ and Fortran compilers, alongside Intel MPI.| -|**[intel](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/)**|Intel Cluster Toolkit Compiler Edition provides Intel C/C++ and Fortran compilers, Intel MPI & Intel MKL.| -|**[PRACE](http://www.prace-ri.eu/PRACE-Common-Production)**|The PRACE Common Production Environment (PCPE) is a set of software tools and libraries that are planned to be available on all PRACE execution sites. The PCPE also defines a set of environment variables that try to make compilation on all sites as homogeneous and simple as possible.| +| Module | Description | +| -------| ----------- | +| [foss]((none)) | GNU Compiler Collection (GCC) based compiler toolchain, including OpenMPI for MPI support, OpenBLAS (BLAS and LAPACK support), FFTW and ScaLAPACK. | +| [GNU](http://www.gnu.org/software/) | Compiler-only toolchain with GCC and binutils. | +| [gompi]((none)) | GNU Compiler Collection (GCC) based compiler toolchain, including OpenMPI for MPI support. | +| [iccifort](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/) | Intel C, C++ and Fortran compilers | +| [iimpi](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/) | Intel C/C++ and Fortran compilers, alongside Intel MPI. | +| [intel](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/) | Intel Cluster Toolkit Compiler Edition provides Intel C/C++ and Fortran compilers, Intel MPI & Intel MKL. | +| [PRACE](http://www.prace-ri.eu/PRACE-Common-Production) | The PRACE Common Production Environment (PCPE) is a set of software tools and libraries that are planned to be available on all PRACE execution sites. The PCPE also defines a set of environment variables that try to make compilation on all sites as homogeneous and simple as possible. | ## Tools -|Module|Description| -|--|--| -|**[Bash](http://www.gnu.org/software/bash)**|Bash is an sh-compatible command language interpreter that executes commands read from the standard input or from a file. Bash also incorporates useful features from the Korn and C shells (ksh and csh).| -|**[binutils](http://directory.fsf.org/project/binutils/)**|binutils: GNU binary utilities| -|**[bzip2](http://www.bzip.org/)**|bzip2 is a freely available, patent free, high-quality data compressor. It typically compresses files to within 10% to 15% of the best available techniques (the PPM family of statistical compressors), whilst being around twice as fast at compression and six times faster at decompression.| -|**[cURL](http://curl.haxx.se)**|libcurl is a free and easy-to-use client-side URL transfer library, supporting DICT, FILE, FTP, FTPS, Gopher, HTTP, HTTPS, IMAP, IMAPS, LDAP, LDAPS, POP3, POP3S, RTMP, RTSP, SCP, SFTP, SMTP, SMTPS, Telnet and TFTP. 
libcurl supports SSL certificates, HTTP POST, HTTP PUT, FTP uploading, HTTP form based upload, proxies, cookies, user+password authentication (Basic, Digest, NTLM, Negotiate, Kerberos), file transfer resume, http proxy tunneling and more.| -|**[EasyBuild](http://hpcugent.github.com/easybuild/)**|EasyBuild is a software build and installation framework written in Python that allows you to install software in a structured, repeatable and robust way.| -|**[expat](http://expat.sourceforge.net/)**|Expat is an XML parser library written in C. It is a stream-oriented parser in which an application registers handlers for things the parser might find in the XML document (like start tags)| -|**git**| | -|**[gzip](http://www.gnu.org/software/gzip/)**|gzip (GNU zip) is a popular data compression program as a replacement for compress| -|**MATLAB**| | -|**[Mercurial](http://mercurial.selenic.com/)**|Mercurial is a free, distributed source control management tool. It efficiently handles projects of any size and offers an easy and intuitive interface.| -|**[numactl](http://oss.sgi.com/projects/libnuma/)**|The numactl program allows you to run your application program on specific cpu's and memory nodes. It does this by supplying a NUMA memory policy to the operating system before running your program. The libnuma library provides convenient ways for you to add NUMA memory policies into your own program.| -|**pigz**| | -|**[QEMU](http://wiki.qemu.org/Main_Page)**|QEMU is a generic and open source machine emulator and virtualizer.| -|**[Szip](http://www.hdfgroup.org/doc_resource/SZIP/)**|Szip compression software, providing lossless compression of scientific data| -|**[tcsh](http://www.tcsh.org)**|Tcsh is an enhanced, but completely compatible version of the Berkeley UNIX C shell (csh). It is a command language interpreter usable both as an interactive login shell and a shell script command processor. It includes a command-line editor, programmable word completion, spelling correction, a history mechanism, job control and a C-like syntax.| -|**[VDE2](http://vde.sourceforge.net)**|VDE is an ethernet compliant virtual network that can be spawned over a set of physical computer over the Internet. VDE is part of virtualsquare project.| -|**[VTune](http://software.intel.com/en-us/intel-vtune-amplifier-xe)**|Intel VTune Amplifier XE 2016 is the premier performance profiler for C, C++, C#, Fortran, Assembly and Java.| -|**XZ**| | +| Module | Description | +| -------| ----------- | +| [Bash](http://www.gnu.org/software/bash) | Bash is an sh-compatible command language interpreter that executes commands read from the standard input or from a file. Bash also incorporates useful features from the Korn and C shells (ksh and csh). | +| [binutils](http://directory.fsf.org/project/binutils/) | binutils: GNU binary utilities | +| [bzip2](http://www.bzip.org/) | bzip2 is a freely available, patent free, high-quality data compressor. It typically compresses files to within 10% to 15% of the best available techniques (the PPM family of statistical compressors), whilst being around twice as fast at compression and six times faster at decompression. | +| [cURL](http://curl.haxx.se) | libcurl is a free and easy-to-use client-side URL transfer library, supporting DICT, FILE, FTP, FTPS, Gopher, HTTP, HTTPS, IMAP, IMAPS, LDAP, LDAPS, POP3, POP3S, RTMP, RTSP, SCP, SFTP, SMTP, SMTPS, Telnet and TFTP. 
libcurl supports SSL certificates, HTTP POST, HTTP PUT, FTP uploading, HTTP form based upload, proxies, cookies, user+password authentication (Basic, Digest, NTLM, Negotiate, Kerberos), file transfer resume, HTTP proxy tunneling and more. | +| [EasyBuild](http://hpcugent.github.com/easybuild/) | EasyBuild is a software build and installation framework written in Python that allows you to install software in a structured, repeatable and robust way. | +| [expat](http://expat.sourceforge.net/) | Expat is an XML parser library written in C. It is a stream-oriented parser in which an application registers handlers for things the parser might find in the XML document (like start tags). | +| git | | +| [gzip](http://www.gnu.org/software/gzip/) | gzip (GNU zip) is a popular data compression program, written as a replacement for compress. | +| MATLAB | | +| [Mercurial](http://mercurial.selenic.com/) | Mercurial is a free, distributed source control management tool. It efficiently handles projects of any size and offers an easy and intuitive interface. | +| [numactl](http://oss.sgi.com/projects/libnuma/) | The numactl program allows you to run your application program on specific CPUs and memory nodes. It does this by supplying a NUMA memory policy to the operating system before running your program. The libnuma library provides convenient ways for you to add NUMA memory policies into your own program. | +| pigz | | +| [QEMU](http://wiki.qemu.org/Main_Page) | QEMU is a generic and open source machine emulator and virtualizer. | +| [Szip](http://www.hdfgroup.org/doc_resource/SZIP/) | Szip compression software, providing lossless compression of scientific data. | +| [tcsh](http://www.tcsh.org) | Tcsh is an enhanced, but completely compatible version of the Berkeley UNIX C shell (csh). It is a command language interpreter usable both as an interactive login shell and a shell script command processor. It includes a command-line editor, programmable word completion, spelling correction, a history mechanism, job control and a C-like syntax. | +| [VDE2](http://vde.sourceforge.net) | VDE is an Ethernet-compliant virtual network that can be spawned over a set of physical computers over the Internet. VDE is part of the VirtualSquare project. | +| [VTune](http://software.intel.com/en-us/intel-vtune-amplifier-xe) | Intel VTune Amplifier XE 2016 is the premier performance profiler for C, C++, C#, Fortran, Assembly and Java. | +| XZ | | ## Vis -|Module|Description| -|--|--| -|**[gettext](http://www.gnu.org/software/gettext/)**|GNU `gettext' is an important step for the GNU Translation Project, as it is an asset on which we may build many other steps. This package offers to programmers, translators, and even users, a well integrated set of tools and documentation| -|**[GLib](http://www.gtk.org/)**|GLib is one of the base libraries of the GTK+ project| -|**[Tk](http://www.tcl.tk/)**|Tk is an open source, cross-platform widget toolchain that provides a library of basic elements for building a graphical user interface (GUI) in many different programming languages.| -|**[VisIt](https://wci.llnl.gov/simulation/computer-codes/visit)**|VisIt is an Open Source, interactive, scalable, visualization, animation and analysis tool| +| Module | Description | +| -------| ----------- | +| [gettext](http://www.gnu.org/software/gettext/) | GNU `gettext' is an important step for the GNU Translation Project, as it is an asset on which we may build many other steps.
This package offers to programmers, translators, and even users, a well-integrated set of tools and documentation. | +| [GLib](http://www.gtk.org/) | GLib is one of the base libraries of the GTK+ project. | +| [Tk](http://www.tcl.tk/) | Tk is an open source, cross-platform widget toolkit that provides a library of basic elements for building a graphical user interface (GUI) in many different programming languages. | +| [VisIt](https://wci.llnl.gov/simulation/computer-codes/visit) | VisIt is an Open Source, interactive, scalable visualization, animation and analysis tool. | diff --git a/docs.it4i/modules-salomon.md b/docs.it4i/modules-salomon.md index 99b6d5e85aec6a23ad1531e43d1926917cc2e9d0..65a3a3a626b9d9f264dafe5f5cc6508646a1b426 100644 --- a/docs.it4i/modules-salomon.md +++ b/docs.it4i/modules-salomon.md @@ -1,385 +1,385 @@ -# List of Available Modules +# Available Modules ## Core -|Module|Description| -|--|--| -|**lmod**| | -|**settarg**| | +| Module | Description | +| ------ | ----------- | +| lmod | | +| settarg | | ## Bio -|Module|Description| -|--|--| -|**[almost](http://www-almost.ch.cam.ac.uk/site)**|all atom molecular simulation toolkit - is a fast and flexible molecular modeling environment that provides powerful and efficient algorithms for molecular simulation, homology modeling, de novo design and ab-initio calculations.| -|**[Amber](http://ambermd.org)**|A set of molecular mechanical force fields for the simulation of biomolecules| -|**[BCFtools](http://www.htslib.org/)**|Samtools is a suite of programs for interacting with high-throughput sequencing data. BCFtools - Reading/writing BCF2/VCF/gVCF files and calling/filtering/summarising SNP and short indel sequence variants| -|**[BWA](http://bio-bwa.sourceforge.net/)**|Burrows-Wheeler Aligner (BWA) is an efficient program that aligns relatively short nucleotide sequences against a long reference sequence such as the human genome.| -|**[FastQC](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**|FastQC is a quality control application for high throughput sequence data. It reads in sequence data in a variety of formats and can either provide an interactive application to review the results of several different QC checks, or create an HTML based report which can be integrated into a pipeline.| -|**[GATK](http://www.broadinstitute.org/gatk/)**|The Genome Analysis Toolkit or GATK is a software package developed at the Broad Institute to analyse next-generation resequencing data. The toolkit offers a wide variety of tools, with a primary focus on variant discovery and genotyping as well as strong emphasis on data quality assurance. Its robust architecture, powerful processing engine and high-performance computing features make it capable of taking on projects of any size.| -|**[GROMACS](http://www.gromacs.org)**|GROMACS is a versatile package to perform molecular dynamics, i.e. simulate the Newtonian equations of motion for systems with hundreds to millions of particles.| -|**[HTSlib](http://www.htslib.org/)**|A C library for reading/writing high-throughput sequencing data. This package includes the utilities bgzip and tabix| -|**[picard](http://sourceforge.net/projects/picard)**|A set of tools (in Java) for working with next generation sequencing data in the BAM format.| -|**[PLUMED](http://www.plumed-code.org)**|PLUMED is an open source library for free energy calculations in molecular systems which works together with some of the most popular molecular dynamics engines.
Free energy calculations can be performed as a function of many order parameters with a particular focus on biological problems, using state of the art methods such as metadynamics, umbrella sampling and Jarzynski-equation based steered MD. The software, written in C++, can be easily interfaced with both fortran and C/C++ codes.| -|**[RELION](http://www2.mrc-lmb.cam.ac.uk/relion/index.php/Main_Page)**|RELION (for REgularised LIkelihood OptimisatioN, pronounce rely-on) is a stand-alone computer program that employs an empirical Bayesian approach to refinement of (multiple) 3D reconstructions or 2D class averages in electron cryo-microscopy (cryo-EM).| -|**[SAMtools](http://www.htslib.org/)**|SAM Tools provide various utilities for manipulating alignments in the SAM format, including sorting, merging, indexing and generating alignments in a per-position format.| -|**[SnpEff](http://snpeff.sourceforge.net/)**|Genetic variant annotation and effect prediction toolbox.| -|**[Trimmomatic](http://www.usadellab.org/cms/?page=trimmomatic)**|Trimmomatic performs a variety of useful trimming tasks for illumina paired-end and single ended data.The selection of trimming steps and their associated parameters are supplied on the command line.| +| Module | Description | +| ------ | ----------- | +| [almost](http://www-almost.ch.cam.ac.uk/site) | all atom molecular simulation toolkit - is a fast and flexible molecular modeling environment that provides powerful and efficient algorithms for molecular simulation, homology modeling, de novo design and ab-initio calculations. | +| [Amber](http://ambermd.org) | A set of molecular mechanical force fields for the simulation of biomolecules | +| [BCFtools](http://www.htslib.org/) | Samtools is a suite of programs for interacting with high-throughput sequencing data. BCFtools - Reading/writing BCF2/VCF/gVCF files and calling/filtering/summarising SNP and short indel sequence variants | +| [BWA](http://bio-bwa.sourceforge.net/) | Burrows-Wheeler Aligner (BWA) is an efficient program that aligns relatively short nucleotide sequences against a long reference sequence such as the human genome. | +| [FastQC](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/) | FastQC is a quality control application for high throughput sequence data. It reads in sequence data in a variety of formats and can either provide an interactive application to review the results of several different QC checks, or create an HTML based report which can be integrated into a pipeline. | +| [GATK](http://www.broadinstitute.org/gatk/) | The Genome Analysis Toolkit or GATK is a software package developed at the Broad Institute to analyse next-generation resequencing data. The toolkit offers a wide variety of tools, with a primary focus on variant discovery and genotyping as well as strong emphasis on data quality assurance. Its robust architecture, powerful processing engine and high-performance computing features make it capable of taking on projects of any size. | +| [GROMACS](http://www.gromacs.org) | GROMACS is a versatile package to perform molecular dynamics, i.e. simulate the Newtonian equations of motion for systems with hundreds to millions of particles. | +| [HTSlib](http://www.htslib.org/) | A C library for reading/writing high-throughput sequencing data. This package includes the utilities bgzip and tabix | +| [picard](http://sourceforge.net/projects/picard) | A set of tools (in Java) for working with next generation sequencing data in the BAM format. 
| +| [PLUMED](http://www.plumed-code.org) | PLUMED is an open source library for free energy calculations in molecular systems which works together with some of the most popular molecular dynamics engines. Free energy calculations can be performed as a function of many order parameters with a particular focus on biological problems, using state of the art methods such as metadynamics, umbrella sampling and Jarzynski-equation based steered MD. The software, written in C++, can be easily interfaced with both Fortran and C/C++ codes. | +| [RELION](http://www2.mrc-lmb.cam.ac.uk/relion/index.php/Main_Page) | RELION (for REgularised LIkelihood OptimisatioN, pronounce rely-on) is a stand-alone computer program that employs an empirical Bayesian approach to refinement of (multiple) 3D reconstructions or 2D class averages in electron cryo-microscopy (cryo-EM). | +| [SAMtools](http://www.htslib.org/) | SAM Tools provide various utilities for manipulating alignments in the SAM format, including sorting, merging, indexing and generating alignments in a per-position format. | +| [SnpEff](http://snpeff.sourceforge.net/) | Genetic variant annotation and effect prediction toolbox. | +| [Trimmomatic](http://www.usadellab.org/cms/?page=trimmomatic) | Trimmomatic performs a variety of useful trimming tasks for Illumina paired-end and single-ended data. The selection of trimming steps and their associated parameters are supplied on the command line. | ## Cae -|Module|Description| -|--|--| -|**Adams**| | -|**ANSYS**| | -|**COMSOL**| | -|**Digimat**| | -|**[FreeFem++](http://www.freefem.org)**|FreeFem++ is a partial differential equation solver. It has its own language. freefem scripts can solve multiphysics non linear systems in 2D and 3D. Problems involving PDE (2d, 3d) from several branches of physics such as fluid-structure interactions require interpolations of data on several meshes and their manipulation within one program. FreeFem++ includes a fast 2^d-tree-based interpolation algorithm and a language for the manipulation of data on multiple meshes (as a follow up of bamg (now a part of FreeFem++ ). FreeFem++ is written in C++ and the FreeFem++ language is a C++ idiom. It runs on Macs, Windows, Unix machines. FreeFem++ replaces the older freefem and freefem+.| -|**HyperWorks**| | -|**Marc**| | -|**[OpenFOAM](http://www.openfoam.com/)**|OpenFOAM is a free, open source CFD software package. OpenFOAM has an extensive range of features to solve anything from complex fluid flows involving chemical reactions, turbulence and heat transfer, to solid dynamics and electromagnetics.| +| Module | Description | +| ------ | ----------- | +| Adams | | +| ANSYS | | +| COMSOL | | +| Digimat | | +| [FreeFem++](http://www.freefem.org) | FreeFem++ is a partial differential equation solver. It has its own language. freefem scripts can solve multiphysics nonlinear systems in 2D and 3D. Problems involving PDE (2d, 3d) from several branches of physics such as fluid-structure interactions require interpolations of data on several meshes and their manipulation within one program. FreeFem++ includes a fast 2^d-tree-based interpolation algorithm and a language for the manipulation of data on multiple meshes (as a follow-up of bamg, now a part of FreeFem++). FreeFem++ is written in C++ and the FreeFem++ language is a C++ idiom. It runs on Macs, Windows, and Unix machines. FreeFem++ replaces the older freefem and freefem+.
| +| HyperWorks | | +| Marc | | +| [OpenFOAM](http://www.openfoam.com/) | OpenFOAM is a free, open source CFD software package. OpenFOAM has an extensive range of features to solve anything from complex fluid flows involving chemical reactions, turbulence and heat transfer, to solid dynamics and electromagnetics. | ## Chem -|Module|Description| -|--|--| -|**[ABINIT](http://www.abinit.org/)**|Abinit is a plane wave pseudopotential code for doing condensed phase electronic structure calculations using DFT.| -|**[CP2K](http://www.cp2k.org/)**|CP2K is a freely available (GPL) program, written in Fortran 95, to perform atomistic and molecular simulations of solid state, liquid, molecular and biological systems. It provides a general framework for different methods such as e.g. density functional theory (DFT) using a mixed Gaussian and plane waves approach (GPW), and classical pair and many-body potentials.| -|**[LAMMPS](http://lammps.sandia.gov)**|LAMMPS is a classical molecular dynamics code, and an acronym for Large-scale Atomic/Molecular Massively Parallel Simulator. Has potentials for solid-state materials (metals, semiconductors) and soft matter (biomolecules, polymers) and coarse-grained or mesoscopic systems. It can be used to model atoms or, more generically, as a parallel particle simulator at the atomic, meso, or continuum scale.| -|**[libctl](http://ab-initio.mit.edu/libctl)**|libctl is a free Guile-based library implementing flexible control files for scientific simulations.| -|**[Libint](https://sourceforge.net/p/libint/)**|Libint library is used to evaluate the traditional (electron repulsion) and certain novel two-body matrix elements (integrals) over Cartesian Gaussian functions used in modern atomic and molecular theory.| -|**[libxc](http://www.tddft.org/programs/octopus/wiki/index.php/Libxc)**|Libxc is a library of exchange-correlation functionals for density-functional theory. The aim is to provide a portable, well tested and reliable set of exchange and correlation functionals.| -|**Molpro**| | -|**[NAMD](http://www.ks.uiuc.edu/Research/namd/)**|NAMD is a parallel molecular dynamics code designed for high-performance simulation of large biomolecular systems.| -|**[NWChem](http://www.nwchem-sw.org)**|NWChem aims to provide its users with computational chemistry tools that are scalable both in their ability to treat large scientific computational chemistry problems efficiently, and in their use of available parallel computing resources from high-performance parallel supercomputers to conventional workstation clusters. NWChem software can handle: biomolecules, nanostructures, and solid-state; from quantum to classical, and all combinations; Gaussian basis functions or plane-waves; scaling from one to thousands of processors; properties and relativity.| -|**[ORCA](http://cec.mpg.de/forum/)**|ORCA is a flexible, efficient and easy-to-use general purpose tool for quantum chemistry with specific emphasis on spectroscopic properties of open-shell molecules. It features a wide variety of standard quantum chemical methods ranging from semiempirical methods to DFT to single- and multireference correlated ab initio methods. It can also treat environmental and relativistic effects.| -|**[QuantumESPRESSO](http://www.pwscf.org/)**|Quantum ESPRESSO is an integrated suite of computer codes for electronic-structure calculations and materials modeling at the nanoscale. 
It is based on density-functional theory, plane waves, and pseudopotentials (both norm-conserving and ultrasoft).| -|**[S4MPLE](http://infochim.u-strasbg.fr/spip.php?rubrique152)**|S4MPLE (Sampler For Multiple Protein-Ligand Entities) is a flexible molecular modeling tool, supporting empirical force field-driven conformational sampling and geometry optimization heuristics using a hybrid genetic algorithm (GA).| -|**Scipion**| | -|**[xdrfile](http://www.gromacs.org/Developer_Zone/Programming_Guide/XTC_Library)**|XTC library| +| Module | Description | +| ------ | ----------- | +| [ABINIT](http://www.abinit.org/) | Abinit is a plane wave pseudopotential code for doing condensed phase electronic structure calculations using DFT. | +| [CP2K](http://www.cp2k.org/) | CP2K is a freely available (GPL) program, written in Fortran 95, to perform atomistic and molecular simulations of solid state, liquid, molecular and biological systems. It provides a general framework for different methods such as e.g. density functional theory (DFT) using a mixed Gaussian and plane waves approach (GPW), and classical pair and many-body potentials. | +| [LAMMPS](http://lammps.sandia.gov) | LAMMPS is a classical molecular dynamics code, and an acronym for Large-scale Atomic/Molecular Massively Parallel Simulator. Has potentials for solid-state materials (metals, semiconductors) and soft matter (biomolecules, polymers) and coarse-grained or mesoscopic systems. It can be used to model atoms or, more generically, as a parallel particle simulator at the atomic, meso, or continuum scale. | +| [libctl](http://ab-initio.mit.edu/libctl) | libctl is a free Guile-based library implementing flexible control files for scientific simulations. | +| [Libint](https://sourceforge.net/p/libint/) | Libint library is used to evaluate the traditional (electron repulsion) and certain novel two-body matrix elements (integrals) over Cartesian Gaussian functions used in modern atomic and molecular theory. | +| [libxc](http://www.tddft.org/programs/octopus/wiki/index.php/Libxc) | Libxc is a library of exchange-correlation functionals for density-functional theory. The aim is to provide a portable, well tested and reliable set of exchange and correlation functionals. | +| Molpro | | +| [NAMD](http://www.ks.uiuc.edu/Research/namd/) | NAMD is a parallel molecular dynamics code designed for high-performance simulation of large biomolecular systems. | +| [NWChem](http://www.nwchem-sw.org) | NWChem aims to provide its users with computational chemistry tools that are scalable both in their ability to treat large scientific computational chemistry problems efficiently, and in their use of available parallel computing resources from high-performance parallel supercomputers to conventional workstation clusters. NWChem software can handle: biomolecules, nanostructures, and solid-state; from quantum to classical, and all combinations; Gaussian basis functions or plane-waves; scaling from one to thousands of processors; properties and relativity. | +| [ORCA](http://cec.mpg.de/forum/) | ORCA is a flexible, efficient and easy-to-use general purpose tool for quantum chemistry with specific emphasis on spectroscopic properties of open-shell molecules. It features a wide variety of standard quantum chemical methods ranging from semiempirical methods to DFT to single- and multireference correlated ab initio methods. It can also treat environmental and relativistic effects. 
| +| [QuantumESPRESSO](http://www.pwscf.org/) | Quantum ESPRESSO is an integrated suite of computer codes for electronic-structure calculations and materials modeling at the nanoscale. It is based on density-functional theory, plane waves, and pseudopotentials (both norm-conserving and ultrasoft). | +| [S4MPLE](http://infochim.u-strasbg.fr/spip.php?rubrique152) | S4MPLE (Sampler For Multiple Protein-Ligand Entities) is a flexible molecular modeling tool, supporting empirical force field-driven conformational sampling and geometry optimization heuristics using a hybrid genetic algorithm (GA). | +| Scipion | | +| [xdrfile](http://www.gromacs.org/Developer_Zone/Programming_Guide/XTC_Library) | XTC library | ## Compiler -|Module|Description| -|--|--| -|**[BerkeleyUPC](http://upc.lbl.gov)**|The goal of the Berkeley UPC compiler group is to develop a portable, high performance implementation of UPC for large-scale multiprocessors, PC clusters, and clusters of shared memory multiprocessors.| -|**[Clang](http://clang.llvm.org/)**|C, C++, Objective-C compiler, based on LLVM. Does not include C++ standard library -- use libstdc++ from GCC.| -|**[GCC](http://gcc.gnu.org/)**|The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Java, and Ada, as well as libraries for these languages (libstdc++, libgcj,...).| -|**[GCCcore](http://gcc.gnu.org/)**|The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Java, and Ada, as well as libraries for these languages (libstdc++, libgcj,...).| -|**[icc](http://software.intel.com/en-us/intel-compilers/)**|C and C++ compiler from Intel| -|**[ifort](http://software.intel.com/en-us/intel-compilers/)**|Fortran compiler from Intel| -|**[LLVM](http://llvm.org/)**|The LLVM Core libraries provide a modern source- and target-independent optimizer, along with code generation support for many popular CPUs (as well as some less common ones!) These libraries are built around a well specified code representation known as the LLVM intermediate representation ("LLVM IR"). The LLVM Core libraries are well documented, and it is particularly easy to invent your own language (or port an existing compiler) to use LLVM as an optimizer and code generator.| -|**[OpenCoarrays](http://www.opencoarrays.org/)**|A transport layer for coarray Fortran compilers.| -|**PGI**| | +| Module | Description | +| ------ | ----------- | +| [BerkeleyUPC](http://upc.lbl.gov) | The goal of the Berkeley UPC compiler group is to develop a portable, high performance implementation of UPC for large-scale multiprocessors, PC clusters, and clusters of shared memory multiprocessors. | +| [Clang](http://clang.llvm.org/) | C, C++, Objective-C compiler, based on LLVM. Does not include C++ standard library -- use libstdc++ from GCC. | +| [GCC](http://gcc.gnu.org/) | The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Java, and Ada, as well as libraries for these languages (libstdc++, libgcj,...). | +| [GCCcore](http://gcc.gnu.org/) | The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Java, and Ada, as well as libraries for these languages (libstdc++, libgcj,...). 
| +| [icc](http://software.intel.com/en-us/intel-compilers/) | C and C++ compiler from Intel | +| [ifort](http://software.intel.com/en-us/intel-compilers/) | Fortran compiler from Intel | +| [LLVM](http://llvm.org/) | The LLVM Core libraries provide a modern source- and target-independent optimizer, along with code generation support for many popular CPUs (as well as some less common ones!) These libraries are built around a well specified code representation known as the LLVM intermediate representation ("LLVM IR"). The LLVM Core libraries are well documented, and it is particularly easy to invent your own language (or port an existing compiler) to use LLVM as an optimizer and code generator. | +| [OpenCoarrays](http://www.opencoarrays.org/) | A transport layer for coarray Fortran compilers. | +| PGI | | ## Data -|Module|Description| -|--|--| -|**[GDAL](http://www.gdal.org/)**|GDAL is a translator library for raster geospatial data formats that is released under an X/MIT style Open Source license by the Open Source Geospatial Foundation. As a library, it presents a single abstract data model to the calling application for all supported formats. It also comes with a variety of useful commandline utilities for data translation and processing.| -|**[h5py](http://www.h5py.org/)**|HDF5 for Python (h5py) is a general-purpose Python interface to the Hierarchical Data Format library, version 5. HDF5 is a versatile, mature scientific software library designed for the fast, flexible storage of enormous amounts of data.| -|**[HDF5](http://www.hdfgroup.org/HDF5/)**|HDF5 is a unique technology suite that makes possible the management of extremely large and complex data collections.| -|**[netCDF](http://www.unidata.ucar.edu/software/netcdf/)**|NetCDF (network Common Data Form) is a set of software libraries and machine-independent data formats that support the creation, access, and sharing of array-oriented scientific data.| -|**[netCDF-Fortran](http://www.unidata.ucar.edu/software/netcdf/)**|NetCDF (network Common Data Form) is a set of software libraries and machine-independent data formats that support the creation, access, and sharing of array-oriented scientific data.| +| Module | Description | +| ------ | ----------- | +| [GDAL](http://www.gdal.org/) | GDAL is a translator library for raster geospatial data formats that is released under an X/MIT style Open Source license by the Open Source Geospatial Foundation. As a library, it presents a single abstract data model to the calling application for all supported formats. It also comes with a variety of useful commandline utilities for data translation and processing. | +| [h5py](http://www.h5py.org/) | HDF5 for Python (h5py) is a general-purpose Python interface to the Hierarchical Data Format library, version 5. HDF5 is a versatile, mature scientific software library designed for the fast, flexible storage of enormous amounts of data. | +| [HDF5](http://www.hdfgroup.org/HDF5/) | HDF5 is a unique technology suite that makes possible the management of extremely large and complex data collections. | +| [netCDF](http://www.unidata.ucar.edu/software/netcdf/) | NetCDF (network Common Data Form) is a set of software libraries and machine-independent data formats that support the creation, access, and sharing of array-oriented scientific data. 
| +| [netCDF-Fortran](http://www.unidata.ucar.edu/software/netcdf/) | NetCDF (network Common Data Form) is a set of software libraries and machine-independent data formats that support the creation, access, and sharing of array-oriented scientific data. | ## Debugger -|Module|Description| -|--|--| -|**aislinn**| | -|**DDT**| | -|**[Forge](http://www.allinea.com/products/develop-allinea-forge)**|Allinea Forge is the complete toolsuite for software development - with everything needed to debug, profile, optimize, edit and build C, C++ and FORTRAN applications on Linux for high performance - from single threads through to complex parallel HPC codes with MPI, OpenMP, threads or CUDA.| -|**[PerformanceReports](http://www.allinea.com/products/allinea-performance-reports)**|Allinea Performance Reports are the most effective way to characterize and understand the performance of HPC application runs. One single-page HTML report elegantly answers a range of vital questions for any HPC site. - Is this application well-optimized for the system and the processors it is running on? - Does it benefit from running at this scale? - Are there I/O, networking or threading bottlenecks affecting performance? - Which hardware, software or configuration changes can we make to improve performance further. - How much energy did this application use?| -|**TotalView**| | -|**[Valgrind](http://valgrind.org/downloads/)**|Valgrind: Debugging and profiling tools| +| Module | Description | +| ------ | ----------- | +| aislinn | | +| DDT | | +| [Forge](http://www.allinea.com/products/develop-allinea-forge) | Allinea Forge is the complete toolsuite for software development - with everything needed to debug, profile, optimize, edit and build C, C++ and FORTRAN applications on Linux for high performance - from single threads through to complex parallel HPC codes with MPI, OpenMP, threads or CUDA. | +| [PerformanceReports](http://www.allinea.com/products/allinea-performance-reports) | Allinea Performance Reports are the most effective way to characterize and understand the performance of HPC application runs. One single-page HTML report elegantly answers a range of vital questions for any HPC site. - Is this application well-optimized for the system and the processors it is running on? - Does it benefit from running at this scale? - Are there I/O, networking or threading bottlenecks affecting performance? - Which hardware, software or configuration changes can we make to improve performance further? - How much energy did this application use? | +| TotalView | | +| [Valgrind](http://valgrind.org/downloads/) | Valgrind: Debugging and profiling tools | ## Devel -|Module|Description| -|--|--| -|**[ant](http://ant.apache.org/)**|Apache Ant is a Java library and command-line tool whose mission is to drive processes described in build files as targets and extension points dependent upon each other. The main known usage of Ant is the build of Java applications.| -|**[Autoconf](http://www.gnu.org/software/autoconf/)**|Autoconf is an extensible package of M4 macros that produce shell scripts to automatically configure software source code packages. These scripts can adapt the packages to many kinds of UNIX-like systems without manual user intervention.
Autoconf creates a configuration script for a package from a template file that lists the operating system features that the package can use, in the form of M4 macro calls.| -|**[Automake](http://www.gnu.org/software/automake/automake.html)**|Automake: GNU Standards-compliant Makefile generator| -|**[Autotools](http://autotools.io)**|This bundle collect the standard GNU build tools: Autoconf, Automake and libtool| -|**[Boost](http://www.boost.org/)**|Boost provides free peer-reviewed portable C++ source libraries.| -|**[CMake](http://www.cmake.org)**|CMake, the cross-platform, open-source build system. CMake is a family of tools designed to build, test and package software.| -|**[Doxygen](http://www.doxygen.org)**|Doxygen is a documentation system for C++, C, Java, Objective-C, Python, IDL (Corba and Microsoft flavors), Fortran, VHDL, PHP, C#, and to some extent D.| -|**[fontsproto](http://www.freedesktop.org/wiki/Software/xlibs)**|X11 font extension wire protocol| -|**[glproto](http://www.freedesktop.org/wiki/Software/xlibs)**|X protocol and ancillary headers| -|**[gperf](http://www.gnu.org/software/gperf/)**|GNU gperf is a perfect hash function generator. For a given list of strings, it produces a hash function and hash table, in form of C or C++ code, for looking up a value depending on the input string. The hash function is perfect, which means that the hash table has no collisions, and the hash table lookup needs a single string comparison only.| -|**[guile](http://www.gnu.org/software/guile)**|Guile is the GNU Ubiquitous Intelligent Language for Extensions, the official extension language for the GNU operating system.| -|**[JUnit](http://sourceforge.net/projects/junit)**|A programmer-oriented testing framework for Java.| -|**[libSM](http://www.freedesktop.org/wiki/Software/xlibs)**|X11 Session Management library, which allows for applications to both manage sessions, and make use of session managers to save and restore their state for later use.| -|**[M4](http://www.gnu.org/software/m4/m4.html)**|GNU M4 is an implementation of the traditional Unix macro processor. It is mostly SVR4 compatible although it has some extensions (for example, handling more than 9 positional parameters to macros). GNU M4 also has built-in functions for including files, running shell commands, doing arithmetic, etc.| -|**[make](http://www.gnu.org/software/make/make.html)**|make-3.82: GNU version of make utility| -|**[makedepend](http://www.linuxfromscratch.org/blfs/view/svn/x/makedepend.html)**|The makedepend package contains a C-preprocessor like utility to determine build-time dependencies.| -|**[Maven](http://maven.apache.org/index.html)**|Binary maven install, Apache Maven is a software project management and comprehension tool. Based on the concept of a project object model (POM), Maven can manage a project's build, reporting and documentation from a central piece of information.| -|**[ncurses](http://www.gnu.org/software/ncurses/)**|The Ncurses (new curses) library is a free software emulation of curses in System V Release 4.0, and more. 
It uses Terminfo format, supports pads and color and multiple highlights and forms characters and function-key mapping, and has all the other SYSV-curses enhancements over BSD Curses.| -|**[PCRE](http://www.pcre.org/)**|The PCRE library is a set of functions that implement regular expression pattern matching using the same syntax and semantics as Perl 5.| -|**[PCRE2](http://www.pcre.org/)**|The PCRE library is a set of functions that implement regular expression pattern matching using the same syntax and semantics as Perl 5.| -|**[pkg-config](http://www.freedesktop.org/wiki/Software/pkg-config/)**|pkg-config is a helper tool used when compiling applications and libraries. It helps you insert the correct compiler options on the command line so an application can use gcc -o test test.c `pkg-config --libs --cflags glib-2.0` for instance, rather than hard-coding values on where to find glib (or other libraries).| -|**[python-meep](https://code.launchpad.net/python-meep)**|Python wrapper for the Meep FDTD solver.| -|**[Qt](http://qt-project.org/)**|Qt is a comprehensive cross-platform C++ application framework.| -|**[renderproto](http://www.freedesktop.org/wiki/Software/xlibs)**|Xrender protocol and ancillary headers| -|**[SCons](http://www.scons.org/)**|SCons is a software construction tool.| -|**[Spark](http://spark.apache.org)**|Spark is Hadoop MapReduce done in memory| -|**[SQLite](http://www.sqlite.org/)**|SQLite: SQL Database Engine in a C Library| -|**[SWIG](http://www.swig.org/)**|SWIG is a software development tool that connects programs written in C and C++ with a variety of high-level programming languages.| -|**[xbitmaps](http://www.freedesktop.org/wiki/Software/xlibs)**|provides bitmaps for x| -|**[xcb-proto](http://xcb.freedesktop.org/)**|The X protocol C-language Binding (XCB) is a replacement for Xlib featuring a small footprint, latency hiding, direct access to the protocol, improved threading support, and extensibility.| -|**[xextproto](http://www.freedesktop.org/wiki/Software/xlibs)**|XExtProto protocol headers.| -|**[xineramaproto](http://www.freedesktop.org/wiki/Software/xlibs)**|X protocol and ancillary headers for xinerama| -|**[xorg-macros](http://cgit.freedesktop.org/xorg/util/macros)**|X.org macros utilities.| -|**[xproto](http://www.freedesktop.org/wiki/Software/xlibs)**|X protocol and ancillary headers| -|**[xtrans](http://www.freedesktop.org/wiki/Software/xlibs)**|xtrans includes a number of routines to make X implementations transport-independent; at time of writing, it includes support for UNIX sockets, IPv4, IPv6, and DECnet.| +| Module | Description | +| ------ | ----------- | +| [ant](http://ant.apache.org/) | Apache Ant is a Java library and command-line tool whose mission is to drive processes described in build files as targets and extension points dependent upon each other. The main known usage of Ant is the build of Java applications. | +| [Autoconf](http://www.gnu.org/software/autoconf/) | Autoconf is an extensible package of M4 macros that produce shell scripts to automatically configure software source code packages. These scripts can adapt the packages to many kinds of UNIX-like systems without manual user intervention. Autoconf creates a configuration script for a package from a template file that lists the operating system features that the package can use, in the form of M4 macro calls. 
| +| [Automake](http://www.gnu.org/software/automake/automake.html) | Automake: GNU Standards-compliant Makefile generator | +| [Autotools](http://autotools.io) | This bundle collects the standard GNU build tools: Autoconf, Automake and libtool. | +| [Boost](http://www.boost.org/) | Boost provides free peer-reviewed portable C++ source libraries. | +| [CMake](http://www.cmake.org) | CMake, the cross-platform, open-source build system. CMake is a family of tools designed to build, test and package software. | +| [Doxygen](http://www.doxygen.org) | Doxygen is a documentation system for C++, C, Java, Objective-C, Python, IDL (Corba and Microsoft flavors), Fortran, VHDL, PHP, C#, and to some extent D. | +| [fontsproto](http://www.freedesktop.org/wiki/Software/xlibs) | X11 font extension wire protocol | +| [glproto](http://www.freedesktop.org/wiki/Software/xlibs) | X protocol and ancillary headers | +| [gperf](http://www.gnu.org/software/gperf/) | GNU gperf is a perfect hash function generator. For a given list of strings, it produces a hash function and hash table, in the form of C or C++ code, for looking up a value depending on the input string. The hash function is perfect, which means that the hash table has no collisions, and the hash table lookup needs a single string comparison only. | +| [guile](http://www.gnu.org/software/guile) | Guile is the GNU Ubiquitous Intelligent Language for Extensions, the official extension language for the GNU operating system. | +| [JUnit](http://sourceforge.net/projects/junit) | A programmer-oriented testing framework for Java. | +| [libSM](http://www.freedesktop.org/wiki/Software/xlibs) | X11 Session Management library, which allows for applications to both manage sessions, and make use of session managers to save and restore their state for later use. | +| [M4](http://www.gnu.org/software/m4/m4.html) | GNU M4 is an implementation of the traditional Unix macro processor. It is mostly SVR4 compatible although it has some extensions (for example, handling more than 9 positional parameters to macros). GNU M4 also has built-in functions for including files, running shell commands, doing arithmetic, etc. | +| [make](http://www.gnu.org/software/make/make.html) | make-3.82: GNU version of make utility | +| [makedepend](http://www.linuxfromscratch.org/blfs/view/svn/x/makedepend.html) | The makedepend package contains a C-preprocessor-like utility to determine build-time dependencies. | +| [Maven](http://maven.apache.org/index.html) | Binary Maven install. Apache Maven is a software project management and comprehension tool. Based on the concept of a project object model (POM), Maven can manage a project's build, reporting and documentation from a central piece of information. | +| [ncurses](http://www.gnu.org/software/ncurses/) | The Ncurses (new curses) library is a free software emulation of curses in System V Release 4.0, and more. It uses Terminfo format, supports pads and color and multiple highlights and forms characters and function-key mapping, and has all the other SYSV-curses enhancements over BSD Curses. | +| [PCRE](http://www.pcre.org/) | The PCRE library is a set of functions that implement regular expression pattern matching using the same syntax and semantics as Perl 5. | +| [PCRE2](http://www.pcre.org/) | The PCRE library is a set of functions that implement regular expression pattern matching using the same syntax and semantics as Perl 5.
| +| [pkg-config](http://www.freedesktop.org/wiki/Software/pkg-config/) | pkg-config is a helper tool used when compiling applications and libraries. It helps you insert the correct compiler options on the command line so an application can use gcc -o test test.c `pkg-config --libs --cflags glib-2.0` for instance, rather than hard-coding values on where to find glib (or other libraries). | +| [python-meep](https://code.launchpad.net/python-meep) | Python wrapper for the Meep FDTD solver. | +| [Qt](http://qt-project.org/) | Qt is a comprehensive cross-platform C++ application framework. | +| [renderproto](http://www.freedesktop.org/wiki/Software/xlibs) | Xrender protocol and ancillary headers | +| [SCons](http://www.scons.org/) | SCons is a software construction tool. | +| [Spark](http://spark.apache.org) | Spark is Hadoop MapReduce done in memory | +| [SQLite](http://www.sqlite.org/) | SQLite: SQL Database Engine in a C Library | +| [SWIG](http://www.swig.org/) | SWIG is a software development tool that connects programs written in C and C++ with a variety of high-level programming languages. | +| [xbitmaps](http://www.freedesktop.org/wiki/Software/xlibs) | provides bitmaps for x | +| [xcb-proto](http://xcb.freedesktop.org/) | The X protocol C-language Binding (XCB) is a replacement for Xlib featuring a small footprint, latency hiding, direct access to the protocol, improved threading support, and extensibility. | +| [xextproto](http://www.freedesktop.org/wiki/Software/xlibs) | XExtProto protocol headers. | +| [xineramaproto](http://www.freedesktop.org/wiki/Software/xlibs) | X protocol and ancillary headers for xinerama | +| [xorg-macros](http://cgit.freedesktop.org/xorg/util/macros) | X.org macros utilities. | +| [xproto](http://www.freedesktop.org/wiki/Software/xlibs) | X protocol and ancillary headers | +| [xtrans](http://www.freedesktop.org/wiki/Software/xlibs) | xtrans includes a number of routines to make X implementations transport-independent; at time of writing, it includes support for UNIX sockets, IPv4, IPv6, and DECnet. | ## Geo -|Module|Description| -|--|--| -|**[DCW](http://gmt.soest.hawaii.edu/projects/gmt)**|country polygons for GMT| -|**[GMT](http://gmt.soest.hawaii.edu/)**|GMT is an open source collection of about 80 command-line tools for manipulating geographic and Cartesian data sets (including filtering, trend fitting, gridding, projecting, etc.) and producing PostScript illustrations ranging from simple x-y plots via contour maps to artificially illuminated surfaces and 3D perspective views; the GMT supplements add another 40 more specialized and discipline-specific tools.| -|**[PROJ_4](http://proj.osgeo.org)**|PROJ.4 - Cartographic Projections Library originally written by Gerald Evenden then of the USGS.| +| Module | Description | +| ------ | ----------- | +| [DCW](http://gmt.soest.hawaii.edu/projects/gmt) | country polygons for GMT | +| [GMT](http://gmt.soest.hawaii.edu/) | GMT is an open source collection of about 80 command-line tools for manipulating geographic and Cartesian data sets (including filtering, trend fitting, gridding, projecting, etc.) and producing PostScript illustrations ranging from simple x-y plots via contour maps to artificially illuminated surfaces and 3D perspective views; the GMT supplements add another 40 more specialized and discipline-specific tools. | +| [PROJ_4](http://proj.osgeo.org) | PROJ.4 - Cartographic Projections Library originally written by Gerald Evenden then of the USGS. 
|
 ## Lang
-|Module|Description|
-|--|--|
-|**[Bison](http://www.gnu.org/software/bison)**|Bison is a general-purpose parser generator that converts an annotated context-free grammar into a deterministic LR or generalized LR (GLR) parser employing LALR(1) parser tables.|
-|**[byacc](http://invisible-island.net/byacc/byacc.html)**|Berkeley Yacc (byacc) is generally conceded to be the best yacc variant available. In contrast to bison, it is written to avoid dependencies upon a particular compiler.|
-|**[flex](http://flex.sourceforge.net/)**|Flex (Fast Lexical Analyzer) is a tool for generating scanners. A scanner, sometimes called a tokenizer, is a program which recognizes lexical patterns in text.|
-|**[Java](http://java.com/)**|Java Platform, Standard Edition (Java SE) lets you develop and deploy Java applications on desktops and servers.|
-|**[libgdiplus](https://github.com/mono/libgdiplus)**|An Open Source implementation of the GDI+ API.|
-|**[Lua](http://www.lua.org/)**|Lua is a powerful, fast, lightweight, embeddable scripting language. Lua combines simple procedural syntax with powerful data description constructs based on associative arrays and extensible semantics. Lua is dynamically typed, runs by interpreting bytecode for a register-based virtual machine, and has automatic memory management with incremental garbage collection, making it ideal for configuration, scripting, and rapid prototyping.|
-|**[Mono](http://www.mono-project.com)**|Mono is an open source implementation of Microsoft's .NET Framework based on the ECMA standards for C# and the Common Language Runtime.|
-|**[NASM](http://www.nasm.us/)**|NASM: General-purpose x86 assembler|
-|**[OpenCL-builder](https://software.intel.com/en-us/intel-opencl)**|OpenCL™ is the first open, royalty-free standard for cross-platform, parallel programming of modern processors found in personal computers, servers and handheld/embedded devices. OpenCL (Open Computing Language) greatly improves speed and responsiveness for a wide spectrum of applications in numerous market categories from gaming and entertainment to scientific and medical software. This is builder (formerly runtime) package.|
-|**[OpenCL-runtime](https://software.intel.com/en-us/intel-opencl)**|OpenCL™ is the first open, royalty-free standard for cross-platform, parallel programming of modern processors found in personal computers, servers and handheld/embedded devices. OpenCL (Open Computing Language) greatly improves speed and responsiveness for a wide spectrum of applications in numerous market categories from gaming and entertainment to scientific and medical software.|
-|**[Perl](http://www.perl.org/)**|Larry Wall's Practical Extraction and Report Language|
-|**[Python](http://python.org/)**|Python is a programming language that lets you work more quickly and integrate your systems more effectively.|
-|**[R](http://www.r-project.org/)**|R is a free software environment for statistical computing and graphics.|
-|**[Racket](http://racket-lang.org)**|Racket is a full-spectrum programming language. It goes beyond Lisp and Scheme with dialects that support objects, types, laziness, and more.|
-|**[Ruby](https://www.ruby-lang.org)**|Ruby is a dynamic, open source programming language with a focus on simplicity and productivity. It has an elegant syntax that is natural to read and easy to write.|
-|**[SIP](http://www.riverbankcomputing.com/software/sip/)**|SIP is a tool that makes it very easy to create Python bindings for C and C++ libraries.|
-|**[SnuCL](http://snucl.snu.ac.kr)**|An OpenCL Framework for Heterogeneous Clusters|
-|**[Tcl](http://www.tcl.tk/)**|Tcl (Tool Command Language) is a very powerful but easy to learn dynamic programming language, suitable for a very wide range of uses, including web and desktop applications, networking, administration, testing and many more.|
+| Module | Description |
+| ------ | ----------- |
+| [Bison](http://www.gnu.org/software/bison) | Bison is a general-purpose parser generator that converts an annotated context-free grammar into a deterministic LR or generalized LR (GLR) parser employing LALR(1) parser tables. |
+| [byacc](http://invisible-island.net/byacc/byacc.html) | Berkeley Yacc (byacc) is generally conceded to be the best yacc variant available. In contrast to bison, it is written to avoid dependencies upon a particular compiler. |
+| [flex](http://flex.sourceforge.net/) | Flex (Fast Lexical Analyzer) is a tool for generating scanners. A scanner, sometimes called a tokenizer, is a program which recognizes lexical patterns in text. |
+| [Java](http://java.com/) | Java Platform, Standard Edition (Java SE) lets you develop and deploy Java applications on desktops and servers. |
+| [libgdiplus](https://github.com/mono/libgdiplus) | An Open Source implementation of the GDI+ API. |
+| [Lua](http://www.lua.org/) | Lua is a powerful, fast, lightweight, embeddable scripting language. Lua combines simple procedural syntax with powerful data description constructs based on associative arrays and extensible semantics. Lua is dynamically typed, runs by interpreting bytecode for a register-based virtual machine, and has automatic memory management with incremental garbage collection, making it ideal for configuration, scripting, and rapid prototyping. |
+| [Mono](http://www.mono-project.com) | Mono is an open source implementation of Microsoft's .NET Framework based on the ECMA standards for C# and the Common Language Runtime. |
+| [NASM](http://www.nasm.us/) | NASM: General-purpose x86 assembler |
+| [OpenCL-builder](https://software.intel.com/en-us/intel-opencl) | OpenCL™ is the first open, royalty-free standard for cross-platform, parallel programming of modern processors found in personal computers, servers and handheld/embedded devices. OpenCL (Open Computing Language) greatly improves speed and responsiveness for a wide spectrum of applications in numerous market categories from gaming and entertainment to scientific and medical software. This is the builder (formerly runtime) package. |
+| [OpenCL-runtime](https://software.intel.com/en-us/intel-opencl) | OpenCL™ is the first open, royalty-free standard for cross-platform, parallel programming of modern processors found in personal computers, servers and handheld/embedded devices. OpenCL (Open Computing Language) greatly improves speed and responsiveness for a wide spectrum of applications in numerous market categories from gaming and entertainment to scientific and medical software. |
+| [Perl](http://www.perl.org/) | Larry Wall's Practical Extraction and Report Language |
+| [Python](http://python.org/) | Python is a programming language that lets you work more quickly and integrate your systems more effectively. |
+| [R](http://www.r-project.org/) | R is a free software environment for statistical computing and graphics. |
+| [Racket](http://racket-lang.org) | Racket is a full-spectrum programming language. It goes beyond Lisp and Scheme with dialects that support objects, types, laziness, and more. |
+| [Ruby](https://www.ruby-lang.org) | Ruby is a dynamic, open source programming language with a focus on simplicity and productivity. It has an elegant syntax that is natural to read and easy to write. |
+| [SIP](http://www.riverbankcomputing.com/software/sip/) | SIP is a tool that makes it very easy to create Python bindings for C and C++ libraries. |
+| [SnuCL](http://snucl.snu.ac.kr) | An OpenCL Framework for Heterogeneous Clusters |
+| [Tcl](http://www.tcl.tk/) | Tcl (Tool Command Language) is a very powerful but easy to learn dynamic programming language, suitable for a very wide range of uses, including web and desktop applications, networking, administration, testing and many more. |
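+
+For example, one of the language modules listed above is activated with the `module` command before first use. A minimal sketch (the module name and the version actually installed on the cluster may differ):
+
+```
+$ module load Python
+$ python --version
+```
+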
 ## Lib
-|Module|Description|
-|--|--|
-|**[FOX](http://fox-toolkit.org)**|FOX is a C++ based Toolkit for developing Graphical User Interfaces easily and effectively. It offers a wide, and growing, collection of Controls, and provides state of the art facilities such as drag and drop, selection, as well as OpenGL widgets for 3D graphical manipulation.|
-|**[libdrm](http://dri.freedesktop.org)**|Direct Rendering Manager runtime library.|
-|**[libffi](http://sourceware.org/libffi/)**|The libffi library provides a portable, high level programming interface to various calling conventions. This allows a programmer to call any function specified by a call interface description at run-time.|
-|**[libfontenc](http://www.freedesktop.org/wiki/Software/xlibs/)**|X11 font encoding library|
-|**[libjpeg-turbo](http://sourceforge.net/libjpeg-turbo/)**|libjpeg-turbo is a fork of the original IJG libjpeg which uses SIMD to accelerate baseline JPEG compression and decompression. libjpeg is a library that implements JPEG image encoding, decoding and transcoding.|
-|**[libmatheval](http://www.gnu.org/software/libmatheval/)**|GNU libmatheval is a library (callable from C and Fortran) to parse and evaluate symbolic expressions input as text.|
-|**[libMesh](http://libmesh.github.io/)**|The libMesh library provides a framework for the numerical simulation of partial differential equations using arbitrary unstructured discretizations on serial and parallel platforms. A major goal of the library is to provide support for adaptive mesh refinement (AMR) computations in parallel while allowing a research scientist to focus on the physics they are modeling.|
-|**[libpng](http://www.libpng.org/pub/png/libpng.html)**|libpng is the official PNG reference library|
-|**[libpthread-stubs](http://xcb.freedesktop.org/)**|The X protocol C-language Binding (XCB) is a replacement for Xlib featuring a small footprint, latency hiding, direct access to the protocol, improved threading support, and extensibility.|
-|**[libreadline](http://cnswww.cns.cwru.edu/php/chet/readline/rltop.html)**|The GNU Readline library provides a set of functions for use by applications that allow users to edit command lines as they are typed in. Both Emacs and vi editing modes are available. The Readline library includes additional functions to maintain a list of previously-entered command lines, to recall and perhaps reedit those lines, and perform csh-like history expansion on previous commands.|
-|**[LibTIFF](http://www.remotesensing.org/libtiff/)**|tiff: Library and tools for reading and writing TIFF data files|
-|**[libtool](http://www.gnu.org/software/libtool)**|GNU libtool is a generic library support script. Libtool hides the complexity of using shared libraries behind a consistent, portable interface.|
-|**[libunistring](http://www.gnu.org/software/libunistring/)**|This library provides functions for manipulating Unicode strings and for manipulating C strings according to the Unicode standard.|
-|**[libunwind](http://www.nongnu.org/libunwind/)**|The primary goal of libunwind is to define a portable and efficient C programming interface (API) to determine the call-chain of a program. The API additionally provides the means to manipulate the preserved (callee-saved) state of each call-frame and to resume execution at any point in the call-chain (non-local goto). The API supports both local (same-process) and remote (across-process) operation. As such, the API is useful in a number of applications|
-|**[libxcb](http://xcb.freedesktop.org/)**|The X protocol C-language Binding (XCB) is a replacement for Xlib featuring a small footprint, latency hiding, direct access to the protocol, improved threading support, and extensibility.|
-|**[libxml2](http://xmlsoft.org/)**|Libxml2 is the XML C parser and toolchain developed for the Gnome project (but usable outside of the Gnome platform).|
-|**libxslt**| |
-|**libyaml**| |
-|**lxml**| |
-|**[OpenCoarrays](http://www.opencoarrays.org)**|OpenCoarrays is an open-source software project for developing, porting and tuning transport layers that support coarray Fortran compilers.|
-|**[PROJ](http://trac.osgeo.org/proj/)**|Program proj is a standard Unix filter function which converts geographic longitude and latitude coordinates into cartesian coordinates|
-|**PyYAML**| |
-|**[QCA](http://delta.affinix.com/qca/)**|QCA aims to provide a straightforward and cross-platform crypto API, using Qt datatypes and conventions. QCA separates the API from the implementation, using plugins known as Providers|
-|**[QGIS](http://www.qgis.org)**|A Free and Open Source Geographic Information System|
-|**[Qwt](http://qwt.sourceforge.net/index.html)**|The Qwt library contains GUI Components and utility classes which are primarily useful for programs with a technical background. Beside a framework for 2D plots it provides scales, sliders, dials, compasses, thermometers, wheels and knobs to control or display values, arrays, or ranges of type double.|
-|**[SIONlib](http://www.fz-juelich.de/ias/jsc/EN/Expertise/Support/Software/SIONlib/_node.html)**|SIONlib is a scalable I/O library for parallel access to task-local files. The library not only supports writing and reading binary data to or from several thousands of processors into a single or a small number of physical files, but also provides global open and close functions to access SIONlib files in parallel. This package provides a stripped-down installation of SIONlib for use with performance tools (e.g., Score-P), with renamed symbols to avoid conflicts when an application using SIONlib itself is linked against a tool requiring a different SIONlib version.|
-|**[spatialindex](https://libspatialindex.github.io/index.html)**|The purpose of this library is to provide: * An extensible framework that will support robust spatial indexing methods. * Support for sophisticated spatial queries. Range, point location, nearest neighbor and k-nearest neighbor as well as parametric queries (defined by spatial constraints) should be easy to deploy and run. * Easy to use interfaces for inserting, deleting and updating information.|
-|**[SpatiaLite](https://www.gaia-gis.it/fossil/libspatialite/index)**|SpatiaLite is an open source library intended to extend the SQLite core to support fully fledged Spatial SQL capabilities.|
-|**[tbb](http://software.intel.com/en-us/articles/intel-tbb/)**|Intel Threading Building Blocks 4.0 (Intel TBB) is a widely used, award-winning C++ template library for creating reliable, portable, and scalable parallel applications. Use Intel TBB for a simple and rapid way of developing robust task-based parallel applications that scale to available processor cores, are compatible with multiple environments, and are easier to maintain. Intel TBB is the most proficient way to implement future-proof parallel applications that tap into the power and performance of multicore and manycore hardware platforms.|
-|**[zlib](http://www.zlib.net/)**|zlib is designed to be a free, general-purpose, legally unencumbered -- that is, not covered by any patents -- lossless data-compression library for use on virtually any computer hardware and operating system.|
+| Module | Description |
+| ------ | ----------- |
+| [FOX](http://fox-toolkit.org) | FOX is a C++ based Toolkit for developing Graphical User Interfaces easily and effectively. It offers a wide, and growing, collection of Controls, and provides state of the art facilities such as drag and drop, selection, as well as OpenGL widgets for 3D graphical manipulation. |
+| [libdrm](http://dri.freedesktop.org) | Direct Rendering Manager runtime library. |
+| [libffi](http://sourceware.org/libffi/) | The libffi library provides a portable, high level programming interface to various calling conventions. This allows a programmer to call any function specified by a call interface description at run-time. |
+| [libfontenc](http://www.freedesktop.org/wiki/Software/xlibs/) | X11 font encoding library |
+| [libjpeg-turbo](http://sourceforge.net/libjpeg-turbo/) | libjpeg-turbo is a fork of the original IJG libjpeg which uses SIMD to accelerate baseline JPEG compression and decompression. libjpeg is a library that implements JPEG image encoding, decoding and transcoding. |
+| [libmatheval](http://www.gnu.org/software/libmatheval/) | GNU libmatheval is a library (callable from C and Fortran) to parse and evaluate symbolic expressions input as text. |
+| [libMesh](http://libmesh.github.io/) | The libMesh library provides a framework for the numerical simulation of partial differential equations using arbitrary unstructured discretizations on serial and parallel platforms. A major goal of the library is to provide support for adaptive mesh refinement (AMR) computations in parallel while allowing a research scientist to focus on the physics they are modeling. |
+| [libpng](http://www.libpng.org/pub/png/libpng.html) | libpng is the official PNG reference library |
+| [libpthread-stubs](http://xcb.freedesktop.org/) | The X protocol C-language Binding (XCB) is a replacement for Xlib featuring a small footprint, latency hiding, direct access to the protocol, improved threading support, and extensibility. |
+| [libreadline](http://cnswww.cns.cwru.edu/php/chet/readline/rltop.html) | The GNU Readline library provides a set of functions for use by applications that allow users to edit command lines as they are typed in. Both Emacs and vi editing modes are available. The Readline library includes additional functions to maintain a list of previously-entered command lines, to recall and perhaps reedit those lines, and perform csh-like history expansion on previous commands. |
+| [LibTIFF](http://www.remotesensing.org/libtiff/) | tiff: Library and tools for reading and writing TIFF data files |
+| [libtool](http://www.gnu.org/software/libtool) | GNU libtool is a generic library support script. Libtool hides the complexity of using shared libraries behind a consistent, portable interface. |
+| [libunistring](http://www.gnu.org/software/libunistring/) | This library provides functions for manipulating Unicode strings and for manipulating C strings according to the Unicode standard. |
+| [libunwind](http://www.nongnu.org/libunwind/) | The primary goal of libunwind is to define a portable and efficient C programming interface (API) to determine the call-chain of a program. The API additionally provides the means to manipulate the preserved (callee-saved) state of each call-frame and to resume execution at any point in the call-chain (non-local goto). The API supports both local (same-process) and remote (across-process) operation. As such, the API is useful in a number of applications |
+| [libxcb](http://xcb.freedesktop.org/) | The X protocol C-language Binding (XCB) is a replacement for Xlib featuring a small footprint, latency hiding, direct access to the protocol, improved threading support, and extensibility. |
+| [libxml2](http://xmlsoft.org/) | Libxml2 is the XML C parser and toolchain developed for the Gnome project (but usable outside of the Gnome platform). |
+| libxslt | |
+| libyaml | |
+| lxml | |
+| [OpenCoarrays](http://www.opencoarrays.org) | OpenCoarrays is an open-source software project for developing, porting and tuning transport layers that support coarray Fortran compilers. |
+| [PROJ](http://trac.osgeo.org/proj/) | Program proj is a standard Unix filter function which converts geographic longitude and latitude coordinates into cartesian coordinates |
+| PyYAML | |
+| [QCA](http://delta.affinix.com/qca/) | QCA aims to provide a straightforward and cross-platform crypto API, using Qt datatypes and conventions. QCA separates the API from the implementation, using plugins known as Providers |
+| [QGIS](http://www.qgis.org) | A Free and Open Source Geographic Information System |
+| [Qwt](http://qwt.sourceforge.net/index.html) | The Qwt library contains GUI Components and utility classes which are primarily useful for programs with a technical background. Besides a framework for 2D plots it provides scales, sliders, dials, compasses, thermometers, wheels and knobs to control or display values, arrays, or ranges of type double. |
+| [SIONlib](http://www.fz-juelich.de/ias/jsc/EN/Expertise/Support/Software/SIONlib/_node.html) | SIONlib is a scalable I/O library for parallel access to task-local files. The library not only supports writing and reading binary data to or from several thousands of processors into a single or a small number of physical files, but also provides global open and close functions to access SIONlib files in parallel. This package provides a stripped-down installation of SIONlib for use with performance tools (e.g., Score-P), with renamed symbols to avoid conflicts when an application using SIONlib itself is linked against a tool requiring a different SIONlib version. |
+| [spatialindex](https://libspatialindex.github.io/index.html) | The purpose of this library is to provide: * An extensible framework that will support robust spatial indexing methods. * Support for sophisticated spatial queries. Range, point location, nearest neighbor and k-nearest neighbor as well as parametric queries (defined by spatial constraints) should be easy to deploy and run. * Easy to use interfaces for inserting, deleting and updating information. |
+| [SpatiaLite](https://www.gaia-gis.it/fossil/libspatialite/index) | SpatiaLite is an open source library intended to extend the SQLite core to support fully fledged Spatial SQL capabilities. |
+| [tbb](http://software.intel.com/en-us/articles/intel-tbb/) | Intel Threading Building Blocks 4.0 (Intel TBB) is a widely used, award-winning C++ template library for creating reliable, portable, and scalable parallel applications. Use Intel TBB for a simple and rapid way of developing robust task-based parallel applications that scale to available processor cores, are compatible with multiple environments, and are easier to maintain. Intel TBB is the most proficient way to implement future-proof parallel applications that tap into the power and performance of multicore and manycore hardware platforms. |
+| [zlib](http://www.zlib.net/) | zlib is designed to be a free, general-purpose, legally unencumbered -- that is, not covered by any patents -- lossless data-compression library for use on virtually any computer hardware and operating system. |
 ## Math
-|Module|Description|
-|--|--|
-|**[FIAT](https://bitbucket.org/fenics-project/fiat)**|The FInite element Automatic Tabulator FIAT supports generation of arbitrary order instances of the Lagrange elements on lines, triangles, and tetrahedra. It is also capable of generating arbitrary order instances of Jacobi-type quadrature rules on the same element shapes.|
-|**[GEOS](http://trac.osgeo.org/geos)**|GEOS (Geometry Engine - Open Source) is a C++ port of the Java Topology Suite (JTS)|
-|**[GMP](http://gmplib.org/)**|GMP is a free library for arbitrary precision arithmetic, operating on signed integers, rational numbers, and floating point numbers.|
-|**[Harminv](http://ab-initio.mit.edu/wiki/index.php/Harminv)**|Harminv is a free program (and accompanying library) to solve the problem of harmonic inversion - given a discrete-time, finite-length signal that consists of a sum of finitely-many sinusoids (possibly exponentially decaying) in a given bandwidth, it determines the frequencies, decay constants, amplitudes, and phases of those sinusoids.|
-|**[ISL](http://isl.gforge.inria.fr/)**|isl is a library for manipulating sets and relations of integer points bounded by linear constraints.|
-|**[METIS](http://glaros.dtc.umn.edu/gkhome/metis/metis/overview)**|METIS is a set of serial programs for partitioning graphs, partitioning finite element meshes, and producing fill reducing orderings for sparse matrices. The algorithms implemented in METIS are based on the multilevel recursive-bisection, multilevel k-way, and multi-constraint partitioning schemes.|
-|**MPC**| |
-|**[MPFR](http://www.mpfr.org)**|The MPFR library is a C library for multiple-precision floating-point computations with correct rounding.|
-|**[numpy](http://www.numpy.org)**|NumPy is the fundamental package for scientific computing with Python. It contains among other things: a powerful N-dimensional array object, sophisticated (broadcasting) functions, tools for integrating C/C++ and Fortran code, useful linear algebra, Fourier transform, and random number capabilities. Besides its obvious scientific uses, NumPy can also be used as an efficient multi-dimensional container of generic data. Arbitrary data-types can be defined. This allows NumPy to seamlessly and speedily integrate with a wide variety of databases.|
-|**[Octave](http://www.gnu.org/software/octave/)**|GNU Octave is a high-level interpreted language, primarily intended for numerical computations.|
-|**[ParMETIS](http://glaros.dtc.umn.edu/gkhome/metis/parmetis/overview)**|ParMETIS is an MPI-based parallel library that implements a variety of algorithms for partitioning unstructured graphs, meshes, and for computing fill-reducing orderings of sparse matrices. ParMETIS extends the functionality provided by METIS and includes routines that are especially suited for parallel AMR computations and large scale numerical simulations. The algorithms implemented in ParMETIS are based on the parallel multilevel k-way graph-partitioning, adaptive repartitioning, and parallel multi-constrained partitioning schemes.|
-|**[ScientificPython](https://sourcesup.cru.fr/projects/scientific-py/)**|ScientificPython is a collection of Python modules for scientific computing. It contains support for geometry, mathematical functions, statistics, physical units, IO, visualization, and parallelization.|
-|**[SCOTCH](http://gforge.inria.fr/projects/scotch/)**|Software package and libraries for sequential and parallel graph partitioning, static mapping, and sparse matrix block ordering, and sequential mesh and hypergraph partitioning.|
-|**[sympy](http://sympy.org/)**|SymPy is a Python library for symbolic mathematics. It aims to become a full-featured computer algebra system (CAS) while keeping the code as simple as possible in order to be comprehensible and easily extensible. SymPy is written entirely in Python and does not require any external libraries.|
+| Module | Description |
+| ------ | ----------- |
+| [FIAT](https://bitbucket.org/fenics-project/fiat) | The FInite element Automatic Tabulator FIAT supports generation of arbitrary order instances of the Lagrange elements on lines, triangles, and tetrahedra. It is also capable of generating arbitrary order instances of Jacobi-type quadrature rules on the same element shapes. |
+| [GEOS](http://trac.osgeo.org/geos) | GEOS (Geometry Engine - Open Source) is a C++ port of the Java Topology Suite (JTS) |
+| [GMP](http://gmplib.org/) | GMP is a free library for arbitrary precision arithmetic, operating on signed integers, rational numbers, and floating point numbers. |
+| [Harminv](http://ab-initio.mit.edu/wiki/index.php/Harminv) | Harminv is a free program (and accompanying library) to solve the problem of harmonic inversion - given a discrete-time, finite-length signal that consists of a sum of finitely-many sinusoids (possibly exponentially decaying) in a given bandwidth, it determines the frequencies, decay constants, amplitudes, and phases of those sinusoids. |
+| [ISL](http://isl.gforge.inria.fr/) | isl is a library for manipulating sets and relations of integer points bounded by linear constraints. |
+| [METIS](http://glaros.dtc.umn.edu/gkhome/metis/metis/overview) | METIS is a set of serial programs for partitioning graphs, partitioning finite element meshes, and producing fill reducing orderings for sparse matrices. The algorithms implemented in METIS are based on the multilevel recursive-bisection, multilevel k-way, and multi-constraint partitioning schemes. |
+| MPC | |
+| [MPFR](http://www.mpfr.org) | The MPFR library is a C library for multiple-precision floating-point computations with correct rounding. |
+| [numpy](http://www.numpy.org) | NumPy is the fundamental package for scientific computing with Python. It contains among other things: a powerful N-dimensional array object, sophisticated (broadcasting) functions, tools for integrating C/C++ and Fortran code, useful linear algebra, Fourier transform, and random number capabilities. Besides its obvious scientific uses, NumPy can also be used as an efficient multi-dimensional container of generic data. Arbitrary data-types can be defined. This allows NumPy to seamlessly and speedily integrate with a wide variety of databases. |
+| [Octave](http://www.gnu.org/software/octave/) | GNU Octave is a high-level interpreted language, primarily intended for numerical computations. |
+| [ParMETIS](http://glaros.dtc.umn.edu/gkhome/metis/parmetis/overview) | ParMETIS is an MPI-based parallel library that implements a variety of algorithms for partitioning unstructured graphs, meshes, and for computing fill-reducing orderings of sparse matrices. ParMETIS extends the functionality provided by METIS and includes routines that are especially suited for parallel AMR computations and large scale numerical simulations. The algorithms implemented in ParMETIS are based on the parallel multilevel k-way graph-partitioning, adaptive repartitioning, and parallel multi-constrained partitioning schemes. |
+| [ScientificPython](https://sourcesup.cru.fr/projects/scientific-py/) | ScientificPython is a collection of Python modules for scientific computing. It contains support for geometry, mathematical functions, statistics, physical units, IO, visualization, and parallelization. |
+| [SCOTCH](http://gforge.inria.fr/projects/scotch/) | Software package and libraries for sequential and parallel graph partitioning, static mapping, and sparse matrix block ordering, and sequential mesh and hypergraph partitioning. |
+| [sympy](http://sympy.org/) | SymPy is a Python library for symbolic mathematics. It aims to become a full-featured computer algebra system (CAS) while keeping the code as simple as possible in order to be comprehensible and easily extensible. SymPy is written entirely in Python and does not require any external libraries. |
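+
+As a quick illustration, an interpreter such as GNU Octave can be used in batch mode once its module is loaded (a sketch; assumes the `Octave` module listed above is installed):
+
+```
+$ module load Octave
+$ octave --eval "disp(pi)"
+```
+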
 ## Mpi
-|Module|Description|
-|--|--|
-|**[impi](http://software.intel.com/en-us/intel-mpi-library/)**|The Intel(R) MPI Library for Linux* OS is a multi-fabric message passing library based on ANL MPICH2 and OSU MVAPICH2. The Intel MPI Library for Linux OS implements the Message Passing Interface, version 2 (MPI-2) specification.|
-|**[MPI_NET](http://www.osl.iu.edu/research/mpi.net/)**|MPI.NET is a high-performance, easy-to-use implementation of the Message Passing Interface (MPI) for Microsoft's .NET environment|
-|**[MPICH](http://www.mpich.org/)**|MPICH v3.x is an open source high-performance MPI 3.0 implementation. It does not support InfiniBand (use MVAPICH2 with InfiniBand devices).|
-|**mpt**| |
-|**[MVAPICH2](http://mvapich.cse.ohio-state.edu/overview/mvapich2/)**|This is an MPI 3.0 implementation. It is based on MPICH2 and MVICH.|
-|**[OpenMPI](http://www.open-mpi.org/)**|The Open MPI Project is an open source MPI-2 implementation.|
+| Module | Description |
+| ------ | ----------- |
+| [impi](http://software.intel.com/en-us/intel-mpi-library/) | The Intel(R) MPI Library for Linux* OS is a multi-fabric message passing library based on ANL MPICH2 and OSU MVAPICH2. The Intel MPI Library for Linux OS implements the Message Passing Interface, version 2 (MPI-2) specification. |
+| [MPI_NET](http://www.osl.iu.edu/research/mpi.net/) | MPI.NET is a high-performance, easy-to-use implementation of the Message Passing Interface (MPI) for Microsoft's .NET environment |
+| [MPICH](http://www.mpich.org/) | MPICH v3.x is an open source high-performance MPI 3.0 implementation. It does not support InfiniBand (use MVAPICH2 with InfiniBand devices). |
+| mpt | |
+| [MVAPICH2](http://mvapich.cse.ohio-state.edu/overview/mvapich2/) | This is an MPI 3.0 implementation. It is based on MPICH2 and MVICH. |
+| [OpenMPI](http://www.open-mpi.org/) | The Open MPI Project is an open source MPI-2 implementation. |
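+
+A minimal sketch of building and launching an MPI program with one of the implementations above (assumes the `OpenMPI` module; `hello_mpi.c` stands for your own MPI source file, and launcher options may differ per site):
+
+```
+$ module load OpenMPI
+$ mpicc hello_mpi.c -o hello_mpi
+$ mpirun -np 4 ./hello_mpi
+```
+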
 ## Numlib
-|Module|Description|
-|--|--|
-|**[Armadillo](http://arma.sourceforge.net/)**|Armadillo is an open-source C++ linear algebra library (matrix maths) aiming towards a good balance between speed and ease of use. Integer, floating point and complex numbers are supported, as well as a subset of trigonometric and statistics functions.|
-|**[arpack-ng](http://forge.scilab.org/index.php/p/arpack-ng/)**|ARPACK is a collection of Fortran77 subroutines designed to solve large scale eigenvalue problems.|
-|**[FFTW](http://www.fftw.org)**|FFTW is a C subroutine library for computing the discrete Fourier transform (DFT) in one or more dimensions, of arbitrary input size, and of both real and complex data.|
-|**[GSL](http://www.gnu.org/software/gsl/)**|The GNU Scientific Library (GSL) is a numerical library for C and C++ programmers. The library provides a wide range of mathematical routines such as random number generators, special functions and least-squares fitting.|
-|**[Hypre](https://computation.llnl.gov/casc/linear_solvers/sls_hypre.html)**|Hypre is a library for solving large, sparse linear systems of equations on massively parallel computers. The problems of interest arise in the simulation codes being developed at LLNL and elsewhere to study physical phenomena in the defense, environmental, energy, and biological sciences.|
-|**[imkl](http://software.intel.com/en-us/intel-mkl/)**|Intel Math Kernel Library is a library of highly optimized, extensively threaded math routines for science, engineering, and financial applications that require maximum performance. Core math functions include BLAS, LAPACK, ScaLAPACK, Sparse Solvers, Fast Fourier Transforms, Vector Math, and more.|
-|**[LAPACKE](http://www.netlib.org/lapack/lapacke.html)**|LAPACKE C Interface to LAPACK header files and library|
-|**[OpenBLAS](http://xianyi.github.com/OpenBLAS/)**|OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version.|
-|**[PETSc](http://www.mcs.anl.gov/petsc)**|PETSc, pronounced PET-see (the S is silent), is a suite of data structures and routines for the scalable (parallel) solution of scientific applications modeled by partial differential equations.|
-|**[ScaLAPACK](http://www.netlib.org/scalapack/)**|The ScaLAPACK (or Scalable LAPACK) library includes a subset of LAPACK routines redesigned for distributed memory MIMD parallel computers.|
-|**[SuiteSparse](http://www.cise.ufl.edu/research/sparse/SuiteSparse/)**|SuiteSparse is a collection of libraries manipulate sparse matrices.|
+| Module | Description |
+| ------ | ----------- |
+| [Armadillo](http://arma.sourceforge.net/) | Armadillo is an open-source C++ linear algebra library (matrix maths) aiming towards a good balance between speed and ease of use. Integer, floating point and complex numbers are supported, as well as a subset of trigonometric and statistics functions. |
+| [arpack-ng](http://forge.scilab.org/index.php/p/arpack-ng/) | ARPACK is a collection of Fortran77 subroutines designed to solve large scale eigenvalue problems. |
+| [FFTW](http://www.fftw.org) | FFTW is a C subroutine library for computing the discrete Fourier transform (DFT) in one or more dimensions, of arbitrary input size, and of both real and complex data. |
+| [GSL](http://www.gnu.org/software/gsl/) | The GNU Scientific Library (GSL) is a numerical library for C and C++ programmers. The library provides a wide range of mathematical routines such as random number generators, special functions and least-squares fitting. |
+| [Hypre](https://computation.llnl.gov/casc/linear_solvers/sls_hypre.html) | Hypre is a library for solving large, sparse linear systems of equations on massively parallel computers. The problems of interest arise in the simulation codes being developed at LLNL and elsewhere to study physical phenomena in the defense, environmental, energy, and biological sciences. |
+| [imkl](http://software.intel.com/en-us/intel-mkl/) | Intel Math Kernel Library is a library of highly optimized, extensively threaded math routines for science, engineering, and financial applications that require maximum performance. Core math functions include BLAS, LAPACK, ScaLAPACK, Sparse Solvers, Fast Fourier Transforms, Vector Math, and more. |
+| [LAPACKE](http://www.netlib.org/lapack/lapacke.html) | LAPACKE C Interface to LAPACK header files and library |
+| [OpenBLAS](http://xianyi.github.com/OpenBLAS/) | OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version. |
+| [PETSc](http://www.mcs.anl.gov/petsc) | PETSc, pronounced PET-see (the S is silent), is a suite of data structures and routines for the scalable (parallel) solution of scientific applications modeled by partial differential equations. |
+| [ScaLAPACK](http://www.netlib.org/scalapack/) | The ScaLAPACK (or Scalable LAPACK) library includes a subset of LAPACK routines redesigned for distributed memory MIMD parallel computers. |
+| [SuiteSparse](http://www.cise.ufl.edu/research/sparse/SuiteSparse/) | SuiteSparse is a collection of libraries to manipulate sparse matrices. |
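+
+Numerical libraries are linked in at compile time. A minimal sketch with OpenBLAS (assumes the `OpenBLAS` module and a GNU compiler are loaded; `my_solver.c` stands for your own source file):
+
+```
+$ module load OpenBLAS
+$ gcc my_solver.c -o my_solver -lopenblas
+```
+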
 ## Perf
-|Module|Description|
-|--|--|
-|**[Advisor](https://software.intel.com/intel-advisor-xe)**|Vectorization Optimization and Thread Prototyping - Vectorize & thread code or performance “dies” - Easy workflow + data + tips = faster code faster - Prioritize, Prototype & Predict performance gain|
-|**[Cube](http://www.scalasca.org/software/cube-4.x/download.html)**|Cube, which is used as performance report explorer for Scalasca and Score-P, is a generic tool for displaying a multi-dimensional performance space consisting of the dimensions (i) performance metric, (ii) call path, and (iii) system resource. Each dimension can be represented as a tree, where non-leaf nodes of the tree can be collapsed or expanded to achieve the desired level of granularity.|
-|**[ipp](http://software.intel.com/en-us/articles/intel-ipp/)**|Intel Integrated Performance Primitives (Intel IPP) is an extensive library of multicore-ready, highly optimized software functions for multimedia, data processing, and communications applications. Intel IPP offers thousands of optimized functions covering frequently used fundamental algorithms.|
-|**MAP**| |
-|**[OPARI2](http://www.score-p.org)**|OPARI2, the successor of Forschungszentrum Juelich's OPARI, is a source-to-source instrumentation tool for OpenMP and hybrid codes. It surrounds OpenMP directives and runtime library calls with calls to the POMP2 measurement interface.|
-|**[OTF2](http://www.score-p.org)**|The Open Trace Format 2 is a highly scalable, memory efficient event trace data format plus support library. It will become the new standard trace format for Scalasca, Vampir, and Tau and is open for other tools.|
-|**[PAPI](http://icl.cs.utk.edu/projects/papi/)**|PAPI provides the tool designer and application engineer with a consistent interface and methodology for use of the performance counter hardware found in most major microprocessors. PAPI enables software engineers to see, in near real time, the relation between software performance and processor events. In addition Component PAPI provides access to a collection of components that expose performance measurement opportunites across the hardware and software stack.|
-|**perfboost**| |
-|**perfcatcher**| |
-|**PerfReports**| |
-|**perfsuite**| |
-|**[Vampir](http://www.vampir.eu)**|The Vampir software tool provides an easy-to-use framework that enables developers to quickly display and analyze arbitrary program behavior at any level of detail. The tool suite implements optimized event analysis algorithms and customizable displays that enable fast and interactive rendering of very complex performance monitoring data.|
-|**[VampirServer](http://www.vampir.eu)**|The Vampir software tool provides an easy-to-use framework that enables developers to quickly display and analyze arbitrary program behavior at any level of detail. The tool suite implements optimized event analysis algorithms and customizable displays that enable fast and interactive rendering of very complex performance monitoring data.|
+| Module | Description |
+| ------ | ----------- |
+| [Advisor](https://software.intel.com/intel-advisor-xe) | Vectorization Optimization and Thread Prototyping - Vectorize & thread code or performance “dies” - Easy workflow + data + tips = faster code faster - Prioritize, Prototype & Predict performance gain |
+| [Cube](http://www.scalasca.org/software/cube-4.x/download.html) | Cube, which is used as performance report explorer for Scalasca and Score-P, is a generic tool for displaying a multi-dimensional performance space consisting of the dimensions (i) performance metric, (ii) call path, and (iii) system resource. Each dimension can be represented as a tree, where non-leaf nodes of the tree can be collapsed or expanded to achieve the desired level of granularity. |
+| [ipp](http://software.intel.com/en-us/articles/intel-ipp/) | Intel Integrated Performance Primitives (Intel IPP) is an extensive library of multicore-ready, highly optimized software functions for multimedia, data processing, and communications applications. Intel IPP offers thousands of optimized functions covering frequently used fundamental algorithms. |
+| MAP | |
+| [OPARI2](http://www.score-p.org) | OPARI2, the successor of Forschungszentrum Juelich's OPARI, is a source-to-source instrumentation tool for OpenMP and hybrid codes. It surrounds OpenMP directives and runtime library calls with calls to the POMP2 measurement interface. |
+| [OTF2](http://www.score-p.org) | The Open Trace Format 2 is a highly scalable, memory efficient event trace data format plus support library. It will become the new standard trace format for Scalasca, Vampir, and Tau and is open for other tools. |
+| [PAPI](http://icl.cs.utk.edu/projects/papi/) | PAPI provides the tool designer and application engineer with a consistent interface and methodology for use of the performance counter hardware found in most major microprocessors. PAPI enables software engineers to see, in near real time, the relation between software performance and processor events. In addition Component PAPI provides access to a collection of components that expose performance measurement opportunities across the hardware and software stack. |
+| perfboost | |
+| perfcatcher | |
+| PerfReports | |
+| perfsuite | |
+| [Vampir](http://www.vampir.eu) | The Vampir software tool provides an easy-to-use framework that enables developers to quickly display and analyze arbitrary program behavior at any level of detail. The tool suite implements optimized event analysis algorithms and customizable displays that enable fast and interactive rendering of very complex performance monitoring data. |
+| [VampirServer](http://www.vampir.eu) | The Vampir software tool provides an easy-to-use framework that enables developers to quickly display and analyze arbitrary program behavior at any level of detail. The tool suite implements optimized event analysis algorithms and customizable displays that enable fast and interactive rendering of very complex performance monitoring data. |
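+
+For example, the hardware performance counters that PAPI exposes can be listed with its bundled `papi_avail` utility (assumes the `PAPI` module listed above is installed):
+
+```
+$ module load PAPI
+$ papi_avail
+```
+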
 ## Phys
-|Module|Description|
-|--|--|
-|**[Meep](http://ab-initio.mit.edu/wiki/index.php/Meep)**|Meep (or MEEP) is a free finite-difference time-domain (FDTD) simulation software package developed at MIT to model electromagnetic systems.|
-|**[phono3py](http://python.org/)**|Python is a programming language that lets you work more quickly and integrate your systems more effectively.|
-|**[phonopy](http://python.org/)**|Python is a programming language that lets you work more quickly and integrate your systems more effectively.|
-|**Siesta**| |
-|**VASP**| |
+| Module | Description |
+| ------ | ----------- |
+| [Meep](http://ab-initio.mit.edu/wiki/index.php/Meep) | Meep (or MEEP) is a free finite-difference time-domain (FDTD) simulation software package developed at MIT to model electromagnetic systems. |
+| [phono3py](http://python.org/) | Python is a programming language that lets you work more quickly and integrate your systems more effectively. |
+| [phonopy](http://python.org/) | Python is a programming language that lets you work more quickly and integrate your systems more effectively. |
+| Siesta | |
+| VASP | |
 ## System
-|Module|Description|
-|--|--|
-|**[eudev](https://wiki.gentoo.org/wiki/Project:Eudev)**|eudev is a fork of systemd-udev with the goal of obtaining better compatibility with existing software such as OpenRC and Upstart, older kernels, various toolchains and anything else required by users and various distributions.|
-|**[hwloc](http://www.open-mpi.org/projects/hwloc/)**|The Portable Hardware Locality (hwloc) software package provides a portable abstraction (across OS, versions, architectures, ...) of the hierarchical topology of modern architectures, including NUMA memory nodes, sockets, shared caches, cores and simultaneous multithreading. It also gathers various system attributes such as cache and memory information as well as the locality of I/O devices such as network interfaces, InfiniBand HCAs or GPUs. It primarily aims at helping applications with gathering information about modern computing hardware so as to exploit it accordingly and efficiently.|
-|**[libpciaccess](http://cgit.freedesktop.org/xorg/lib/libpciaccess/)**|Generic PCI access library.|
+| Module | Description |
+| ------ | ----------- |
+| [eudev](https://wiki.gentoo.org/wiki/Project:Eudev) | eudev is a fork of systemd-udev with the goal of obtaining better compatibility with existing software such as OpenRC and Upstart, older kernels, various toolchains and anything else required by users and various distributions. |
+| [hwloc](http://www.open-mpi.org/projects/hwloc/) | The Portable Hardware Locality (hwloc) software package provides a portable abstraction (across OS, versions, architectures, ...) of the hierarchical topology of modern architectures, including NUMA memory nodes, sockets, shared caches, cores and simultaneous multithreading. It also gathers various system attributes such as cache and memory information as well as the locality of I/O devices such as network interfaces, InfiniBand HCAs or GPUs. It primarily aims at helping applications with gathering information about modern computing hardware so as to exploit it accordingly and efficiently. |
+| [libpciaccess](http://cgit.freedesktop.org/xorg/lib/libpciaccess/) | Generic PCI access library. |
 ## Toolchain
-|Module|Description|
-|--|--|
-|**[foss]((none))**|GNU Compiler Collection (GCC) based compiler toolchain, including OpenMPI for MPI support, OpenBLAS (BLAS and LAPACK support), FFTW and ScaLAPACK.|
-|**[GNU](http://www.gnu.org/software/)**|Compiler-only toolchain with GCC and binutils.|
-|**[gompi]((none))**|GNU Compiler Collection (GCC) based compiler toolchain, including OpenMPI for MPI support.|
-|**[iccifort](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/)**|Intel C, C++ and Fortran compilers|
-|**[ictce](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/)**|Intel Cluster Toolkit Compiler Edition provides Intel C/C++ and Fortran compilers, Intel MPI & Intel MKL.|
-|**[iimpi](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/)**|Intel C/C++ and Fortran compilers, alongside Intel MPI.|
-|**[intel](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/)**|Intel Cluster Toolkit Compiler Edition provides Intel C/C++ and Fortran compilers, Intel MPI & Intel MKL.|
-|**[PRACE](http://www.prace-ri.eu/PRACE-Common-Production)**|The PRACE Common Production Environment (PCPE) is a set of software tools and libraries that are planned to be available on all PRACE execution sites. The PCPE also defines a set of environment variables that try to make compilation on all sites as homogeneous and simple as possible.|
-|**[prace](http://www.prace-ri.eu/PRACE-Common-Production)**|**** PRACE Common Production Environment (PCPE) **** Initialisation of the PRACE common production environment. This allows you to assume that the following tools/libraries are available by default in your PATH/environment. * Fortran, C, C++ Compilers * MPI * BLAS, LAPACK, BLACS, ScaLAPACK * FFTW * HDF5, NetCDF The compiler commands on are: * mpif90 - Fortran compiler * mpicc - C compiler * mpicxx - C++ compiler For more information on the PCPE please see the documentation at: http://www.prace-ri.eu/PRACE-Common-Production For help using this system, please see Local User Guide available at: http://prace-ri.eu/Best-Practice-Guide-Anselm-HTML|
+| Module | Description |
+| ------ | ----------- |
+| foss | GNU Compiler Collection (GCC) based compiler toolchain, including OpenMPI for MPI support, OpenBLAS (BLAS and LAPACK support), FFTW and ScaLAPACK. |
+| [GNU](http://www.gnu.org/software/) | Compiler-only toolchain with GCC and binutils. |
+| gompi | GNU Compiler Collection (GCC) based compiler toolchain, including OpenMPI for MPI support. |
+| [iccifort](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/) | Intel C, C++ and Fortran compilers |
+| [ictce](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/) | Intel Cluster Toolkit Compiler Edition provides Intel C/C++ and Fortran compilers, Intel MPI & Intel MKL. |
+| [iimpi](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/) | Intel C/C++ and Fortran compilers, alongside Intel MPI. |
+| [intel](http://software.intel.com/en-us/intel-cluster-toolkit-compiler/) | Intel Cluster Toolkit Compiler Edition provides Intel C/C++ and Fortran compilers, Intel MPI & Intel MKL. |
+| [PRACE](http://www.prace-ri.eu/PRACE-Common-Production) | The PRACE Common Production Environment (PCPE) is a set of software tools and libraries that are planned to be available on all PRACE execution sites. The PCPE also defines a set of environment variables that try to make compilation on all sites as homogeneous and simple as possible. |
+| [prace](http://www.prace-ri.eu/PRACE-Common-Production) | PRACE Common Production Environment (PCPE) Initialisation of the PRACE common production environment. This allows you to assume that the following tools/libraries are available by default in your PATH/environment. * Fortran, C, C++ Compilers * MPI * BLAS, LAPACK, BLACS, ScaLAPACK * FFTW * HDF5, NetCDF The compiler commands are: * mpif90 - Fortran compiler * mpicc - C compiler * mpicxx - C++ compiler For more information on the PCPE please see the documentation at: http://www.prace-ri.eu/PRACE-Common-Production For help using this system, please see Local User Guide available at: http://prace-ri.eu/Best-Practice-Guide-Anselm-HTML |
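+
+A toolchain module pulls in a consistent set of compiler, MPI and math-library modules in a single step. A minimal sketch (assumes the `intel` toolchain listed above):
+
+```
+$ module load intel
+$ module list
+```
+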
 ## Tools
-|Module|Description|
-|--|--|
-|**[APR](http://apr.apache.org/)**|Apache Portable Runtime (APR) libraries.|
-|**[APR-util](http://apr.apache.org/)**|Apache Portable Runtime (APR) util libraries.|
-|**[Bash](http://www.gnu.org/software/bash)**|Bash is an sh-compatible command language interpreter that executes commands read from the standard input or from a file. Bash also incorporates useful features from the Korn and C shells (ksh and csh).|
-|**[binutils](http://directory.fsf.org/project/binutils/)**|binutils: GNU binary utilities|
-|**[bzip2](http://www.bzip.org/)**|bzip2 is a freely available, patent free, high-quality data compressor. It typically compresses files to within 10% to 15% of the best available techniques (the PPM family of statistical compressors), whilst being around twice as fast at compression and six times faster at decompression.|
-|**[cURL](http://curl.haxx.se)**|libcurl is a free and easy-to-use client-side URL transfer library, supporting DICT, FILE, FTP, FTPS, Gopher, HTTP, HTTPS, IMAP, IMAPS, LDAP, LDAPS, POP3, POP3S, RTMP, RTSP, SCP, SFTP, SMTP, SMTPS, Telnet and TFTP. libcurl supports SSL certificates, HTTP POST, HTTP PUT, FTP uploading, HTTP form based upload, proxies, cookies, user+password authentication (Basic, Digest, NTLM, Negotiate, Kerberos), file transfer resume, http proxy tunneling and more.|
-|**[EasyBuild](http://hpcugent.github.com/easybuild/)**|EasyBuild is a software build and installation framework written in Python that allows you to install software in a structured, repeatable and robust way.|
-|**[expat](http://expat.sourceforge.net/)**|Expat is an XML parser library written in C. It is a stream-oriented parser in which an application registers handlers for things the parser might find in the XML document (like start tags)|
-|**[git](http://git-scm.com/)**|Git is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency.|
-|**globus**| |
-|**[gzip](http://www.gnu.org/software/gzip/)**|gzip (GNU zip) is a popular data compression program as a replacement for compress|
-|**[HPL](http://www.netlib.org/benchmark/hpl/)**|HPL is a software package that solves a (random) dense linear system in double precision (64 bits) arithmetic on distributed-memory computers. It can thus be regarded as a portable as well as freely available implementation of the High Performance Computing Linpack Benchmark.|
-|**[Inspector](http://software.intel.com/en-us/intel-inspector-xe)**|Intel Inspector XE 2013 is an easy to use memory error checker and thread checker for serial and parallel applications|
-|**[itac](http://software.intel.com/en-us/intel-trace-analyzer/)**|The Intel Trace Collector is a low-overhead tracing library that performs event-based tracing in applications. The Intel Trace Analyzer provides a convenient way to monitor application activities gathered by the Intel Trace Collector through graphical displays.|
-|**[JOE](http://joe-editor.sourceforge.net)**|JOE is a full featured terminal-based screen editor which is distributed under the GNU General Public License (GPL)|
-|**[likwid](https://github.com/RRZE-HPC/likwid)**|Toolsuite of command line applications for performance oriented programmers|
-|**[Lmod](http://sourceforge.net/projects/lmod/)**|Lmod is a Lua based module system. Modules allow for dynamic modification of a user's environment under Unix systems. See www.tacc.utexas.edu/tacc-projects/lmod for a complete description. Lmod is a new implementation that easily handles the MODULEPATH Hierarchical problem. It is drop-in replacement for TCL/C modules and reads TCL modulefiles directly.|
-|**MATLAB**| |
-|**[Mercurial](http://mercurial.selenic.com/)**|Mercurial is a free, distributed source control management tool. It efficiently handles projects of any size and offers an easy and intuitive interface.|
-|**[MIKE](http://www.mikepoweredbydhi.com)**|MIKE Powered by DHI is a part of DHI, the global organisation dedicated to solving challenges in water environments worldwide.|
-|**[numactl](http://oss.sgi.com/projects/libnuma/)**|The numactl program allows you to run your application program on specific cpu's and memory nodes. It does this by supplying a NUMA memory policy to the operating system before running your program. The libnuma library provides convenient ways for you to add NUMA memory policies into your own program.|
-|**[PAPI](http://icl.cs.utk.edu/projects/papi/)**|PAPI provides the tool designer and application engineer with a consistent interface and methodology for use of the performance counter hardware found in most major microprocessors. PAPI enables software engineers to see, in near real time, the relation between software performance and processor events. In addition Component PAPI provides access to a collection of components that expose performance measurement opportunites across the hardware and software stack.|
-|**[parallel](http://savannah.gnu.org/projects/parallel/)**|parallel: Build and execute shell commands in parallel|
-|**pigz**| |
-|**[QEMU](http://wiki.qemu.org/Main_Page)**|QEMU is a generic and open source machine emulator and virtualizer.|
-|**[RStudio](https://www.rstudio.com)**|RStudio is a set of integrated tools designed to help you be more productive with R. It includes a console, syntax-highlighting editor that supports direct code execution, as well as tools for plotting, history, debugging and workspace management.|
-|**Scalasca**| |
-|**Score-P**| |
-|**[SDE](https://software.intel.com/en-us/articles/intel-software-development-emulator)**|Intel Software Development Emulator is a pintool that enables the development of applications using instruction set extensions that are not currently implemented in hardware.|
-|**[Serf](http://serf.apache.org/)**|The serf library is a high performance C-based HTTP client library built upon the Apache Portable Runtime (APR) library|
-|**[Subversion](http://subversion.apache.org/)**|Subversion is an open source version control system.|
-|**[Szip](http://www.hdfgroup.org/doc_resource/SZIP/)**|Szip compression software, providing lossless compression of scientific data|
-|**[tcsh](http://www.tcsh.org)**|Tcsh is an enhanced, but completely compatible version of the Berkeley UNIX C shell (csh). It is a command language interpreter usable both as an interactive login shell and a shell script command processor. It includes a command-line editor, programmable word completion, spelling correction, a history mechanism, job control and a C-like syntax.|
-|**[turbovnc](http://www.turbovnc.org)**|TurboVNC is a derivative of VNC (Virtual Network Computing) that is tuned to provide peak performance for 3D and video workloads.|
-|**[util-linux](http://www.kernel.org/pub/linux/utils/util-linux)**|Set of Linux utilities|
-|**[VDE2](http://vde.sourceforge.net)**|VDE is an ethernet compliant virtual network that can be spawned over a set of physical computer over the Internet. VDE is part of virtualsquare project.|
-|**[VirtualGL](http://www.virtualgl.org)**|VirtualGL is an open source toolkit that gives any Unix or Linux remote display software the ability to run OpenGL applications with full 3D hardware acceleration.|
-|**[VTune](http://software.intel.com/en-us/intel-vtune-amplifier-xe)**|Intel VTune Amplifier XE 2016 is the premier performance profiler for C, C++, C#, Fortran, Assembly and Java.|
-|**[Wine](https://www.winehq.org)**|Wine (originally an acronym for "Wine Is Not an Emulator") is a compatibility layer capable of running Windows applications on several POSIX-compliant operating systems, such as Linux, Mac OSX, & BSD.|
-|**[XZ](http://tukaani.org/xz/)**|xz: XZ utilities|
+| Module | Description |
+| ------ | ----------- |
+| [APR](http://apr.apache.org/) | Apache Portable Runtime (APR) libraries. |
+| [APR-util](http://apr.apache.org/) | Apache Portable Runtime (APR) util libraries. |
+| [Bash](http://www.gnu.org/software/bash) | Bash is an sh-compatible command language interpreter that executes commands read from the standard input or from a file. Bash also incorporates useful features from the Korn and C shells (ksh and csh). |
+| [binutils](http://directory.fsf.org/project/binutils/) | binutils: GNU binary utilities |
+| [bzip2](http://www.bzip.org/) | bzip2 is a freely available, patent free, high-quality data compressor. It typically compresses files to within 10% to 15% of the best available techniques (the PPM family of statistical compressors), whilst being around twice as fast at compression and six times faster at decompression. |
+| [cURL](http://curl.haxx.se) | libcurl is a free and easy-to-use client-side URL transfer library, supporting DICT, FILE, FTP, FTPS, Gopher, HTTP, HTTPS, IMAP, IMAPS, LDAP, LDAPS, POP3, POP3S, RTMP, RTSP, SCP, SFTP, SMTP, SMTPS, Telnet and TFTP. libcurl supports SSL certificates, HTTP POST, HTTP PUT, FTP uploading, HTTP form based upload, proxies, cookies, user+password authentication (Basic, Digest, NTLM, Negotiate, Kerberos), file transfer resume, http proxy tunneling and more. |
+| [EasyBuild](http://hpcugent.github.com/easybuild/) | EasyBuild is a software build and installation framework written in Python that allows you to install software in a structured, repeatable and robust way. |
+| [expat](http://expat.sourceforge.net/) | Expat is an XML parser library written in C. It is a stream-oriented parser in which an application registers handlers for things the parser might find in the XML document (like start tags) |
+| [git](http://git-scm.com/) | Git is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency. |
+| globus | |
+| [gzip](http://www.gnu.org/software/gzip/) | gzip (GNU zip) is a popular data compression program as a replacement for compress |
+| [HPL](http://www.netlib.org/benchmark/hpl/) | HPL is a software package that solves a (random) dense linear system in double precision (64 bits) arithmetic on distributed-memory computers. It can thus be regarded as a portable as well as freely available implementation of the High Performance Computing Linpack Benchmark. |
+| [Inspector](http://software.intel.com/en-us/intel-inspector-xe) | Intel Inspector XE 2013 is an easy to use memory error checker and thread checker for serial and parallel applications |
+| [itac](http://software.intel.com/en-us/intel-trace-analyzer/) | The Intel Trace Collector is a low-overhead tracing library that performs event-based tracing in applications. The Intel Trace Analyzer provides a convenient way to monitor application activities gathered by the Intel Trace Collector through graphical displays. |
+| [JOE](http://joe-editor.sourceforge.net) | JOE is a full featured terminal-based screen editor which is distributed under the GNU General Public License (GPL) |
+| [likwid](https://github.com/RRZE-HPC/likwid) | Toolsuite of command line applications for performance oriented programmers |
+| [Lmod](http://sourceforge.net/projects/lmod/) | Lmod is a Lua based module system. Modules allow for dynamic modification of a user's environment under Unix systems. See www.tacc.utexas.edu/tacc-projects/lmod for a complete description. Lmod is a new implementation that easily handles the MODULEPATH Hierarchical problem. It is a drop-in replacement for TCL/C modules and reads TCL modulefiles directly. |
+| MATLAB | |
+| [Mercurial](http://mercurial.selenic.com/) | Mercurial is a free, distributed source control management tool. It efficiently handles projects of any size and offers an easy and intuitive interface. |
+| [MIKE](http://www.mikepoweredbydhi.com) | MIKE Powered by DHI is a part of DHI, the global organisation dedicated to solving challenges in water environments worldwide. |
+| [numactl](http://oss.sgi.com/projects/libnuma/) | The numactl program allows you to run your application program on specific CPUs and memory nodes. It does this by supplying a NUMA memory policy to the operating system before running your program. The libnuma library provides convenient ways for you to add NUMA memory policies into your own program. |
+| [PAPI](http://icl.cs.utk.edu/projects/papi/) | PAPI provides the tool designer and application engineer with a consistent interface and methodology for use of the performance counter hardware found in most major microprocessors. PAPI enables software engineers to see, in near real time, the relation between software performance and processor events. In addition Component PAPI provides access to a collection of components that expose performance measurement opportunities across the hardware and software stack. |
+| [parallel](http://savannah.gnu.org/projects/parallel/) | parallel: Build and execute shell commands in parallel |
+| pigz | |
+| [QEMU](http://wiki.qemu.org/Main_Page) | QEMU is a generic and open source machine emulator and virtualizer. |
+| [RStudio](https://www.rstudio.com) | RStudio is a set of integrated tools designed to help you be more productive with R. It includes a console, syntax-highlighting editor that supports direct code execution, as well as tools for plotting, history, debugging and workspace management. |
+| Scalasca | |
+| Score-P | |
+| [SDE](https://software.intel.com/en-us/articles/intel-software-development-emulator) | Intel Software Development Emulator is a pintool that enables the development of applications using instruction set extensions that are not currently implemented in hardware. |
+| [Serf](http://serf.apache.org/) | The serf library is a high performance C-based HTTP client library built upon the Apache Portable Runtime (APR) library |
+| [Subversion](http://subversion.apache.org/) | Subversion is an open source version control system. |
+| [Szip](http://www.hdfgroup.org/doc_resource/SZIP/) | Szip compression software, providing lossless compression of scientific data |
+| [tcsh](http://www.tcsh.org) | Tcsh is an enhanced, but completely compatible version of the Berkeley UNIX C shell (csh). It is a command language interpreter usable both as an interactive login shell and a shell script command processor. It includes a command-line editor, programmable word completion, spelling correction, a history mechanism, job control and a C-like syntax. |
+| [turbovnc](http://www.turbovnc.org) | TurboVNC is a derivative of VNC (Virtual Network Computing) that is tuned to provide peak performance for 3D and video workloads. |
+| [util-linux](http://www.kernel.org/pub/linux/utils/util-linux) | Set of Linux utilities |
+| [VDE2](http://vde.sourceforge.net) | VDE is an ethernet compliant virtual network that can be spawned over a set of physical computers over the Internet. VDE is part of the virtualsquare project. |
+| [VirtualGL](http://www.virtualgl.org) | VirtualGL is an open source toolkit that gives any Unix or Linux remote display software the ability to run OpenGL applications with full 3D hardware acceleration. |
+| [VTune](http://software.intel.com/en-us/intel-vtune-amplifier-xe) | Intel VTune Amplifier XE 2016 is the premier performance profiler for C, C++, C#, Fortran, Assembly and Java. |
+| [Wine](https://www.winehq.org) | Wine (originally an acronym for "Wine Is Not an Emulator") is a compatibility layer capable of running Windows applications on several POSIX-compliant operating systems, such as Linux, Mac OSX, & BSD. |
+| [XZ](http://tukaani.org/xz/) | xz: XZ utilities |
It can be used in graphics libraries, display servers, font conversion tools, text image generation tools, and many other products as well.| -|**[gettext](http://www.gnu.org/software/gettext/)**|GNU `gettext' is an important step for the GNU Translation Project, as it is an asset on which we may build many other steps. This package offers to programmers, translators, and even users, a well integrated set of tools and documentation| -|**[GLib](http://www.gtk.org/)**|GLib is one of the base libraries of the GTK+ project| -|**[GPI-2](http://www.gpi-site.com/gpi2/)**|GPI-2 is an API for the development of scalable, asynchronous and fault tolerant parallel applications.| -|**[grace](http://freecode.com/projects/grace)**|Grace is a WYSIWYG 2D plotting tool for X Windows System and Motif.| -|**[inputproto](http://www.freedesktop.org/wiki/Software/xlibs)**|X.org InputProto protocol headers.| -|**[JasPer](http://www.ece.uvic.ca/~frodo/jasper/)**|The JasPer Project is an open-source initiative to provide a free software-based reference implementation of the codec specified in the JPEG-2000 Part-1 standard.| -|**[kbproto](http://www.freedesktop.org/wiki/Software/xlibs)**|X.org KBProto protocol headers.| -|**[libGLU](ftp://ftp.freedesktop.org/pub/mesa/glu/)**|The OpenGL Utility Library (GLU) is a computer graphics library for OpenGL.| -|**[libICE](http://www.freedesktop.org/wiki/Software/xlibs)**|X Inter-Client Exchange library for freedesktop.org| -|**[libX11](http://www.freedesktop.org/wiki/Software/xlibs)**|X11 client-side library| -|**[libXau](http://www.freedesktop.org/wiki/Software/xlibs)**|The libXau package contains a library implementing the X11 Authorization Protocol. This is useful for restricting client access to the display.| -|**[libXdamage](http://www.freedesktop.org/wiki/Software/xlibs)**|X Damage extension library| -|**[libXdmcp](http://www.freedesktop.org/wiki/Software/xlibs)**|The libXdmcp package contains a library implementing the X Display Manager Control Protocol. This is useful for allowing clients to interact with the X Display Manager.| -|**[libXext](http://www.freedesktop.org/wiki/Software/xlibs)**|Common X Extensions library| -|**[libXfixes](http://www.freedesktop.org/wiki/Software/xlibs)**|X Fixes extension library| -|**[libXfont](http://www.freedesktop.org/wiki/Software/xlibs)**|X font libary| -|**[libXft](http://www.freedesktop.org/wiki/Software/xlibs)**|X11 client-side library| -|**[libXinerama](http://www.freedesktop.org/wiki/Software/xlibs)**|Xinerama multiple monitor library| -|**[libXrender](http://www.freedesktop.org/wiki/Software/xlibs)**|X11 client-side library| -|**[libXt](http://www.freedesktop.org/wiki/Software/xlibs)**|libXt provides the X Toolkit Intrinsics, an abstract widget library upon which other toolkits are based. Xt is the basis for many toolkits, including the Athena widgets (Xaw), and LessTif (a Motif implementation).| -|**matplotlib**| | -|**[Mesa](http://www.mesa3d.org/)**|Mesa is an open-source implementation of the OpenGL specification - a system for rendering interactive 3D graphics.| -|**[motif](http://motif.ics.com/)**|Motif refers to both a graphical user interface (GUI) specification and the widget toolkit for building applications that follow that specification under the X Window System on Unix and other POSIX-compliant systems. 
It was the standard toolkit for the Common Desktop Environment and thus for Unix.| -|**[OpenCV](http://opencv.org/)**|OpenCV (Open Source Computer Vision Library) is an open source computer vision and machine learning software library. OpenCV was built to provide a common infrastructure for computer vision applications and to accelerate the use of machine perception in the commercial products.| -|**[OpenDX](http://www.opendx.org)**|Open source visualization software package based on IBM's Visualization Data Explorer.| -|**[OSPRay](http://www.ospray.org)**|A Ray Tracing Based Rendering Engine for High-Fidelity Visualization| -|**[p4vasp](http://www.p4vasp.at)**|p4vasp is a visualization suite for the Vienna Ab-initio Simulation Package (VASP). It contains an extensible GUI framework, that can be used to view material structure, density of states, band-structure and more.| -|**[ParaView](http://www.paraview.org)**|ParaView is a scientific parallel visualizer.| -|**[pixman](http://www.pixman.org/)**|Pixman is a low-level software library for pixel manipulation, providing features such as image compositing and trapezoid rasterization. Important users of pixman are the cairo graphics library and the X server.| -|**[PyQt](http://www.riverbankcomputing.co.uk/software/pyqt)**|PyQt is a set of Python v2 and v3 bindings for Digia's Qt application framework.| -|**[SUMO](http://www.sumo.dlr.de/wiki/Main_Page)**|Simulation of Urban MObility (SUMO) is an open source, highly portable, microscopic and continuous road traffic simulation package designed to handle large road networks.| -|**[Tk](http://www.tcl.tk/)**|Tk is an open source, cross-platform widget toolchain that provides a library of basic elements for building a graphical user interface (GUI) in many different programming languages.| -|**[VisIt](https://wci.llnl.gov/simulation/computer-codes/visit)**|VisIt is an Open Source, interactive, scalable, visualization, animation and analysis tool| +| Module | Description | +| ------ | ----------- | +| [cairo](http://cairographics.org) | Cairo is a 2D graphics library with support for multiple output devices. Currently supported output targets include the X Window System (via both Xlib and XCB), Quartz, Win32, image buffers, PostScript, PDF, and SVG file output. Experimental backends include OpenGL, BeOS, OS/2, and DirectFB | +| [ffmpeg](https://www.ffmpeg.org/) | A complete, cross-platform solution to record, convert and stream audio and video. | +| [fixesproto](http://www.freedesktop.org/wiki/Software/xlibs) | X.org FixesProto protocol headers. | +| [FLTK](http://www.fltk.org) | FLTK is a cross-platform C++ GUI toolkit for UNIX/Linux (X11), Microsoft Windows, and MacOS X. FLTK provides modern GUI functionality without the bloat and supports 3D graphics via OpenGL and its built-in GLUT emulation. | +| [fontconfig](http://www.freedesktop.org/software/fontconfig) | Fontconfig is a library designed to provide system-wide font configuration, customization and application access. | +| [freetype](http://freetype.org) | FreeType 2 is a software font engine that is designed to be small, efficient, highly customizable, and portable while capable of producing high-quality output (glyph images). It can be used in graphics libraries, display servers, font conversion tools, text image generation tools, and many other products as well. | +| [gettext](http://www.gnu.org/software/gettext/) | GNU `gettext' is an important step for the GNU Translation Project, as it is an asset on which we may build many other steps. 
This package offers to programmers, translators, and even users, a well integrated set of tools and documentation | +| [GLib](http://www.gtk.org/) | GLib is one of the base libraries of the GTK+ project | +| [GPI-2](http://www.gpi-site.com/gpi2/) | GPI-2 is an API for the development of scalable, asynchronous and fault tolerant parallel applications. | +| [grace](http://freecode.com/projects/grace) | Grace is a WYSIWYG 2D plotting tool for X Windows System and Motif. | +| [inputproto](http://www.freedesktop.org/wiki/Software/xlibs) | X.org InputProto protocol headers. | +| [JasPer](http://www.ece.uvic.ca/~frodo/jasper/) | The JasPer Project is an open-source initiative to provide a free software-based reference implementation of the codec specified in the JPEG-2000 Part-1 standard. | +| [kbproto](http://www.freedesktop.org/wiki/Software/xlibs) | X.org KBProto protocol headers. | +| [libGLU](ftp://ftp.freedesktop.org/pub/mesa/glu/) | The OpenGL Utility Library (GLU) is a computer graphics library for OpenGL. | +| [libICE](http://www.freedesktop.org/wiki/Software/xlibs) | X Inter-Client Exchange library for freedesktop.org | +| [libX11](http://www.freedesktop.org/wiki/Software/xlibs) | X11 client-side library | +| [libXau](http://www.freedesktop.org/wiki/Software/xlibs) | The libXau package contains a library implementing the X11 Authorization Protocol. This is useful for restricting client access to the display. | +| [libXdamage](http://www.freedesktop.org/wiki/Software/xlibs) | X Damage extension library | +| [libXdmcp](http://www.freedesktop.org/wiki/Software/xlibs) | The libXdmcp package contains a library implementing the X Display Manager Control Protocol. This is useful for allowing clients to interact with the X Display Manager. | +| [libXext](http://www.freedesktop.org/wiki/Software/xlibs) | Common X Extensions library | +| [libXfixes](http://www.freedesktop.org/wiki/Software/xlibs) | X Fixes extension library | +| [libXfont](http://www.freedesktop.org/wiki/Software/xlibs) | X font libary | +| [libXft](http://www.freedesktop.org/wiki/Software/xlibs) | X11 client-side library | +| [libXinerama](http://www.freedesktop.org/wiki/Software/xlibs) | Xinerama multiple monitor library | +| [libXrender](http://www.freedesktop.org/wiki/Software/xlibs) | X11 client-side library | +| [libXt](http://www.freedesktop.org/wiki/Software/xlibs) | libXt provides the X Toolkit Intrinsics, an abstract widget library upon which other toolkits are based. Xt is the basis for many toolkits, including the Athena widgets (Xaw), and LessTif (a Motif implementation). | +| matplotlib | | +| [Mesa](http://www.mesa3d.org/) | Mesa is an open-source implementation of the OpenGL specification - a system for rendering interactive 3D graphics. | +| [motif](http://motif.ics.com/) | Motif refers to both a graphical user interface (GUI) specification and the widget toolkit for building applications that follow that specification under the X Window System on Unix and other POSIX-compliant systems. It was the standard toolkit for the Common Desktop Environment and thus for Unix. | +| [OpenCV](http://opencv.org/) | OpenCV (Open Source Computer Vision Library) is an open source computer vision and machine learning software library. OpenCV was built to provide a common infrastructure for computer vision applications and to accelerate the use of machine perception in the commercial products. | +| [OpenDX](http://www.opendx.org) | Open source visualization software package based on IBM's Visualization Data Explorer. 
| +| [OSPRay](http://www.ospray.org) | A Ray Tracing Based Rendering Engine for High-Fidelity Visualization | +| [p4vasp](http://www.p4vasp.at) | p4vasp is a visualization suite for the Vienna Ab-initio Simulation Package (VASP). It contains an extensible GUI framework, that can be used to view material structure, density of states, band-structure and more. | +| [ParaView](http://www.paraview.org) | ParaView is a scientific parallel visualizer. | +| [pixman](http://www.pixman.org/) | Pixman is a low-level software library for pixel manipulation, providing features such as image compositing and trapezoid rasterization. Important users of pixman are the cairo graphics library and the X server. | +| [PyQt](http://www.riverbankcomputing.co.uk/software/pyqt) | PyQt is a set of Python v2 and v3 bindings for Digia's Qt application framework. | +| [SUMO](http://www.sumo.dlr.de/wiki/Main_Page) | Simulation of Urban MObility (SUMO) is an open source, highly portable, microscopic and continuous road traffic simulation package designed to handle large road networks. | +| [Tk](http://www.tcl.tk/) | Tk is an open source, cross-platform widget toolchain that provides a library of basic elements for building a graphical user interface (GUI) in many different programming languages. | +| [VisIt](https://wci.llnl.gov/simulation/computer-codes/visit) | VisIt is an Open Source, interactive, scalable, visualization, animation and analysis tool | diff --git a/docs.it4i/robots.txt b/docs.it4i/robots.txt new file mode 100644 index 0000000000000000000000000000000000000000..72b82b8423d63ccfd7286352b3f09b5d41607be6 --- /dev/null +++ b/docs.it4i/robots.txt @@ -0,0 +1,3 @@ +Sitemap: https://docs.it4i.cz/sitemap.xml.gz +User-agent: * +Disallow: diff --git a/docs.it4i/salomon/7d-enhanced-hypercube.md b/docs.it4i/salomon/7d-enhanced-hypercube.md index ec11ddb1145823a67c2b8b72fae0f80c42a8c00e..af082502dcd86b173da87c3c804517ca44445443 100644 --- a/docs.it4i/salomon/7d-enhanced-hypercube.md +++ b/docs.it4i/salomon/7d-enhanced-hypercube.md @@ -1,16 +1,12 @@ -7D Enhanced Hypercube -===================== - -### 7D Enhanced Hypercube {#d-enhanced-hypercube} +# 7D Enhanced Hypercube  -|Node type|Count|Short name|Long name|Rack| -|---|---|---|---|---| -|M-Cell compute nodes w/o accelerator|576|cns1 -cns576|r1i0n0 - r4i7n17|1-4| -|compute nodes MIC accelerated|432|cns577 - cns1008|r21u01n577 - r37u31n1008|21-38| +| Node type | Count | Short name | Long name | Rack | +| ------------------------------------ | ----- | ---------------- | ------------------------ | ----- | +| M-Cell compute nodes w/o accelerator | 576 | cns1 -cns576 | r1i0n0 - r4i7n17 | 1-4 | +| compute nodes MIC accelerated | 432 | cns577 - cns1008 | r21u01n577 - r37u31n1008 | 21-38 | -### IB Topology +## IB Topology  - diff --git a/docs.it4i/salomon/capacity-computing.md b/docs.it4i/salomon/capacity-computing.md index ec853f41fae7632a55859d54533f49f792294850..aa947db011b4ba820f3736b445e1bb639233ef14 100644 --- a/docs.it4i/salomon/capacity-computing.md +++ b/docs.it4i/salomon/capacity-computing.md @@ -1,37 +1,33 @@ -Capacity computing -================== +# Capacity computing -Introduction ------------- +## Introduction In many cases, it is useful to submit huge (100+) number of computational jobs into the PBS queue system. Huge number of (small) jobs is one of the most effective ways to execute embarrassingly parallel calculations, achieving best runtime, throughput and computer utilization. 
However, executing huge number of jobs via the PBS queue may strain the system. This strain may result in slow response to commands, inefficient scheduling and overall degradation of performance and user experience, for all users. For this reason, the number of jobs is **limited to 100 per user, 1500 per job array** -!!! Note "Note" - Please follow one of the procedures below, in case you wish to schedule more than 100 jobs at a time. +!!! note + Please follow one of the procedures below, in case you wish to schedule more than 100 jobs at a time. -- Use [Job arrays](capacity-computing.md#job-arrays) when running huge number of [multithread](capacity-computing/#shared-jobscript-on-one-node) (bound to one node only) or multinode (multithread across several nodes) jobs -- Use [GNU parallel](capacity-computing/#gnu-parallel) when running single core jobs -- Combine [GNU parallel with Job arrays](capacity-computing/#job-arrays-and-gnu-parallel) when running huge number of single core jobs +* Use [Job arrays](capacity-computing.md#job-arrays) when running huge number of [multithread](capacity-computing/#shared-jobscript-on-one-node) (bound to one node only) or multinode (multithread across several nodes) jobs +* Use [GNU parallel](capacity-computing/#gnu-parallel) when running single core jobs +* Combine [GNU parallel with Job arrays](capacity-computing/#job-arrays-and-gnu-parallel) when running huge number of single core jobs -Policy ------- +## Policy -1. A user is allowed to submit at most 100 jobs. Each job may be [a job array](capacity-computing/#job-arrays). -2. The array size is at most 1000 subjobs. +1. A user is allowed to submit at most 100 jobs. Each job may be [a job array](capacity-computing/#job-arrays). +1. The array size is at most 1000 subjobs. -Job arrays --------------- +## Job Arrays -!!! Note "Note" - Huge number of jobs may be easily submitted and managed as a job array. +!!! note + Huge number of jobs may be easily submitted and managed as a job array. A job array is a compact representation of many jobs, called subjobs. The subjobs share the same job script, and have the same values for all attributes and resources, with the following exceptions: -- each subjob has a unique index, $PBS_ARRAY_INDEX -- job Identifiers of subjobs only differ by their indices -- the state of subjobs can differ (R,Q,...etc.) +* each subjob has a unique index, $PBS_ARRAY_INDEX +* job Identifiers of subjobs only differ by their indices +* the state of subjobs can differ (R,Q,...etc.) All subjobs within a job array have the same scheduling priority and schedule as independent jobs. Entire job array is submitted through a single qsub command and may be managed by qdel, qalter, qhold, qrls and qsig commands as a single job. @@ -78,7 +74,7 @@ In this example, the submit directory holds the 900 input files, executable mypr If huge number of parallel multicore (in means of multinode multithread, e. g. MPI enabled) jobs is needed to run, then a job array approach should also be used. The main difference compared to previous example using one node is that the local scratch should not be used (as it's not shared between nodes) and MPI or other technique for parallel multinode run has to be used properly. -### Submit the job array +### Submit the Job Array To submit the job array, use the qsub -J command. 
The 900 jobs of the [example above](capacity-computing/#array_example) may be submitted like this: @@ -97,7 +93,7 @@ $ qsub -N JOBNAME -J 9-10:2 jobscript This will only choose the lower index (9 in this example) for submitting/running your job. -### Manage the job array +### Manage the Job Array Check status of the job array by the qstat command. @@ -105,10 +101,10 @@ Check status of the job array by the qstat command. $ qstat -a 506493[].isrv5 isrv5: - Req'd Req'd Elap -Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time + Req'd Req'd Elap +Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time --------------- -------- -- |---|---| ------ --- --- ------ ----- - ----- -12345[].dm2 user2 qprod xx 13516 1 24 -- 00:50 B 00:02 +12345[].dm2 user2 qprod xx 13516 1 24 -- 00:50 B 00:02 ``` The status B means that some subjobs are already running. @@ -119,16 +115,16 @@ Check status of the first 100 subjobs by the qstat command. $ qstat -a 12345[1-100].isrv5 isrv5: - Req'd Req'd Elap -Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time + Req'd Req'd Elap +Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time --------------- -------- -- |---|---| ------ --- --- ------ ----- - ----- -12345[1].isrv5 user2 qprod xx 13516 1 24 -- 00:50 R 00:02 -12345[2].isrv5 user2 qprod xx 13516 1 24 -- 00:50 R 00:02 -12345[3].isrv5 user2 qprod xx 13516 1 24 -- 00:50 R 00:01 -12345[4].isrv5 user2 qprod xx 13516 1 24 -- 00:50 Q -- +12345[1].isrv5 user2 qprod xx 13516 1 24 -- 00:50 R 00:02 +12345[2].isrv5 user2 qprod xx 13516 1 24 -- 00:50 R 00:02 +12345[3].isrv5 user2 qprod xx 13516 1 24 -- 00:50 R 00:01 +12345[4].isrv5 user2 qprod xx 13516 1 24 -- 00:50 Q -- . . . . . . . . . . . , . . . . . . . . . . -12345[100].isrv5 user2 qprod xx 13516 1 24 -- 00:50 Q -- +12345[100].isrv5 user2 qprod xx 13516 1 24 -- 00:50 Q -- ``` Delete the entire job array. Running subjobs will be killed, queueing subjobs will be deleted. @@ -153,13 +149,12 @@ $ qstat -u $USER -tJ Read more on job arrays in the [PBSPro Users guide](../../pbspro-documentation/). -GNU parallel ----------------- +## GNU Parallel -!!! Note "Note" - Use GNU parallel to run many single core tasks on one node. +!!! note + Use GNU parallel to run many single core tasks on one node. -GNU parallel is a shell tool for executing jobs in parallel using one or more computers. A job can be a single command or a small script that has to be run for each of the lines in the input. GNU parallel is most useful in running single core jobs via the queue system on Anselm. +GNU parallel is a shell tool for executing jobs in parallel using one or more computers. A job can be a single command or a small script that has to be run for each of the lines in the input. GNU parallel is most useful in running single core jobs via the queue system on Anselm. For more information and examples see the parallel man page: @@ -168,7 +163,7 @@ $ module add parallel $ man parallel ``` -### GNU parallel jobscript +### GNU Parallel jobscript The GNU parallel shell executes multiple instances of the jobscript using all cores on the node. The instances execute different work, controlled by the $PARALLEL_SEQ variable. @@ -204,7 +199,7 @@ TASK=$1 cp $PBS_O_WORKDIR/$TASK input # execute the calculation -cat input > output +cat input > output # copy output file to submit directory cp output $PBS_O_WORKDIR/$TASK.out @@ -212,7 +207,7 @@ cp output $PBS_O_WORKDIR/$TASK.out In this example, tasks from tasklist are executed via the GNU parallel. 
The jobscript executes multiple instances of itself in parallel, on all cores of the node. Once an instace of jobscript is finished, new instance starts until all entries in tasklist are processed. Currently processed entry of the joblist may be retrieved via $1 variable. Variable $TASK expands to one of the input filenames from tasklist. We copy the input file to local scratch, execute the myprog.x and copy the output file back to the submit directory, under the $TASK.out name. -### Submit the job +### Submit the Job To submit the job, use the qsub command. The 101 tasks' job of the [example above](capacity-computing/#gp_example) may be submitted like this: @@ -221,22 +216,22 @@ $ qsub -N JOBNAME jobscript 12345.dm2 ``` -In this example, we submit a job of 101 tasks. 24 input files will be processed in parallel. The 101 tasks on 24 cores are assumed to complete in less than 2 hours. +In this example, we submit a job of 101 tasks. 24 input files will be processed in parallel. The 101 tasks on 24 cores are assumed to complete in less than 2 hours. -Please note the #PBS directives in the beginning of the jobscript file, dont' forget to set your valid PROJECT_ID and desired queue. +!!! note + Use #PBS directives in the beginning of the jobscript file, dont' forget to set your valid PROJECT_ID and desired queue. -Job arrays and GNU parallel -------------------------------- +## Job Arrays and GNU Parallel -!!! Note "Note" - Combine the Job arrays and GNU parallel for best throughput of single core jobs +!!! note + Combine the Job arrays and GNU parallel for best throughput of single core jobs While job arrays are able to utilize all available computational nodes, the GNU parallel can be used to efficiently run multiple single-core jobs on single node. The two approaches may be combined to utilize all available (current and future) resources to execute single core jobs. -!!! Note "Note" - Every subjob in an array runs GNU parallel to utilize all cores on the node +!!! note + Every subjob in an array runs GNU parallel to utilize all cores on the node -### GNU parallel, shared jobscript +### GNU Parallel, Shared jobscript Combined approach, very similar to job arrays, can be taken. Job array is submitted to the queuing system. The subjobs run GNU parallel. The GNU parallel shell executes multiple instances of the jobscript using all cores on the node. The instances execute different work, controlled by the $PBS_JOB_ARRAY and $PARALLEL_SEQ variables. @@ -286,18 +281,18 @@ cat input > output cp output $PBS_O_WORKDIR/$TASK.out ``` -In this example, the jobscript executes in multiple instances in parallel, on all cores of a computing node. Variable $TASK expands to one of the input filenames from tasklist. We copy the input file to local scratch, execute the myprog.x and copy the output file back to the submit directory, under the $TASK.out name. The numtasks file controls how many tasks will be run per subjob. Once an task is finished, new task starts, until the number of tasks in numtasks file is reached. +In this example, the jobscript executes in multiple instances in parallel, on all cores of a computing node. Variable $TASK expands to one of the input filenames from tasklist. We copy the input file to local scratch, execute the myprog.x and copy the output file back to the submit directory, under the $TASK.out name. The numtasks file controls how many tasks will be run per subjob. Once an task is finished, new task starts, until the number of tasks in numtasks file is reached. -!!! 
Note "Note" - Select subjob walltime and number of tasks per subjob carefully +!!! note + Select subjob walltime and number of tasks per subjob carefully When deciding this values, think about following guiding rules : -1. Let n=N/24. Inequality (n+1) * T < W should hold. The N is number of tasks per subjob, T is expected single task walltime and W is subjob walltime. Short subjob walltime improves scheduling and job throughput. -2. Number of tasks should be modulo 24. -3. These rules are valid only when all tasks have similar task walltimes T. +1. Let n = N / 24. Inequality (n + 1) x T < W should hold. The N is number of tasks per subjob, T is expected single task walltime and W is subjob walltime. Short subjob walltime improves scheduling and job throughput. +1. Number of tasks should be modulo 24. +1. These rules are valid only when all tasks have similar task walltimes T. -### Submit the job array +### Submit the Job Array (-J) To submit the job array, use the qsub -J command. The 992 tasks' job of the [example above](capacity-computing/#combined_example) may be submitted like this: @@ -308,10 +303,10 @@ $ qsub -N JOBNAME -J 1-992:32 jobscript In this example, we submit a job array of 31 subjobs. Note the -J 1-992:**48**, this must be the same as the number sent to numtasks file. Each subjob will run on full node and process 24 input files in parallel, 48 in total per subjob. Every subjob is assumed to complete in less than 2 hours. -Please note the #PBS directives in the beginning of the jobscript file, dont' forget to set your valid PROJECT_ID and desired queue. +!!! note + Use #PBS directives in the beginning of the jobscript file, dont' forget to set your valid PROJECT_ID and desired queue. -Examples --------- +## Examples Download the examples in [capacity.zip](capacity.zip), illustrating the above listed ways to run huge number of jobs. We recommend to try out the examples, before using this for running production jobs. diff --git a/docs.it4i/salomon/compute-nodes.md b/docs.it4i/salomon/compute-nodes.md index af6b9708691b33a03103e8926076ab5637057b9d..6123b723efbc2d265efac37946cbe748f38c13aa 100644 --- a/docs.it4i/salomon/compute-nodes.md +++ b/docs.it4i/salomon/compute-nodes.md @@ -1,109 +1,107 @@ -Compute Nodes -============= +# Compute Nodes + +## Nodes Configuration -Nodes Configuration -------------------- Salomon is cluster of x86-64 Intel based nodes. The cluster contains two types of compute nodes of the same processor type and memory size. Compute nodes with MIC accelerator **contains two Intel Xeon Phi 7120P accelerators.** [More about schematic representation of the Salomon cluster compute nodes IB topology](ib-single-plane-topology/). 
-###Compute Nodes Without Accelerator +### Compute Nodes Without Accelerator -- codename "grafton" -- 576 nodes -- 13 824 cores in total -- two Intel Xeon E5-2680v3, 12-core, 2.5 GHz processors per node -- 128 GB of physical memory per node +* codename "grafton" +* 576 nodes +* 13 824 cores in total +* two Intel Xeon E5-2680v3, 12-core, 2.5 GHz processors per node +* 128 GB of physical memory per node - + -###Compute Nodes With MIC Accelerator +### Compute Nodes With MIC Accelerator -- codename "perrin" -- 432 nodes -- 10 368 cores in total -- two Intel Xeon E5-2680v3, 12-core, 2.5 GHz processors per node -- 128 GB of physical memory per node -- MIC accelerator 2 x Intel Xeon Phi 7120P per node, 61-cores, 16 GB per accelerator +* codename "perrin" +* 432 nodes +* 10 368 cores in total +* two Intel Xeon E5-2680v3, 12-core, 2.5 GHz processors per node +* 128 GB of physical memory per node +* MIC accelerator 2 x Intel Xeon Phi 7120P per node, 61-cores, 16 GB per accelerator - +  - + -### UV 2000 +### Uv 2000 -- codename "UV2000" -- 1 node -- 112 cores in total -- 14 x Intel Xeon E5-4627v2, 8-core, 3.3 GHz processors, in 14 NUMA nodes -- 3328 GB of physical memory per node -- 1 x NVIDIA GM200 (GeForce GTX TITAN X), 12 GB RAM +* codename "UV2000" +* 1 node +* 112 cores in total +* 14 x Intel Xeon E5-4627v2, 8-core, 3.3 GHz processors, in 14 NUMA nodes +* 3328 GB of physical memory per node +* 1 x NVIDIA GM200 (GeForce GTX TITAN X), 12 GB RAM  ### Compute Nodes Summary - |Node type |Count |Memory |Cores | - | --- | --- | --- | --- | - |Nodes without accelerator |576 |128 GB |24 @ 2.5Ghz | - |Nodes with MIC accelerator |432 |128 GB<p>32GB\ |<p>24 @ 2.5Ghz<p>61 @ 1.238 GHz\ | - |UV2000 SMP node |1 |3328GB\ |<p>112 @ 3.3GHz\ | +| Node type | Count | Memory | Cores | +| -------------------------- | ----- | ----------------- | ----------------------------------- | +| Nodes without accelerator | 576 | 128 GB | 24 @ 2.5GHz | +| Nodes with MIC accelerator | 432 | 128 GB, MIC 32GB | 24 @ 2.5GHz, MIC 61 @ 1.238 GHz | +| UV2000 SMP node | 1 | 3328GB | 112 @ 3.3GHz | -Processor Architecture ----------------------- +## Processor Architecture Salomon is equipped with Intel Xeon processors Intel Xeon E5-2680v3. Processors support Advanced Vector Extensions 2.0 (AVX2) 256-bit instruction set. ### Intel Xeon E5-2680v3 Processor -- 12-core -- speed: 2.5 GHz, up to 3.3 GHz using Turbo Boost Technology -- peak performance: 19.2 GFLOP/s per core -- caches: - - Intel® Smart Cache: 30 MB -- memory bandwidth at the level of the processor: 68 GB/s +* 12-core +* speed: 2.5 GHz, up to 3.3 GHz using Turbo Boost Technology +* peak performance: 19.2 GFLOP/s per core +* caches: + * Intel® Smart Cache: 30 MB +* memory bandwidth at the level of the processor: 68 GB/s ### MIC Accelerator Intel Xeon Phi 7120P Processor -- 61-core -- speed: 1.238 +* 61-core +* speed: 1.238 GHz, up to 1.333 GHz using Turbo Boost Technology -- peak performance: 18.4 GFLOP/s per core -- caches: - - L2: 30.5 MB -- memory bandwidth at the level of the processor: 352 GB/s +* peak performance: 18.4 GFLOP/s per core +* caches: + * L2: 30.5 MB +* memory bandwidth at the level of the processor: 352 GB/s + +## Memory Architecture -Memory Architecture -------------------- Memory is equally distributed across all CPUs and cores for optimal performance. Memory is composed of memory modules of the same size and evenly distributed across all memory controllers and memory channels. 
### Compute Node Without Accelerator -- 2 sockets -- Memory Controllers are integrated into processors. - - 8 DDR4 DIMMs per node - - 4 DDR4 DIMMs per CPU - - 1 DDR4 DIMMs per channel -- Populated memory: 8 x 16 GB DDR4 DIMM >2133 MHz +* 2 sockets +* Memory Controllers are integrated into processors. + * 8 DDR4 DIMMs per node + * 4 DDR4 DIMMs per CPU + * 1 DDR4 DIMMs per channel +* Populated memory: 8 x 16 GB DDR4 DIMM >2133 MHz ### Compute Node With MIC Accelerator 2 sockets Memory Controllers are integrated into processors. -- 8 DDR4 DIMMs per node -- 4 DDR4 DIMMs per CPU -- 1 DDR4 DIMMs per channel +* 8 DDR4 DIMMs per node +* 4 DDR4 DIMMs per CPU +* 1 DDR4 DIMMs per channel Populated memory: 8 x 16 GB DDR4 DIMM 2133 MHz MIC Accelerator Intel Xeon Phi 7120P Processor -- 2 sockets -- Memory Controllers are are connected via an +* 2 sockets +* Memory Controllers are are connected via an Interprocessor Network (IPN) ring. - - 16 GDDR5 DIMMs per node - - 8 GDDR5 DIMMs per CPU - - 2 GDDR5 DIMMs per channel + * 16 GDDR5 DIMMs per node + * 8 GDDR5 DIMMs per CPU + * 2 GDDR5 DIMMs per channel diff --git a/docs.it4i/salomon/environment-and-modules.md b/docs.it4i/salomon/environment-and-modules.md index b6ae85042866e704d9e5d67e58ffbc9c1c3207c4..9671013566e7621e42b2d0cdf693eed783f13197 100644 --- a/docs.it4i/salomon/environment-and-modules.md +++ b/docs.it4i/salomon/environment-and-modules.md @@ -1,7 +1,6 @@ -Environment and Modules -======================= +# Environment and Modules -### Environment Customization +## Environment Customization After logging in, you may want to configure the environment. Write your preferred path definitions, aliases, functions and module loads in the .bashrc file @@ -17,22 +16,19 @@ fi alias qs='qstat -a' module load intel/2015b -# Display informations to standard output - only in interactive ssh session +# Display information to standard output - only in interactive ssh session if [ -n "$SSH_TTY" ] then module list # Display loaded modules fi ``` -!!! Note "Note" - Do not run commands outputting to standard output (echo, module list, etc) in .bashrc for non-interactive SSH sessions. It breaks fundamental functionality (scp, PBS) of your account! Take care for SSH session interactivity for such commands as stated in the previous example. - -How to using modules in examples: -<tty-player controls src=/src/salomon/modules_salomon.ttyrec></tty-player> +!!! note + Do not run commands outputting to standard output (echo, module list, etc) in .bashrc for non-interactive SSH sessions. It breaks fundamental functionality (scp, PBS) of your account! Take care for SSH session interactivity for such commands as stated in the previous example. ### Application Modules -In order to configure your shell for running particular application on Salomon we use Module package interface. +In order to configure your shell for running particular application on Salomon we use Module package interface. Application modules on Salomon cluster are built using [EasyBuild](http://hpcugent.github.io/easybuild/ "EasyBuild"). The modules are divided into the following structure: @@ -60,8 +56,8 @@ Application modules on Salomon cluster are built using [EasyBuild](http://hpcuge vis: Visualization, plotting, documentation and typesetting ``` -!!! Note "Note" - The modules set up the application paths, library paths and environment variables for running particular application. +!!! 
note + The modules set up the application paths, library paths and environment variables for running particular application. The modules may be loaded, unloaded and switched, according to momentary needs. @@ -71,7 +67,7 @@ To check available modules use $ module avail ``` -To load a module, for example the Open MPI module use +To load a module, for example the Open MPI module use ```bash $ module load OpenMPI @@ -111,18 +107,18 @@ The EasyBuild framework prepares the build environment for the different toolcha Recent releases of EasyBuild include out-of-the-box toolchain support for: -- various compilers, including GCC, Intel, Clang, CUDA -- common MPI libraries, such as Intel MPI, MPICH, MVAPICH2, Open MPI -- various numerical libraries, including ATLAS, Intel MKL, OpenBLAS, ScaLAPACK, FFTW +* various compilers, including GCC, Intel, Clang, CUDA +* common MPI libraries, such as Intel MPI, MPICH, MVAPICH2, Open MPI +* various numerical libraries, including ATLAS, Intel MKL, OpenBLAS, ScaLAPACK, FFTW On Salomon, we have currently following toolchains installed: - |Toolchain|Module(s)| - |---|----| - |GCC|GCC| - |ictce|icc, ifort, imkl, impi| - |intel|GCC, icc, ifort, imkl, impi| - |gompi|GCC, OpenMPI| - |goolf|BLACS, FFTW, GCC, OpenBLAS, OpenMPI, ScaLAPACK| - |iompi|OpenMPI, icc, ifort| - |iccifort|icc, ifort| +| Toolchain | Module(s) | +| --------- | ---------------------------------------------- | +| GCC | GCC | +| ictce | icc, ifort, imkl, impi | +| intel | GCC, icc, ifort, imkl, impi | +| gompi | GCC, OpenMPI | +| goolf | BLACS, FFTW, GCC, OpenBLAS, OpenMPI, ScaLAPACK | +| iompi | OpenMPI, icc, ifort | +| iccifort | icc, ifort | diff --git a/docs.it4i/salomon/hardware-overview.md b/docs.it4i/salomon/hardware-overview.md index a7465b809e8664fd968b187502dfcbe1ebb47da9..d1bd65a810686a7425730dde75794ee82f285e83 100644 --- a/docs.it4i/salomon/hardware-overview.md +++ b/docs.it4i/salomon/hardware-overview.md @@ -1,60 +1,57 @@ -Hardware Overview -================= +# Hardware Overview -Introduction ------------- -The Salomon cluster consists of 1008 computational nodes of which 576 are regular compute nodes and 432 accelerated nodes. Each node is a powerful x86-64 computer, equipped with 24 cores (two twelve-core Intel Xeon processors) and 128 GB RAM. The nodes are interlinked by high speed InfiniBand and Ethernet networks. All nodes share 0.5 PB /home NFS disk storage to store the user files. Users may use a DDN Lustre shared storage with capacity of 1.69 PB which is available for the scratch project data. The user access to the Salomon cluster is provided by four login nodes. +## Introduction + +The Salomon cluster consists of 1008 computational nodes of which 576 are regular compute nodes and 432 accelerated nodes. Each node is a powerful x86-64 computer, equipped with 24 cores (two twelve-core Intel Xeon processors) and 128 GB RAM. The nodes are interlinked by high speed InfiniBand and Ethernet networks. All nodes share 0.5 PB /home NFS disk storage to store the user files. Users may use a DDN Lustre shared storage with capacity of 1.69 PB which is available for the scratch project data. The user access to the Salomon cluster is provided by four login nodes. [More about schematic representation of the Salomon cluster compute nodes IB topology](ib-single-plane-topology/). 
- + The parameters are summarized in the following tables: -General information -------------------- - -|**In general**|| -|---|---| -|Primary purpose|High Performance Computing| -|Architecture of compute nodes|x86-64| -|Operating system|CentOS 6.x Linux| -|[**Compute nodes**](compute-nodes/)|| -|Totally|1008| -|Processor|2 x Intel Xeon E5-2680v3, 2.5 GHz, 12 cores| -|RAM|128GB, 5.3 GB per core, DDR4@2133 MHz| -|Local disk drive|no| -|Compute network / Topology|InfiniBand FDR56 / 7D Enhanced hypercube| -|w/o accelerator|576| -|MIC accelerated|432| -|**In total**|| -|Total theoretical peak performance (Rpeak)|2011 TFLOP/s| -|Total amount of RAM|129.024 TB| - -Compute nodes -------------- - -|Node|Count|Processor|Cores|Memory|Accelerator| -|---|---|---|---|---|---| -|w/o accelerator|576|2 x Intel Xeon E5-2680v3, 2.5 GHz|24|128 GB|-| -|MIC accelerated|432|2 x Intel Xeon E5-2680v3, 2.5 GHz|24|128 GB|2 x Intel Xeon Phi 7120P, 61 cores, 16 GB RAM| +## General Information + +| **In general** | | +| ------------------------------------------- | ------------------------------------------- | +| Primary purpose | High Performance Computing | +| Architecture of compute nodes | x86-64 | +| Operating system | CentOS 6.x Linux | +| [**Compute nodes**](compute-nodes/) | | +| Totally | 1008 | +| Processor | 2 x Intel Xeon E5-2680v3, 2.5 GHz, 12 cores | +| RAM | 128GB, 5.3 GB per core, DDR4@2133 MHz | +| Local disk drive | no | +| Compute network / Topology | InfiniBand FDR56 / 7D Enhanced hypercube | +| w/o accelerator | 576 | +| MIC accelerated | 432 | +| **In total** | | +| Total theoretical peak performance (Rpeak) | 2011 TFLOP/s | +| Total amount of RAM | 129.024 TB | + +## Compute Nodes + +| Node | Count | Processor | Cores | Memory | Accelerator | +| --------------- | ----- | --------------------------------- | ----- | ------ | --------------------------------------------- | +| w/o accelerator | 576 | 2 x Intel Xeon E5-2680v3, 2.5 GHz | 24 | 128 GB | - | +| MIC accelerated | 432 | 2 x Intel Xeon E5-2680v3, 2.5 GHz | 24 | 128 GB | 2 x Intel Xeon Phi 7120P, 61 cores, 16 GB RAM | For more details please refer to the [Compute nodes](compute-nodes/). 
-Remote visualization nodes --------------------------- +## Remote Visualization Nodes + For remote visualization two nodes with NICE DCV software are available each configured: -|Node|Count|Processor|Cores|Memory|GPU Accelerator| -|---|---|---|---|---|---| -|visualization|2|2 x Intel Xeon E5-2695v3, 2.3 GHz|28|512 GB|NVIDIA QUADRO K5000, 4 GB RAM| +| Node | Count | Processor | Cores | Memory | GPU Accelerator | +| ------------- | ----- | --------------------------------- | ----- | ------ | ----------------------------- | +| visualization | 2 | 2 x Intel Xeon E5-2695v3, 2.3 GHz | 28 | 512 GB | NVIDIA QUADRO K5000, 4 GB RAM | + +## SGI Uv 2000 -SGI UV 2000 ------------ For large memory computations a special SMP/NUMA SGI UV 2000 server is available: -|Node |Count |Processor |Cores|Memory|Extra HW | -| --- | --- | --- | --- | --- | --- | -|UV2000 |1 |14 x Intel Xeon E5-4627v2, 3.3 GHz, 8 cores |112 |3328 GB DDR3@1866 MHz |2 x 400GB local SSD</br>1x NVIDIA GM200 (GeForce GTX TITAN X), 12 GB RAM | +| Node | Count | Processor | Cores | Memory | Extra HW | +| ------ | ----- | ------------------------------------------- | ----- | --------------------- | ------------------------------------------------------------------------ | +| UV2000 | 1 | 14 x Intel Xeon E5-4627v2, 3.3 GHz, 8 cores | 112 | 3328 GB DDR3@1866 MHz | 2 x 400GB local SSD</br>1x NVIDIA GM200 (GeForce GTX TITAN X), 12 GB RAM |  diff --git a/docs.it4i/salomon/ib-single-plane-topology.md b/docs.it4i/salomon/ib-single-plane-topology.md index 8bb8d0d0229c83612e3dad13ff1cd39955628486..b1a5381e6280b9c1ace1c84c90b12ef2d4641650 100644 --- a/docs.it4i/salomon/ib-single-plane-topology.md +++ b/docs.it4i/salomon/ib-single-plane-topology.md @@ -1,32 +1,31 @@ -IB single-plane topology -======================== +# IB single-plane topology A complete M-Cell assembly consists of four compute racks. Each rack contains 4 x physical IRUs - Independent rack units. Using one dual socket node per one blade slot leads to 8 logical IRUs. Each rack contains 4 x 2 SGI ICE X IB Premium Blades. The SGI ICE X IB Premium Blade provides the first level of interconnection via dual 36-port Mellanox FDR InfiniBand ASIC switch with connections as follows: -- 9 ports from each switch chip connect to the unified backplane, to connect the 18 compute node slots -- 3 ports on each chip provide connectivity between the chips -- 24 ports from each switch chip connect to the external bulkhead, for a total of 48 +* 9 ports from each switch chip connect to the unified backplane, to connect the 18 compute node slots +* 3 ports on each chip provide connectivity between the chips +* 24 ports from each switch chip connect to the external bulkhead, for a total of 48 -###IB single-plane topology - ICEX M-Cell +## IB Single-Plane Topology - ICEX M-Cell Each color in each physical IRU represents one dual-switch ASIC switch. -[IB single-plane topology - ICEX Mcell.pdf](../src/IB single-plane topology - ICEX Mcell.pdf) +[IB single-plane topology - ICEX Mcell.pdf](<../src/IB single-plane topology - ICEX Mcell.pdf>)  -### IB single-plane topology - Accelerated nodes +## IB Single-Plane Topology - Accelerated Nodes Each of the 3 inter-connected D racks are equivalent to one half of M-Cell rack. 18 x D rack with MIC accelerated nodes [r21-r38] are equivalent to 3 M-Cell racks as shown in a diagram [7D Enhanced Hypercube](7d-enhanced-hypercube/). As shown in a diagram  -- Racks 21, 22, 23, 24, 25, 26 are equivalent to one M-Cell rack. 
-- Racks 27, 28, 29, 30, 31, 32 are equivalent to one M-Cell rack. -- Racks 33, 34, 35, 36, 37, 38 are equivalent to one M-Cell rack. +* Racks 21, 22, 23, 24, 25, 26 are equivalent to one M-Cell rack. +* Racks 27, 28, 29, 30, 31, 32 are equivalent to one M-Cell rack. +* Racks 33, 34, 35, 36, 37, 38 are equivalent to one M-Cell rack. -[IB single-plane topology - Accelerated nodes.pdf](../src/IB single-plane topology - Accelerated nodes.pdf) +[IB single-plane topology - Accelerated nodes.pdf](<../src/IB single-plane topology - Accelerated nodes.pdf>)  diff --git a/docs.it4i/salomon/introduction.md b/docs.it4i/salomon/introduction.md index 87950f42243c3eb1829edd44122cea8d15c1e721..83ff79221fc01aadbf0cfa1258220778bc275308 100644 --- a/docs.it4i/salomon/introduction.md +++ b/docs.it4i/salomon/introduction.md @@ -1,17 +1,16 @@ -Introduction -============ +# Introduction Welcome to Salomon supercomputer cluster. The Salomon cluster consists of 1008 compute nodes, totaling 24192 compute cores with 129 TB RAM and giving over 2 Pflop/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 24 cores, at least 128 GB RAM. Nodes are interconnected by 7D Enhanced hypercube InfiniBand network and equipped with Intel Xeon E5-2680v3 processors. The Salomon cluster consists of 576 nodes without accelerators and 432 nodes equipped with Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](hardware-overview/). -The cluster runs [CentOS Linux](http://www.bull.com/bullx-logiciels/systeme-exploitation.html) operating system, which is compatible with the RedHat [ Linux family.](http://upload.wikimedia.org/wikipedia/commons/1/1b/Linux_Distribution_Timeline.svg) +The cluster runs [CentOS Linux](http://www.bull.com/bullx-logiciels/systeme-exploitation.html) operating system, which is compatible with the RedHat [Linux family.](http://upload.wikimedia.org/wikipedia/commons/1/1b/Linux_Distribution_Timeline.svg) -**Water-cooled Compute Nodes With MIC Accelerator** +## Water-Cooled Compute Nodes With MIC Accelerator - +  -**Tape Library T950B** +## Tape Library T950B  diff --git a/docs.it4i/salomon/job-priority.md b/docs.it4i/salomon/job-priority.md index 97bc6968c4bd537521d1a2e9700454456fff80ab..265afe5441ba0ad549348c7de9edc07c3fb078fb 100644 --- a/docs.it4i/salomon/job-priority.md +++ b/docs.it4i/salomon/job-priority.md @@ -1,17 +1,16 @@ -Job scheduling -============== +# Job scheduling + +## Job Execution Priority -Job execution priority ----------------------- Scheduler gives each job an execution priority and then uses this job execution priority to select which job(s) to run. Job execution priority is determined by these job properties (in order of importance): -1. queue priority -2. fair-share priority -3. eligible time +1. queue priority +1. fair-share priority +1. eligible time -### Queue priority +### Queue Priority Queue priority is priority of queue where job is queued before execution. @@ -19,7 +18,7 @@ Queue priority has the biggest impact on job execution priority. Execution prior Queue priorities can be seen at <https://extranet.it4i.cz/rsweb/salomon/queues> -### Fair-share priority +### Fair-Share Priority Fair-share priority is priority calculated on recent usage of resources. Fair-share priority is calculated per project, all members of project share same fair-share priority. Projects with higher recent usage have lower fair-share priority than projects with lower or none recent usage. 
@@ -30,19 +29,19 @@ Fair-share priority is calculated as  where MAX_FAIRSHARE has value 1E6, -usage*Project* is cumulated usage by all members of selected project, -usage*Total* is total usage by all users, by all projects. +usage<sub>Project</sub> is cumulated usage by all members of selected project, +usage<sub>Total</sub> is total usage by all users, by all projects. Usage counts allocated core-hours (`ncpus x walltime`). Usage is decayed, or cut in half periodically, at the interval 168 hours (one week). -Jobs queued in queue qexp are not calculated to project's usage. -======= -!!! Note "Note" - Calculated usage and fair-share priority can be seen at <https://extranet.it4i.cz/rsweb/salomon/projects>. +## Jobs Queued in Queue qexp Are Not Calculated to Project's Usage. + +!!! note + Calculated usage and fair-share priority can be seen at <https://extranet.it4i.cz/rsweb/salomon/projects>. Calculated fair-share priority can be also seen as Resource_List.fairshare attribute of a job. -###Eligible time +### Eligible Time Eligible time is amount (in seconds) of eligible time job accrued while waiting to run. Jobs with higher eligible time gains higher priority. @@ -66,11 +65,11 @@ The scheduler makes a list of jobs to run in order of execution priority. Schedu It means, that jobs with lower execution priority can be run before jobs with higher execution priority. -!!! Note "Note" - It is **very beneficial to specify the walltime** when submitting jobs. +!!! note + It is **very beneficial to specify the walltime** when submitting jobs. Specifying more accurate walltime enables better scheduling, better execution times and better resource usage. Jobs with suitable (small) walltime could be backfilled - and overtake job(s) with higher priority. -### Job placement +### Job Placement Job [placement can be controlled by flags during submission](job-submission-and-execution/#job_placement). diff --git a/docs.it4i/salomon/job-submission-and-execution.md b/docs.it4i/salomon/job-submission-and-execution.md index 23f97bb9bae22abde7943c8fb9bc42fa708a3748..e7a4c4ff0039815504804e9f5fcb30959e8713e6 100644 --- a/docs.it4i/salomon/job-submission-and-execution.md +++ b/docs.it4i/salomon/job-submission-and-execution.md @@ -1,19 +1,18 @@ -Job submission and execution -============================ +# Job submission and execution + +## Job Submission -Job Submission --------------- When allocating computational resources for the job, please specify -1. suitable queue for your job (default is qprod) -2. number of computational nodes required -3. number of cores per node required -4. maximum wall time allocated to your calculation, note that jobs exceeding maximum wall time will be killed -5. Project ID -6. Jobscript or interactive switch +1. suitable queue for your job (default is qprod) +1. number of computational nodes required +1. number of cores per node required +1. maximum wall time allocated to your calculation, note that jobs exceeding maximum wall time will be killed +1. Project ID +1. Jobscript or interactive switch -!!! Note "Note" - Use the **qsub** command to submit your job to a queue for allocation of the computational resources. +!!! note + Use the **qsub** command to submit your job to a queue for allocation of the computational resources. 
Submit the job using the qsub command: @@ -23,8 +22,8 @@ $ qsub -A Project_ID -q queue -l select=x:ncpus=y,walltime=[[hh:]mm:]ss[.ms] job The qsub submits the job into the queue, in another words the qsub command creates a request to the PBS Job manager for allocation of specified resources. The resources will be allocated when available, subject to above described policies and constraints. **After the resources are allocated the jobscript or interactive shell is executed on first of the allocated nodes.** -!!! Note "Note" - PBS statement nodes (qsub -l nodes=nodespec) is not supported on Salomon cluster. +!!! note + PBS statement nodes (qsub -l nodes=nodespec) is not supported on Salomon cluster. ### Job Submission Examples @@ -44,15 +43,15 @@ In this example, we allocate 4 nodes, 24 cores per node, for 1 hour. We allocate $ qsub -A OPEN-0-0 -q qlong -l select=10:ncpus=24 ./myjob ``` -In this example, we allocate 10 nodes, 24 cores per node, for 72 hours. We allocate these resources via the qlong queue. Jobscript myjob will be executed on the first node in the allocation. +In this example, we allocate 10 nodes, 24 cores per node, for 72 hours. We allocate these resources via the qlong queue. Jobscript myjob will be executed on the first node in the allocation. ```bash $ qsub -A OPEN-0-0 -q qfree -l select=10:ncpus=24 ./myjob ``` -In this example, we allocate 10 nodes, 24 cores per node, for 12 hours. We allocate these resources via the qfree queue. It is not required that the project OPEN-0-0 has any available resources left. Consumed resources are still accounted for. Jobscript myjob will be executed on the first node in the allocation. +In this example, we allocate 10 nodes, 24 cores per node, for 12 hours. We allocate these resources via the qfree queue. It is not required that the project OPEN-0-0 has any available resources left. Consumed resources are still accounted for. Jobscript myjob will be executed on the first node in the allocation. -### Intel Xeon Phi co-processors +### Intel Xeon Phi Co-Processors To allocate a node with Xeon Phi co-processor, user needs to specify that in select statement. Currently only allocation of whole nodes with both Phi cards as the smallest chunk is supported. Standard PBSPro approach through attributes "accelerator", "naccelerators" and "accelerator_model" is used. The "accelerator_model" can be omitted, since on Salomon only one type of accelerator type/model is available. @@ -72,18 +71,18 @@ In this example, we allocate 4 nodes, with 24 cores per node (totalling 96 cores ### UV2000 SMP -!!! Note "Note" - 14 NUMA nodes available on UV2000 +!!! note + 14 NUMA nodes available on UV2000 Per NUMA node allocation. Jobs are isolated by cpusets. -The UV2000 (node uv1) offers 3328GB of RAM and 112 cores, distributed in 14 NUMA nodes. A NUMA node packs 8 cores and approx. 236GB RAM. In the PBS the UV2000 provides 14 chunks, a chunk per NUMA node (see [Resource allocation policy](resources-allocation-policy/)). The jobs on UV2000 are isolated from each other by cpusets, so that a job by one user may not utilize CPU or memory allocated to a job by other user. Always, full chunks are allocated, a job may only use resources of the NUMA nodes allocated to itself. +The UV2000 (node uv1) offers 3328GB of RAM and 112 cores, distributed in 14 NUMA nodes. A NUMA node packs 8 cores and approx. 236GB RAM. In the PBS the UV2000 provides 14 chunks, a chunk per NUMA node (see [Resource allocation policy](resources-allocation-policy/)). 
The jobs on UV2000 are isolated from each other by cpusets, so that a job by one user may not utilize CPU or memory allocated to a job by other user. Always, full chunks are allocated, a job may only use resources of the NUMA nodes allocated to itself. ```bash $ qsub -A OPEN-0-0 -q qfat -l select=14 ./myjob ``` -In this example, we allocate all 14 NUMA nodes (corresponds to 14 chunks), 112 cores of the SGI UV2000 node for 72 hours. Jobscript myjob will be executed on the node uv1. +In this example, we allocate all 14 NUMA nodes (corresponds to 14 chunks), 112 cores of the SGI UV2000 node for 72 hours. Jobscript myjob will be executed on the node uv1. ```bash $ qsub -A OPEN-0-0 -q qfat -l select=1:mem=2000GB ./myjob @@ -91,7 +90,7 @@ $ qsub -A OPEN-0-0 -q qfat -l select=1:mem=2000GB ./myjob In this example, we allocate 2000GB of memory on the UV2000 for 72 hours. By requesting 2000GB of memory, 10 chunks are allocated. Jobscript myjob will be executed on the node uv1. -### Useful tricks +### Useful Tricks All qsub options may be [saved directly into the jobscript](#example-jobscript-for-mpi-calculation-with-preloaded-inputs). In such a case, no options to qsub are needed. @@ -105,13 +104,12 @@ By default, the PBS batch system sends an e-mail only when the job is aborted. D $ qsub -m n ``` -Advanced job placement --------------------------- +## Advanced Job Placement -### Placement by name +### Placement by Name -!!! Note "Note" - Not useful for ordinary computing, suitable for node testing/bechmarking and management tasks. +!!! note + Not useful for ordinary computing, suitable for node testing/bechmarking and management tasks. Specific nodes may be selected using PBS resource attribute host (for hostnames): @@ -127,21 +125,20 @@ qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=24:host=cns680+1:ncpus=24:host=cns68 In this example, we allocate nodes r24u35n680 and r24u36n681, all 24 cores per node, for 24 hours. Consumed resources will be accounted to the Project identified by Project ID OPEN-0-0. The resources will be available interactively. -### Placement by network location - -Network location of allocated nodes in the [Infiniband network](network/) influences efficiency of network communication between nodes of job. Nodes on the same Infiniband switch communicate faster with lower latency than distant nodes. To improve communication efficiency of jobs, PBS scheduler on Salomon is configured to allocate nodes - from currently available resources - which are as close as possible in the network topology. +### Placement by Network Location -For communication intensive jobs it is possible to set stricter requirement - to require nodes directly connected to the same Infiniband switch or to require nodes located in the same dimension group of the Infiniband network. +Network location of allocated nodes in the [InifiBand network](network/) influences efficiency of network communication between nodes of job. Nodes on the same InifiBand switch communicate faster with lower latency than distant nodes. To improve communication efficiency of jobs, PBS scheduler on Salomon is configured to allocate nodes - from currently available resources - which are as close as possible in the network topology. +For communication intensive jobs it is possible to set stricter requirement - to require nodes directly connected to the same InifiBand switch or to require nodes located in the same dimension group of the InifiBand network. 
-### Placement by Infiniband switch
+### Placement by InfiniBand Switch

-Nodes directly connected to the same Infiniband switch can communicate most efficiently. Using the same switch prevents hops in the network and provides for unbiased, most efficient network communication. There are 9 nodes directly connected to every Infiniband switch.
+Nodes directly connected to the same InfiniBand switch can communicate most efficiently. Using the same switch prevents hops in the network and provides for unbiased, most efficient network communication. There are 9 nodes directly connected to every InfiniBand switch.

-!!! Note "Note"
-    We recommend allocating compute nodes of a single switch when the best possible computational network performance is required to run job efficiently.
+!!! note
+    We recommend allocating compute nodes of a single switch when the best possible computational network performance is required to run the job efficiently.

-Nodes directly connected to the one Infiniband switch can be allocated using node grouping on PBS resource attribute switch.
+Nodes directly connected to one InfiniBand switch can be allocated using node grouping on the PBS resource attribute switch.

In this example, we request all 9 nodes directly connected to the same switch using node grouping placement.

@@ -149,22 +146,20 @@ In this example, we request all 9 nodes directly connected to the same switch us

$ qsub -A OPEN-0-0 -q qprod -l select=9:ncpus=24 -l place=group=switch ./myjob
```

-### Placement by specific Infiniband switch
+### Placement by Specific InfiniBand Switch

-!!! Note "Note"
-    Not useful for ordinary computing, suitable for testing and management tasks.
+!!! note
+    Not useful for ordinary computing, suitable for testing and management tasks.

-
-Nodes directly connected to the specific Infiniband switch can be selected using the PBS resource attribute *switch*.
+Nodes directly connected to a specific InfiniBand switch can be selected using the PBS resource attribute _switch_.

In this example, we request all 9 nodes directly connected to the r4i1s0sw1 switch.

-
```bash
$ qsub -A OPEN-0-0 -q qprod -l select=9:ncpus=24:switch=r4i1s0sw1 ./myjob
```

-List of all Infiniband switches:
+List of all InfiniBand switches:

```bash
$ qmgr -c 'print node @a' | grep switch | awk '{print $6}' | sort -u
@@ -177,7 +172,8 @@ r1i2s0sw0
...
```

-List of all all nodes directly connected to the specific Infiniband switch:
+List of all nodes directly connected to a specific InfiniBand switch:
+
```bash
$ qmgr -c 'p n @d' | grep 'switch = r36sw3' | awk '{print $3}' | sort
r36u31n964
@@ -191,21 +187,22 @@ r37u33n971
r37u34n972
```

-### Placement by Hypercube dimension
+### Placement by Hypercube Dimension

-Nodes located in the same dimension group may be allocated using node grouping on PBS resource attribute ehc_[1-7]d .
+Nodes located in the same dimension group may be allocated using node grouping on the PBS resource attribute ehc\_[1-7]d.

-|Hypercube dimension|node_group_key|#nodes per group|
-|---|---|---|
-|1D|ehc_1d|18|
-|2D|ehc_2d|36|
-|3D|ehc_3d|72|
-|4D|ehc_4d|144|
-|5D|ehc_5d|144,288|
-|6D|ehc_6d|432,576|
-|7D|ehc_7d|all|
+| Hypercube dimension | node_group_key | #nodes per group |
+| ------------------- | -------------- | ---------------- |
+| 1D                  | ehc_1d         | 18               |
+| 2D                  | ehc_2d         | 36               |
+| 3D                  | ehc_3d         | 72               |
+| 4D                  | ehc_4d         | 144              |
+| 5D                  | ehc_5d         | 144,288          |
+| 6D                  | ehc_6d         | 432,576          |
+| 7D                  | ehc_7d         | all              |

In this example, we allocate 16 nodes in the same [hypercube dimension](7d-enhanced-hypercube/) 1 group. 
+
```bash
$ qsub -A OPEN-0-0 -q qprod -l select=16:ncpus=24 -l place=group=ehc_1d -I
```
@@ -224,6 +221,7 @@ $ qmgr -c 'p n @d' | grep ehc_1d | awk '{print $6}' | sort |uniq -c
```

List of all nodes in a specific dimension 1 group:
+
```bash
$ qmgr -c 'p n @d' | grep 'ehc_1d = r1i0' | awk '{print $3}' | sort
r1i0n0
@@ -233,11 +231,10 @@ r1i0n11
...
```

-Job Management
--------------
+## Job Management

-!!! Note "Note"
-    Check status of your jobs using the **qstat** and **check-pbs-jobs** commands
+!!! note
+    Check the status of your jobs using the **qstat** and **check-pbs-jobs** commands

```bash
$ qstat -a
@@ -252,15 +249,15 @@ Example:

$ qstat -a

srv11:
-                                                            Req'd Req'd   Elap
-Job ID          Username Queue    Jobname    SessID NDS TSK Memory Time S Time
+                                                            Req'd  Req'd   Elap
+Job ID          Username Queue    Jobname    SessID NDS TSK Memory Time  S Time
--------------- -------- --  |---|---| ------ --- --- ------ ----- - -----
-16287.isrv5     user1    qlong    job1        6183   4  64    --  144:0 R 38:25
-16468.isrv5     user1    qlong    job2        8060   4  64    --  144:0 R 17:44
-16547.isrv5     user2    qprod    job3x      13516   2  32    --  48:00 R 00:58
+16287.isrv5     user1    qlong    job1        6183   4  64    --  144:0 R 38:25
+16468.isrv5     user1    qlong    job2        8060   4  64    --  144:0 R 17:44
+16547.isrv5     user2    qprod    job3x      13516   2  32    --  48:00 R 00:58
```

-In this example user1 and user2 are running jobs named job1, job2 and job3x. The jobs job1 and job2 are using 4 nodes, 16 cores per node each. The job1 already runs for 38 hours and 25 minutes, job2 for 17 hours 44 minutes. The job1 already consumed 64*38.41 = 2458.6 core hours. The job3x already consumed 0.96*32 = 30.93 core hours. These consumed core hours will be accounted on the respective project accounts, regardless of whether the allocated cores were actually used for computations.
+In this example, user1 and user2 are running jobs named job1, job2 and job3x. The jobs job1 and job2 are using 4 nodes, 16 cores per node each. The job1 has already run for 38 hours and 25 minutes, job2 for 17 hours and 44 minutes. The job1 has already consumed approximately 64 x 38.42 = 2459 core hours. The job3x has already consumed approximately 32 x 0.97 = 31 core hours. These consumed core hours will be accounted on the respective project accounts, regardless of whether the allocated cores were actually used for computations.

Check the status of your jobs using the check-pbs-jobs command. Check presence of user's PBS jobs' processes on execution hosts. Display load, processes. Display job standard and error output. Continuously display (tail -f) job standard or error output.

@@ -315,8 +312,8 @@ Run loop 3

In this example, we see the actual output (some iteration loops) of the job 35141.dm2

-!!! Note "Note"
-    Manage your queued or running jobs, using the **qhold**, **qrls**, **qdel,** **qsig** or **qalter** commands
+!!! note
+    Manage your queued or running jobs using the **qhold**, **qrls**, **qdel**, **qsig** or **qalter** commands

You may release your allocation at any time, using the qdel command

@@ -336,35 +333,34 @@ Learn more by reading the pbs man page

$ man pbs_professional
```

-Job Execution
--------------
+## Job Execution

### Jobscript

-!!! Note "Note"
-    Prepare the jobscript to run batch jobs in the PBS queue system
+!!! note
+    Prepare the jobscript to run batch jobs in the PBS queue system

The Jobscript is a user made script controlling the sequence of commands for executing the calculation. It is often written in bash, though other scripts may be used as well. The jobscript is supplied to the PBS **qsub** command as an argument and executed by the PBS Professional workload manager.

-!!! 
Note "Note" - The jobscript or interactive shell is executed on first of the allocated nodes. +!!! note + The jobscript or interactive shell is executed on first of the allocated nodes. ```bash $ qsub -q qexp -l select=4:ncpus=24 -N Name0 ./myjob $ qstat -n -u username isrv5: - Req'd Req'd Elap -Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time + Req'd Req'd Elap +Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time --------------- -------- -- |---|---| ------ --- --- ------ ----- - ----- -15209.isrv5 username qexp Name0 5530 4 96 -- 01:00 R 00:00 +15209.isrv5 username qexp Name0 5530 4 96 -- 01:00 R 00:00 r21u01n577/0*24+r21u02n578/0*24+r21u03n579/0*24+r21u04n580/0*24 ``` In this example, the nodes r21u01n577, r21u02n578, r21u03n579, r21u04n580 were allocated for 1 hour via the qexp queue. The jobscript myjob will be executed on the node r21u01n577, while the nodes r21u02n578, r21u03n579, r21u04n580 are available for use as well. -!!! Note "Note" - The jobscript or interactive shell is by default executed in home directory +!!! note + The jobscript or interactive shell is by default executed in home directory ```bash $ qsub -q qexp -l select=4:ncpus=24 -I @@ -377,8 +373,8 @@ $ pwd In this example, 4 nodes were allocated interactively for 1 hour via the qexp queue. The interactive shell is executed in the home directory. -!!! Note "Note" - All nodes within the allocation may be accessed via ssh. Unallocated nodes are not accessible to user. +!!! note + All nodes within the allocation may be accessed via ssh. Unallocated nodes are not accessible to user. The allocated nodes are accessible via ssh from login nodes. The nodes may access each other via ssh as well. @@ -409,8 +405,8 @@ In this example, the hostname program is executed via pdsh from the interactive ### Example Jobscript for MPI Calculation -!!! Note "Note" - Production jobs must use the /scratch directory for I/O +!!! note + Production jobs must use the /scratch directory for I/O The recommended way to run production jobs is to change to /scratch directory early in the jobscript, copy all inputs to /scratch, execute the calculations and copy outputs to home directory. @@ -441,15 +437,15 @@ exit In this example, some directory on the /home holds the input file input and executable mympiprog.x . We create a directory myjob on the /scratch filesystem, copy input and executable files from the /home directory where the qsub was invoked ($PBS_O_WORKDIR) to /scratch, execute the MPI programm mympiprog.x and copy the output file back to the /home directory. The mympiprog.x is executed as one process per node, on all allocated nodes. -!!! Note "Note" - Consider preloading inputs and executables onto [shared scratch](storage/) before the calculation starts. +!!! note + Consider preloading inputs and executables onto [shared scratch](storage/) before the calculation starts. In some cases, it may be impractical to copy the inputs to scratch and outputs to home. This is especially true when very large input and output files are expected, or when the files should be reused by a subsequent calculation. In such a case, it is users responsibility to preload the input files on shared /scratch before the job submission and retrieve the outputs manually, after all calculations are finished. -!!! Note "Note" - Store the qsub options within the jobscript. Use **mpiprocs** and **ompthreads** qsub options to control the MPI job execution. +!!! note + Store the qsub options within the jobscript. 
Use the **mpiprocs** and **ompthreads** qsub options to control the MPI job execution.

-### Example Jobscript for MPI Calculation with preloaded inputs
+### Example Jobscript for MPI Calculation With Preloaded Inputs

Example jobscript for an MPI job with preloaded inputs and executables, options for qsub are stored within the script:

@@ -480,8 +476,8 @@ HTML commented section #2 (examples need to be reworked)

### Example Jobscript for Single Node Calculation

-!!! Note "Note"
-    Local scratch directory is often useful for single node jobs. Local scratch will be deleted immediately after the job ends. Be very careful, use of RAM disk filesystem is at the expense of operational memory.
+!!! note
+    The local scratch directory is often useful for single node jobs. Local scratch will be deleted immediately after the job ends. Be very careful: use of the RAM disk filesystem is at the expense of operational memory.

Example jobscript for single node calculation, using [local scratch](storage/) on the node:

diff --git a/docs.it4i/salomon/network.md b/docs.it4i/salomon/network.md
index 9fbf17d255c2a5c542cb881dba1f689c6c68baff..2f3f8a09f474c12ffe961781c39ea6fbea260a46 100644
--- a/docs.it4i/salomon/network.md
+++ b/docs.it4i/salomon/network.md
@@ -1,11 +1,10 @@
-Network
-=======
+# Network

All compute and login nodes of Salomon are interconnected by 7D Enhanced hypercube [InfiniBand](http://en.wikipedia.org/wiki/InfiniBand) network and by Gigabit [Ethernet](http://en.wikipedia.org/wiki/Ethernet) network. Only [InfiniBand](http://en.wikipedia.org/wiki/InfiniBand) network may be used to transfer user data.

-InfiniBand Network
-------------------
+## InfiniBand Network
+
All compute and login nodes of Salomon are interconnected by 7D Enhanced hypercube [Infiniband](http://en.wikipedia.org/wiki/InfiniBand) network (56 Gbps). The network topology is a [7D Enhanced hypercube](7d-enhanced-hypercube/).

Read more about schematic representation of the Salomon cluster [IB single-plain topology](ib-single-plane-topology/)

@@ -15,16 +14,15 @@ The compute nodes may be accessed via the Infiniband network using ib0 network i

The network provides **2170MB/s** transfer rates via the TCP connection (single stream) and up to **3600MB/s** via native Infiniband protocol.

-Example
--------
+## Example

```bash
$ qsub -q qexp -l select=4:ncpus=16 -N Name0 ./myjob
$ qstat -n -u username

-                                                            Req'd Req'd   Elap
-Job ID          Username Queue    Jobname    SessID NDS TSK Memory Time S Time
+                                                            Req'd  Req'd   Elap
+Job ID          Username Queue    Jobname    SessID NDS TSK Memory Time  S Time
--------------- -------- --  |---|---| ------ --- --- ------ ----- - -----
-15209.isrv5     username qexp     Name0        5530   4  96    --  01:00 R 00:00
+15209.isrv5     username qexp     Name0      5530   4  96    --  01:00 R 00:00
   r4i1n0/0*24+r4i1n1/0*24+r4i1n2/0*24+r4i1n3/0*24
```

@@ -34,7 +32,7 @@ In this example, we access the node r4i1n0 by Infiniband network via the ib0 int

$ ssh 10.17.35.19
```

-In this example, we get 
+In this example, we get information about the InfiniBand network.

```bash

diff --git a/docs.it4i/salomon/prace.md b/docs.it4i/salomon/prace.md
index f64d7a31b60fb8cdc05c7b2e6b1c22dd4b9d0fae..f1990d29251e00f4389ba5d23f399777a86ed726 100644
--- a/docs.it4i/salomon/prace.md
+++ b/docs.it4i/salomon/prace.md
@@ -1,28 +1,26 @@
-PRACE User Support
-==================
+# PRACE User Support

-Intro
------
-PRACE users coming to Salomon as to TIER-1 system offered through the DECI calls are in general treated as standard users and so most of the general documentation applies to them as well. 
This section shows the main differences for quicker orientation, but often uses references to the original documentation. PRACE users who don't undergo the full procedure (including signing the IT4I AuP on top of the PRACE AuP) will not have a password and thus access to some services intended for regular users. This can lower their comfort, but otherwise they should be able to use the TIER-1 system as intended. Please see the [Obtaining Login Credentials section](../get-started-with-it4innovations/obtaining-login-credentials/obtaining-login-credentials/), if the same level of access is required.
+## Intro
+
+PRACE users coming to Salomon as a TIER-1 system offered through the DECI calls are in general treated as standard users, and so most of the general documentation applies to them as well. This section shows the main differences for quicker orientation, but often uses references to the original documentation. PRACE users who don't undergo the full procedure (including signing the IT4I AuP on top of the PRACE AuP) will not have a password and thus no access to some services intended for regular users. This can lower their comfort, but otherwise they should be able to use the TIER-1 system as intended. Please see the [Obtaining Login Credentials section](../general/obtaining-login-credentials/obtaining-login-credentials/), if the same level of access is required.

All general [PRACE User Documentation](http://www.prace-ri.eu/user-documentation/) should be read before continuing reading the local documentation here.

-Help and Support
------------------------
+## Help and Support
+
If you have any troubles, need information, request support or want to install additional software, please use [PRACE Helpdesk](http://www.prace-ri.eu/helpdesk-guide264/).

Information about the local services is provided in the [introduction of general user documentation](introduction/). Please keep in mind that standard PRACE accounts don't have a password to access the web interface of the local (IT4Innovations) request tracker and thus a new ticket should be created by sending an e-mail to support[at]it4i.cz.

-Obtaining Login Credentials
---------------------------
+## Obtaining Login Credentials
+
In general PRACE users already have a PRACE account set up through their HOMESITE (institution from their country) as a result of a rewarded PRACE project proposal. This includes a signed PRACE AuP, generated and registered certificates, etc.

-If there's a special need a PRACE user can get a standard (local) account at IT4Innovations. To get an account on the Salomon cluster, the user needs to obtain the login credentials. The procedure is the same as for general users of the cluster, so please see the corresponding [section of the general documentation here](../get-started-with-it4innovations/obtaining-login-credentials/obtaining-login-credentials/).
+If there's a special need, a PRACE user can get a standard (local) account at IT4Innovations. To get an account on the Salomon cluster, the user needs to obtain the login credentials. The procedure is the same as for general users of the cluster, so please see the corresponding [section of the general documentation here](../general/obtaining-login-credentials/obtaining-login-credentials/).

-Accessing the cluster
---------------------
+## Accessing the Cluster

-### Access with GSI-SSH
+### Access With GSI-SSH

For all PRACE users, the method for interactive access (login) and data transfer based on grid services from Globus Toolkit (GSI SSH and GridFTP) is supported. 
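+As a quick orientation, a minimal GSI-SSH session might look as follows. This is a sketch assuming the standard Globus Toolkit client tools; the exact addresses and ports for each access path are listed in the tables below:
+
+```bash
+# create a short-lived proxy certificate from your PRACE certificate
+$ grid-proxy-init
+# check the proxy (validity, remaining lifetime)
+$ grid-proxy-info
+# log in to Salomon via GSI SSH on port 2222
+$ gsissh -p 2222 salomon-prace.it4i.cz
+```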
@@ -30,11 +28,11 @@ The user will need a valid certificate and to be present in the PRACE LDAP (plea

Most of the information needed by PRACE users accessing the Salomon TIER-1 system can be found here:

-- [General user's FAQ](http://www.prace-ri.eu/Users-General-FAQs)
-- [Certificates FAQ](http://www.prace-ri.eu/Certificates-FAQ)
-- [Interactive access using GSISSH](http://www.prace-ri.eu/Interactive-Access-Using-gsissh)
-- [Data transfer with GridFTP](http://www.prace-ri.eu/Data-Transfer-with-GridFTP-Details)
-- [Data transfer with gtransfer](http://www.prace-ri.eu/Data-Transfer-with-gtransfer)
+* [General user's FAQ](http://www.prace-ri.eu/Users-General-FAQs)
+* [Certificates FAQ](http://www.prace-ri.eu/Certificates-FAQ)
+* [Interactive access using GSISSH](http://www.prace-ri.eu/Interactive-Access-Using-gsissh)
+* [Data transfer with GridFTP](http://www.prace-ri.eu/Data-Transfer-with-GridFTP-Details)
+* [Data transfer with gtransfer](http://www.prace-ri.eu/Data-Transfer-with-gtransfer)

Before you start to use any of the services, don't forget to create a proxy certificate from your certificate:

@@ -50,17 +48,17 @@ To check whether your proxy certificate is still valid (by default it's valid 12

To access the Salomon cluster, login nodes running the GSI SSH service are available. The service is available from the public Internet as well as from the internal PRACE network (accessible only from other PRACE partners).

-**Access from PRACE network:**
+#### Access From PRACE Network:

It is recommended to use the single DNS name salomon-prace.it4i.cz, which is distributed between the login nodes. If needed, the user can log in directly to one of the login nodes. The addresses are:

-|Login address|Port|Protocol|Login node|
-|---|---|---|---|
-|salomon-prace.it4i.cz|2222|gsissh|login1, login2, login3 or login4|
-|login1-prace.salomon.it4i.cz|2222|gsissh|login1|
-|login2-prace.salomon.it4i.cz|2222|gsissh|login2|
-|login3-prace.salomon.it4i.cz|2222|gsissh|login3|
-|login4-prace.salomon.it4i.cz|2222|gsissh|login4|
+| Login address                | Port | Protocol | Login node                       |
+| ---------------------------- | ---- | -------- | -------------------------------- |
+| salomon-prace.it4i.cz        | 2222 | gsissh   | login1, login2, login3 or login4 |
+| login1-prace.salomon.it4i.cz | 2222 | gsissh   | login1                           |
+| login2-prace.salomon.it4i.cz | 2222 | gsissh   | login2                           |
+| login3-prace.salomon.it4i.cz | 2222 | gsissh   | login3                           |
+| login4-prace.salomon.it4i.cz | 2222 | gsissh   | login4                           |

```bash
$ gsissh -p 2222 salomon-prace.it4i.cz
```

When logging in from another PRACE system, the prace_service script can be used:

@@ -72,17 +70,17 @@ $ gsissh `prace_service -i -s salomon`
```

-**Access from public Internet:**
+#### Access From Public Internet:

It is recommended to use the single DNS name salomon.it4i.cz, which is distributed between the login nodes. If needed, the user can log in directly to one of the login nodes. 
The addresses are:

-|Login address|Port|Protocol|Login node|
-|---|---|---|---|
-|salomon.it4i.cz|2222|gsissh|login1, login2, login3 or login4|
-|login1.salomon.it4i.cz|2222|gsissh|login1|
-|login2-prace.salomon.it4i.cz|2222|gsissh|login2|
-|login3-prace.salomon.it4i.cz|2222|gsissh|login3|
-|login4-prace.salomon.it4i.cz|2222|gsissh|login4|
+| Login address          | Port | Protocol | Login node                       |
+| ---------------------- | ---- | -------- | -------------------------------- |
+| salomon.it4i.cz        | 2222 | gsissh   | login1, login2, login3 or login4 |
+| login1.salomon.it4i.cz | 2222 | gsissh   | login1                           |
+| login2.salomon.it4i.cz | 2222 | gsissh   | login2                           |
+| login3.salomon.it4i.cz | 2222 | gsissh   | login3                           |
+| login4.salomon.it4i.cz | 2222 | gsissh   | login4                           |

```bash
$ gsissh -p 2222 salomon.it4i.cz
@@ -107,36 +105,36 @@ implementation on Salomon supports also SCP, so for small files transfer gsiscp

$ gsiscp -P 2222 salomon-prace.it4i.cz:_SALOMON_PATH_TO_YOUR_FILE_ _LOCAL_PATH_TO_YOUR_FILE_
```

-### Access to X11 applications (VNC)
+### Access to X11 Applications (VNC)

-If the user needs to run X11 based graphical application and does not have a X11 server, the applications can be run using VNC service. If the user is using regular SSH based access, please see the [section in general documentation](../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/).
+If the user needs to run an X11 based graphical application and does not have an X11 server, the applications can be run using the VNC service. If the user is using regular SSH based access, please see the [section in general documentation](../general/accessing-the-clusters/graphical-user-interface/x-window-system/).

-If the user uses GSI SSH based access, then the procedure is similar to the SSH based access ([look here](../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/)), only the port forwarding must be done using GSI SSH:
+If the user uses GSI SSH based access, then the procedure is similar to the SSH based access ([look here](../general/accessing-the-clusters/graphical-user-interface/x-window-system/)), only the port forwarding must be done using GSI SSH:

```bash
$ gsissh -p 2222 salomon.it4i.cz -L 5961:localhost:5961
```

-### Access with SSH
+### Access With SSH

After successfully obtaining login credentials for the local IT4Innovations account, PRACE users can access the cluster as regular users using SSH. For more information, please see the [section in general documentation](shell-and-data-access/).

-File transfers
------------------
+## File Transfers
+
PRACE users can use the same transfer mechanisms as regular users (if they've undergone the full registration procedure). For information about this, please see [the section in the general documentation](shell-and-data-access/).

Apart from the standard mechanisms, for PRACE users to transfer data to/from the Salomon cluster, a GridFTP server running the Globus Toolkit GridFTP service is available. The service is available from the public Internet as well as from the internal PRACE network (accessible only from other PRACE partners). There's one control server and three backend servers for striping and/or backup in case one of them would fail. 
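+Since three backend data movers are available, larger transfers may benefit from parallel streams or striping. A hedged sketch using standard globus-url-copy options (-p sets the number of parallel streams, -stripe enables striped transfer; paths are placeholders, as elsewhere in this section):
+
+```bash
+$ globus-url-copy -p 4 -stripe file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp-prace.salomon.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_
+```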
-**Access from PRACE network:**
+### Access From PRACE Network

-|Login address|Port|Node role|
-|---|---|---|
-|gridftp-prace.salomon.it4i.cz|2812|Front end /control server|
-|lgw1-prace.salomon.it4i.cz|2813|Backend / data mover server|
-|lgw2-prace.salomon.it4i.cz|2813|Backend / data mover server|
-|lgw3-prace.salomon.it4i.cz|2813|Backend / data mover server|
+| Login address                 | Port | Node role                   |
+| ----------------------------- | ---- | --------------------------- |
+| gridftp-prace.salomon.it4i.cz | 2812 | Front end / control server  |
+| lgw1-prace.salomon.it4i.cz    | 2813 | Backend / data mover server |
+| lgw2-prace.salomon.it4i.cz    | 2813 | Backend / data mover server |
+| lgw3-prace.salomon.it4i.cz    | 2813 | Backend / data mover server |

Copy files **to** Salomon by running the following commands on your local machine:

@@ -144,7 +142,7 @@ Copy files **to** Salomon by running the following commands on your local machin

$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp-prace.salomon.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_
```

-Or by using prace_service script:
+Or by using the prace_service script:

```bash
$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://`prace_service -i -f salomon`/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_
```

Copy files **from** Salomon:

@@ -156,20 +154,20 @@ $ globus-url-copy gsiftp://gridftp-prace.salomon.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
```

-Or by using prace_service script:
+Or by using the prace_service script:

```bash
$ globus-url-copy gsiftp://`prace_service -i -f salomon`/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
```

-**Access from public Internet:**
+### Access From Public Internet

-|Login address|Port|Node role|
-|---|---|---|---|
-|gridftp.salomon.it4i.cz|2812|Front end /control server|
-|lgw1.salomon.it4i.cz|2813|Backend / data mover server|
-|lgw2.salomon.it4i.cz|2813|Backend / data mover server|
-|lgw3.salomon.it4i.cz|2813|Backend / data mover server|
+| Login address           | Port | Node role                   |
+| ----------------------- | ---- | --------------------------- |
+| gridftp.salomon.it4i.cz | 2812 | Front end / control server  |
+| lgw1.salomon.it4i.cz    | 2813 | Backend / data mover server |
+| lgw2.salomon.it4i.cz    | 2813 | Backend / data mover server |
+| lgw3.salomon.it4i.cz    | 2813 | Backend / data mover server |

Copy files **to** Salomon by running the following commands on your local machine:

@@ -177,7 +175,7 @@ Copy files **to** Salomon by running the following commands on your local machin

$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://gridftp.salomon.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_
```

-Or by using prace_service script:
+Or by using the prace_service script:

```bash
$ globus-url-copy file://_LOCAL_PATH_TO_YOUR_FILE_ gsiftp://`prace_service -e -f salomon`/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_
```

Copy files **from** Salomon:

@@ -189,7 +187,7 @@ $ globus-url-copy gsiftp://gridftp.salomon.it4i.cz:2812/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
```

-Or by using prace_service script:
+Or by using the prace_service script:

```bash
$ globus-url-copy gsiftp://`prace_service -e -f salomon`/home/prace/_YOUR_ACCOUNT_ON_SALOMON_/_PATH_TO_YOUR_FILE_ file://_LOCAL_PATH_TO_YOUR_FILE_
@@ -197,22 +195,23 @@ Or by using prace_service script:

Generally both shared file systems are available through 
GridFTP:

-|File system mount point|Filesystem|Comment|
-|---|---|---|
-|/home|Lustre|Default HOME directories of users in format /home/prace/login/|
-|/scratch|Lustre|Shared SCRATCH mounted on the whole cluster|
+| File system mount point | Filesystem | Comment                                                         |
+| ----------------------- | ---------- | --------------------------------------------------------------- |
+| /home                   | Lustre     | Default HOME directories of users in format /home/prace/login/  |
+| /scratch                | Lustre     | Shared SCRATCH mounted on the whole cluster                     |

More information about the shared file systems is available [here](storage/).

-Please note, that for PRACE users a "prace" directory is used also on the SCRATCH file system.
+!!! hint
+    The `prace` directory is also used for PRACE users on the SCRATCH file system.
+
+| Data type                    | Default path                    |
+| ---------------------------- | ------------------------------- |
+| large project files          | /scratch/work/user/prace/login/ |
+| large scratch/temporary data | /scratch/temp/                  |

-|Data type|Default path|
-|---|---|
-|large project files|/scratch/work/user/prace/login/|
-|large scratch/temporary data|/scratch/temp/|
+## Usage of the Cluster

-Usage of the cluster
--------------------
There are some limitations for PRACE users when using the cluster. By default, PRACE users aren't allowed to access special queues in PBS Pro to have high priority or exclusive access to some special equipment like accelerated nodes and high memory (fat) nodes. There may also be restrictions on obtaining a working license for the commercial software installed on the cluster, mostly because of the license agreement or because of an insufficient number of licenses.

For production runs always use scratch file systems. The available file systems are described [here](storage/).

@@ -221,7 +220,7 @@ All system wide installed software on the cluster is made available to the users via the modules. The information about the environment and modules usage is in this [section of general documentation](environment-and-modules/).

-PRACE users can use the "prace" module to use the [PRACE Common Production Environment](http://www.prace-ri.eu/PRACE-common-production).
+PRACE users can use the "prace" module to use the [PRACE Common Production Environment](http://www.prace-ri.eu/prace-common-production-environment/).

```bash
$ module load prace
@@ -233,12 +232,11 @@ General information about the resource allocation, job queuing and job execution

For PRACE users, the default production run queue is "qprace". PRACE users can also use two other queues, "qexp" and "qfree". 
- |queue|Active project|Project resources|Nodes|priority|authorization|walltime |
- |---|---|---|---|---|---|---|
- |**qexp** Express queue|no|none required|32 nodes, max 8 per user|150|no|1 / 1 h|
- |**qprace** Production queue|yes|>0|1006 nodes, max 86 per job|0|no|24 / 48 h|
- |**qfree** Free resource queue|yes|none required|752 nodes, max 86 per job|-1024|no|12 / 12 h|
-
+| queue                         | Active project | Project resources | Nodes                      | priority | authorization | walltime  |
+| ----------------------------- | -------------- | ----------------- | -------------------------- | -------- | ------------- | --------- |
+| **qexp** Express queue        | no             | none required     | 32 nodes, max 8 per user   | 150      | no            | 1 / 1 h   |
+| **qprace** Production queue   | yes            | >0                | 1006 nodes, max 86 per job | 0        | no            | 24 / 48 h |
+| **qfree** Free resource queue | yes            | none required     | 752 nodes, max 86 per job  | -1024    | no            | 12 / 12 h |

**qprace**, the PRACE queue: This queue is intended for normal production runs. It is required that an active project with nonzero remaining resources is specified to enter the qprace. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qprace is 48 hours. If the job needs longer time, it must use checkpoint/restart functionality.

@@ -248,10 +246,10 @@ The resources that are currently subject to accounting are the core hours. The c

PRACE users should check their project accounting using the [PRACE Accounting Tool (DART)](http://www.prace-ri.eu/accounting-report-tool/).

-Users who have undergone the full local registration procedure (including signing the IT4Innovations Acceptable Use Policy) and who have received local password may check at any time, how many core-hours have been consumed by themselves and their projects using the command "it4ifree". Please note that you need to know your user password to use the command and that the displayed core hours are "system core hours" which differ from PRACE "standardized core hours".
+Users who have undergone the full local registration procedure (including signing the IT4Innovations Acceptable Use Policy) and who have received a local password may check at any time how many core-hours have been consumed by themselves and their projects, using the command "it4ifree". You need to know your user password to use the command. Note that the displayed core hours are "system core hours", which differ from PRACE "standardized core hours".

-!!! Note "Note"
-    The **it4ifree** command is a part of it4i.portal.clients package, located here: <https://pypi.python.org/pypi/it4i.portal.clients>
+!!! note
+    The **it4ifree** command is a part of the it4i.portal.clients package, located here: <https://pypi.python.org/pypi/it4i.portal.clients>

```bash
$ it4ifree

diff --git a/docs.it4i/salomon/resource-allocation-and-job-execution.md b/docs.it4i/salomon/resource-allocation-and-job-execution.md
index 283ae774617c28048f6f8d9e892bd2dbfe8ae73b..a28c2a63a19b0de082214d7e2a2e93da91b0d0e8 100644
--- a/docs.it4i/salomon/resource-allocation-and-job-execution.md
+++ b/docs.it4i/salomon/resource-allocation-and-job-execution.md
@@ -1,28 +1,27 @@
-Resource Allocation and Job Execution
-=====================================
+# Resource Allocation and Job Execution

To run a [job](job-submission-and-execution/), [computational resources](resources-allocation-policy/) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which efficiently distributes workloads across the supercomputer. 
Extensive information about PBS Pro can be found in the [official documentation here](../pbspro-documentation/pbspro/), especially in the PBS Pro User's Guide.

-Resources Allocation Policy
---------------------------
+## Resources Allocation Policy
+
The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and the resources available to the Project. [The Fair-share](job-priority/) at Salomon ensures that individual users may consume approximately equal amounts of resources per week. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. The following queues are available to Salomon users:

-- **qexp**, the Express queue
-- **qprod**, the Production queue
-- **qlong**, the Long queue
-- **qmpp**, the Massively parallel queue
-- **qfat**, the queue to access SMP UV2000 machine
-- **qfree**, the Free resource utilization queue
+* **qexp**, the Express queue
+* **qprod**, the Production queue
+* **qlong**, the Long queue
+* **qmpp**, the Massively parallel queue
+* **qfat**, the queue to access SMP UV2000 machine
+* **qfree**, the Free resource utilization queue

-!!! Note "Note"
-    Check the queue status at <https://extranet.it4i.cz/rsweb/salomon/>
+!!! note
+    Check the queue status at <https://extranet.it4i.cz/rsweb/salomon/>

Read more on the [Resource Allocation Policy](resources-allocation-policy/) page.

-Job submission and execution
----------------------------
-!!! Note "Note"
-    Use the **qsub** command to submit your jobs.
+## Job Submission and Execution
+
+!!! note
+    Use the **qsub** command to submit your jobs.

The qsub command submits the job into the queue. It creates a request to the PBS Job manager for allocation of the specified resources. The **smallest allocation unit is an entire node, 24 cores**, with the exception of the qexp queue. The resources will be allocated when available, subject to allocation policies and constraints. **After the resources are allocated, the jobscript or interactive shell is executed on the first of the allocated nodes.**

diff --git a/docs.it4i/salomon/resources-allocation-policy.md b/docs.it4i/salomon/resources-allocation-policy.md
index dd5f736adbecf56de562d1ef09974a73a0c15b87..d705a527d4ed1e0988a4c76575687c23239e41de 100644
--- a/docs.it4i/salomon/resources-allocation-policy.md
+++ b/docs.it4i/salomon/resources-allocation-policy.md
@@ -1,38 +1,35 @@
-Resources Allocation Policy
-===========================
+# Resources Allocation Policy

-Resources Allocation Policy
---------------------------
The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and the resources available to the Project. The fair-share at Salomon ensures that individual users may consume approximately equal amounts of resources per week. Detailed information can be found in the [Job scheduling](job-priority/) section. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. The following table provides the queue partitioning overview:

-!!! Note "Note"
-    Check the queue status at https://extranet.it4i.cz/rsweb/salomon/
+!!! 
note
+    Check the queue status at <https://extranet.it4i.cz/rsweb/salomon/>

- |queue |active project |project resources |nodes|min ncpus |priority|authorization|walltime |
- | --- | --- |--- |--- |--- |--- |--- |--- |
- |**qexe** Express queue|no |none required |32 nodes, max 8 per user |24 |150 |no |1 / 1h |
- |**qprod** Production queue|yes |> 0 |1006 nodes, max 86 per job |24 |0 |no |24 / 48h |
- |**qlong** Long queue |yes |> 0 |256 nodes, max 40 per job, only non-accelerated nodes allowed |24 |0 |no |72 / 144h |
- |**qmpp** Massive parallel queue |yes |> 0 |1006 nodes |24 |0 |yes |2 / 4h |
- |**qfat** UV2000 queue |yes |> 0 |1 (uv1) |8 |0 |yes |24 / 48h |
- |**qfree** Free resource queue|yes |none required |752 nodes, max 86 per job |24 |-1024 |no |12 / 12h |
- |**qviz** Visualization queue |yes |none required |2 (with NVIDIA Quadro K5000) |4 |150 |no |1 / 8h |
+| queue                           | active project | project resources | nodes                                                          | min ncpus | priority | authorization | walltime  |
+| ------------------------------- | -------------- | ----------------- | -------------------------------------------------------------- | --------- | -------- | ------------- | --------- |
+| **qexp** Express queue          | no             | none required     | 32 nodes, max 8 per user                                       | 24        | 150      | no            | 1 / 1h    |
+| **qprod** Production queue      | yes            | > 0               | 1006 nodes, max 86 per job                                     | 24        | 0        | no            | 24 / 48h  |
+| **qlong** Long queue            | yes            | > 0               | 256 nodes, max 40 per job, only non-accelerated nodes allowed  | 24        | 0        | no            | 72 / 144h |
+| **qmpp** Massive parallel queue | yes            | > 0               | 1006 nodes                                                     | 24        | 0        | yes           | 2 / 4h    |
+| **qfat** UV2000 queue           | yes            | > 0               | 1 (uv1)                                                        | 8         | 0        | yes           | 24 / 48h  |
+| **qfree** Free resource queue   | yes            | none required     | 752 nodes, max 86 per job                                      | 24        | -1024    | no            | 12 / 12h  |
+| **qviz** Visualization queue    | yes            | none required     | 2 (with NVIDIA Quadro K5000)                                   | 4         | 150      | no            | 1 / 8h    |

-!!! Note "Note"
-    **The qfree queue is not free of charge**. [Normal accounting](resources-allocation-policy/#resources-accounting-policy) applies. However, it allows for utilization of free resources, once a Project exhausted all its allocated computational resources. This does not apply for Directors Discreation's projects (DD projects) by default. Usage of qfree after exhaustion of DD projects computational resources is allowed after request for this queue.
+!!! note
+    **The qfree queue is not free of charge**. [Normal accounting](resources-allocation-policy/#resources-accounting-policy) applies. However, it allows for utilization of free resources, once a Project has exhausted all its allocated computational resources. This does not apply to Director's Discretion projects (DD projects) by default. Usage of qfree after exhaustion of a DD project's computational resources is allowed upon request for this queue.

-- **qexp**, the Express queue: This queue is dedicated for testing and running very small jobs. It is not required to specify a project to enter the qexp. There are 2 nodes always reserved for this queue (w/o accelerator), maximum 8 nodes are available via the qexp for a particular user. The nodes may be allocated on per core basis. No special authorization is required to use it. The maximum runtime in qexp is 1 hour.
-- **qprod**, the Production queue: This queue is intended for normal production runs. It is required that active project with nonzero remaining resources is specified to enter the qprod. All nodes may be accessed via the qprod queue, however only 86 per job. Full nodes, 24 cores per node are allocated. 
The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qprod is 48 hours.
-- **qlong**, the Long queue: This queue is intended for long production runs. It is required that active project with nonzero remaining resources is specified to enter the qlong. Only 336 nodes without acceleration may be accessed via the qlong queue. Full nodes, 24 cores per node are allocated. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qlong is 144 hours (three times of the standard qprod time - 3 * 48 h)
-- **qmpp**, the massively parallel queue. This queue is intended for massively parallel runs. It is required that active project with nonzero remaining resources is specified to enter the qmpp. All nodes may be accessed via the qmpp queue. Full nodes, 24 cores per node are allocated. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qmpp is 4 hours. An PI needs explicitly ask support for authorization to enter the queue for all users associated to her/his Project.
-- **qfat**, the UV2000 queue. This queue is dedicated to access the fat SGI UV2000 SMP machine. The machine (uv1) has 112 Intel IvyBridge cores at 3.3GHz and 3.25TB RAM. An PI needs explicitly ask support for authorization to enter the queue for all users associated to her/his Project.
-- **qfree**, the Free resource queue: The queue qfree is intended for utilization of free resources, after a Project exhausted all its allocated computational resources (Does not apply to DD projects by default. DD projects have to request for persmission on qfree after exhaustion of computational resources.). It is required that active project is specified to enter the queue, however no remaining resources are required. Consumed resources will be accounted to the Project. Only 178 nodes without accelerator may be accessed from this queue. Full nodes, 24 cores per node are allocated. The queue runs with very low priority and no special authorization is required to use it. The maximum runtime in qfree is 12 hours.
-- **qviz**, the Visualization queue: Intended for pre-/post-processing using OpenGL accelerated graphics. Currently when accessing the node, each user gets 4 cores of a CPU allocated, thus approximately 73 GB of RAM and 1/7 of the GPU capacity (default "chunk"). If more GPU power or RAM is required, it is recommended to allocate more chunks (with 4 cores each) up to one whole node per user, so that all 28 cores, 512 GB RAM and whole GPU is exclusive. This is currently also the maximum allowed allocation per one user. One hour of work is allocated by default, the user may ask for 2 hours maximum.
+* **qexp**, the Express queue: This queue is dedicated to testing and running very small jobs. It is not required to specify a project to enter the qexp. There are always 2 nodes reserved for this queue (w/o accelerator); a maximum of 8 nodes is available via the qexp for a particular user. The nodes may be allocated on a per-core basis. No special authorization is required to use it. The maximum runtime in qexp is 1 hour.
+* **qprod**, the Production queue: This queue is intended for normal production runs. It is required that an active project with nonzero remaining resources is specified to enter the qprod. All nodes may be accessed via the qprod queue, however, only 86 per job. Full nodes, 24 cores per node, are allocated. 
The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qprod is 48 hours.
+* **qlong**, the Long queue: This queue is intended for long production runs. It is required that an active project with nonzero remaining resources is specified to enter the qlong. Only 336 nodes without acceleration may be accessed via the qlong queue. Full nodes, 24 cores per node, are allocated. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qlong is 144 hours (three times the standard qprod time - 3 \* 48 h).
+* **qmpp**, the massively parallel queue. This queue is intended for massively parallel runs. It is required that an active project with nonzero remaining resources is specified to enter the qmpp. All nodes may be accessed via the qmpp queue. Full nodes, 24 cores per node, are allocated. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qmpp is 4 hours. A PI needs to explicitly ask support for authorization to enter the queue for all users associated with her/his Project.
+* **qfat**, the UV2000 queue. This queue is dedicated to accessing the fat SGI UV2000 SMP machine. The machine (uv1) has 112 Intel IvyBridge cores at 3.3GHz and 3.25TB RAM. A PI needs to explicitly ask support for authorization to enter the queue for all users associated with her/his Project.
+* **qfree**, the Free resource queue: The queue qfree is intended for utilization of free resources, after a Project has exhausted all its allocated computational resources (this does not apply to DD projects by default; DD projects have to request permission to use qfree after exhaustion of computational resources). It is required that an active project is specified to enter the queue, however, no remaining resources are required. Consumed resources will be accounted to the Project. Only 178 nodes without accelerator may be accessed from this queue. Full nodes, 24 cores per node, are allocated. The queue runs with very low priority and no special authorization is required to use it. The maximum runtime in qfree is 12 hours.
+* **qviz**, the Visualization queue: Intended for pre-/post-processing using OpenGL accelerated graphics. Currently, when accessing the node, each user gets 4 cores of a CPU allocated, thus approximately 73 GB of RAM and 1/7 of the GPU capacity (the default "chunk"). If more GPU power or RAM is required, it is recommended to allocate more chunks (with 4 cores each), up to one whole node per user, so that all 28 cores, 512 GB RAM and the whole GPU are exclusive. This is currently also the maximum allowed allocation per one user. One hour of work is allocated by default; the user may ask for 2 hours maximum.

-!!! Note "Note"
-    To access node with Xeon Phi co-processor user needs to specify that in [job submission select statement](job-submission-and-execution/).
+!!! note
+    To access a node with a Xeon Phi co-processor, the user needs to specify that in the [job submission select statement](job-submission-and-execution/).

-### Notes
+## Notes

The job wall clock time defaults to **half the maximum time**, see the table above. Longer wall time limits can be [set manually, see examples](job-submission-and-execution/).

@@ -40,10 +37,10 @@ Jobs that exceed the reserved wall clock time (Req'd Time) get killed automatica

Salomon users may check the current queue configuration at <https://extranet.it4i.cz/rsweb/salomon/queues>.

-### Queue status
+## Queue Status

-!!! 
Note "Note" - Check the status of jobs, queues and compute nodes at [https://extranet.it4i.cz/rsweb/salomon/](https://extranet.it4i.cz/rsweb/salomon) +!!! note + Check the status of jobs, queues and compute nodes at [https://extranet.it4i.cz/rsweb/salomon/](https://extranet.it4i.cz/rsweb/salomon)  @@ -62,9 +59,9 @@ Usage: rspbs [options] Options: --version show program's version number and exit -h, --help show this help message and exit - --get-server-details Print server + --get-server-details Print server --get-queues Print queues - --get-queues-details Print queues details + --get-queues-details Print queues details --get-reservations Print reservations --get-reservations-details Print reservations details @@ -95,7 +92,7 @@ Options: --get-user-ncpus Print number of allocated ncpus per user --get-qlist-nodes Print qlist nodes --get-qlist-nodeset Print qlist nodeset - --get-ibswitch-nodes Print ibswitch nodes + --get-ibswitch-nodes Print ibswitch nodes --get-ibswitch-nodeset Print ibswitch nodeset --summary Print summary @@ -112,17 +109,16 @@ Options: --incl-finished Include finished jobs ``` -Resources Accounting Policy -------------------------------- +## Resources Accounting Policy -### The Core-Hour +### Core-Hours The resources that are currently subject to accounting are the core-hours. The core-hours are accounted on the wall clock basis. The accounting runs whenever the computational cores are allocated or blocked via the PBS Pro workload manager (the qsub command), regardless of whether the cores are actually used for any calculation. 1 core-hour is defined as 1 processor core allocated for 1 hour of wall clock time. Allocating a full node (24 cores) for 1 hour accounts to 24 core-hours. See example in the [Job submission and execution](job-submission-and-execution/) section. -### Check consumed resources +### Check Consumed Resources -!!! Note "Note" - The **it4ifree** command is a part of it4i.portal.clients package, located here: <https://pypi.python.org/pypi/it4i.portal.clients> +!!! note + The **it4ifree** command is a part of it4i.portal.clients package, located here: <https://pypi.python.org/pypi/it4i.portal.clients> User may check at any time, how many core-hours have been consumed by himself/herself and his/her projects. The command is available on clusters' login nodes. diff --git a/docs.it4i/salomon/shell-and-data-access.md b/docs.it4i/salomon/shell-and-data-access.md index b214bd5f18ea08c33a0fc3ce4c02a9f30af06ed5..8c0012f110667a08217b18f117bec54f6f7e9dda 100644 --- a/docs.it4i/salomon/shell-and-data-access.md +++ b/docs.it4i/salomon/shell-and-data-access.md @@ -1,27 +1,26 @@ -Accessing the Cluster -============================== +# Accessing the Cluster + +## Shell Access -Shell Access ------------------ The Salomon cluster is accessed by SSH protocol via login nodes login1, login2, login3 and login4 at address salomon.it4i.cz. The login nodes may be addressed specifically, by prepending the login node name to the address. -!!! Note "Note" - The alias salomon.it4i.cz is currently not available through VPN connection. Please use loginX.salomon.it4i.cz when connected to VPN. +!!! note + The alias salomon.it4i.cz is currently not available through VPN connection. Please use loginX.salomon.it4i.cz when connected to VPN. 
- |Login address|Port|Protocol|Login node|
- |---|---|---|---|
- |salomon.it4i.cz|22|ssh|round-robin DNS record for login[1-4]|
- |login1.salomon.it4i.cz|22|ssh|login1|
- |login1.salomon.it4i.cz|22|ssh|login1|
- |login1.salomon.it4i.cz|22|ssh|login1|
- |login1.salomon.it4i.cz|22|ssh|login1|
+| Login address          | Port | Protocol | Login node                            |
+| ---------------------- | ---- | -------- | ------------------------------------- |
+| salomon.it4i.cz        | 22   | ssh      | round-robin DNS record for login[1-4] |
+| login1.salomon.it4i.cz | 22   | ssh      | login1                                |
+| login2.salomon.it4i.cz | 22   | ssh      | login2                                |
+| login3.salomon.it4i.cz | 22   | ssh      | login3                                |
+| login4.salomon.it4i.cz | 22   | ssh      | login4                                |

-The authentication is by the [private key](../get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/)
+The authentication is by the [private key](../general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/)

-!!! Note "Note"
-    Please verify SSH fingerprints during the first logon. They are identical on all login nodes:
-    f6:28:98:e4:f9:b2:a6:8f:f2:f4:2d:0a:09:67:69:80 (DSA)
-    70:01:c9:9a:5d:88:91:c7:1b:c0:84:d1:fa:4e:83:5c (RSA)
+!!! note
+    Please verify SSH fingerprints during the first logon. They are identical on all login nodes:
+    f6:28:98:e4:f9:b2:a6:8f:f2:f4:2d:0a:09:67:69:80 (DSA)
+    70:01:c9:9a:5d:88:91:c7:1b:c0:84:d1:fa:4e:83:5c (RSA)

Private key authentication:

@@ -37,42 +36,42 @@ If you see warning message "UNPROTECTED PRIVATE KEY FILE!", use this command to

local $ chmod 600 /path/to/id_rsa
```

-On **Windows**, use [PuTTY ssh client](../get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/putty.md).
+On **Windows**, use [PuTTY ssh client](../general/accessing-the-clusters/shell-access-and-data-transfer/putty.md).

After logging in, you will see the command prompt:

```bash
-  _____ _
- / ____| | |
- | (___ __ _| | ___ _ __ ___ ___ _ __
- \___ \ / _` | |/ _ \| '_ ` _ \ / _ \| '_ \
+ _____ _
+ / ____| | |
+ | (___ __ _| | ___ _ __ ___ ___ _ __
+ \___ \ / _` | |/ _ \| '_ ` _ \ / _ \| '_ \
 ____) | (_| | | (_) | | | | | | (_) | | | |
|_____/ \__,_|_|\___/|_| |_| |_|\___/|_| |_|
-
+
 http://www.it4i.cz/?lang=en
-Last login: Tue Jul 9 15:57:38 2013 from your-host.example.com 
+Last login: Tue Jul 9 15:57:38 2013 from your-host.example.com
[username@login2.salomon ~]$
```

-!!! Note "Note"
-    The environment is **not** shared between login nodes, except for [shared filesystems](storage/).
+!!! note
+    The environment is **not** shared between login nodes, except for [shared filesystems](storage/).
+
+## Data Transfer

-Data Transfer
--------------
Data in and out of the system may be transferred by the [scp](http://en.wikipedia.org/wiki/Secure_copy) and sftp protocols. 
- |Address|Port|Protocol|
- |---|---|---|
- |salomon.it4i.cz|22|scp, sftp|
- |login1.salomon.it4i.cz|22|scp, sftp|
- |login2.salomon.it4i.cz|22|scp, sftp|
- |login3.salomon.it4i.cz|22|scp, sftp|
- |login4.salomon.it4i.cz|22|scp, sftp|
+| Address                | Port | Protocol  |
+| ---------------------- | ---- | --------- |
+| salomon.it4i.cz        | 22   | scp, sftp |
+| login1.salomon.it4i.cz | 22   | scp, sftp |
+| login2.salomon.it4i.cz | 22   | scp, sftp |
+| login3.salomon.it4i.cz | 22   | scp, sftp |
+| login4.salomon.it4i.cz | 22   | scp, sftp |

-The authentication is by the [private key](../get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/)
+The authentication is by the [private key](../general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/)

On linux or Mac, use scp or sftp client to transfer the data to Salomon:

@@ -110,29 +109,28 @@ On Windows, use [WinSCP client](http://winscp.net/eng/download.php) to transfer

More information about the shared file systems is available [here](storage/).

-Connection restrictions
-----------------------
+## Connection Restrictions
+
Outgoing connections, from Salomon Cluster login nodes to the outside world, are restricted to the following ports:

-|Port|Protocol|
-|---|---|
-|22|ssh|
-|80|http|
-|443|https|
-|9418|git|
+| Port | Protocol |
+| ---- | -------- |
+| 22   | ssh      |
+| 80   | http     |
+| 443  | https    |
+| 9418 | git      |

-!!! Note "Note"
-    Please use **ssh port forwarding** and proxy servers to connect from Salomon to all other remote ports.
+!!! note
+    Please use **ssh port forwarding** and proxy servers to connect from Salomon to all other remote ports.

Outgoing connections from Salomon Cluster compute nodes are restricted to the internal network. Direct connections from compute nodes to the outside world are cut.

-Port forwarding
----------------
+## Port Forwarding

-### Port forwarding from login nodes
+### Port Forwarding From Login Nodes

-!!! Note "Note"
-    Port forwarding allows an application running on Salomon to connect to arbitrary remote host and port.
+!!! note
+    Port forwarding allows an application running on Salomon to connect to an arbitrary remote host and port.

It works by tunneling the connection from Salomon back to the user's workstation and forwarding from the workstation to the remote host.

@@ -142,9 +140,9 @@ Pick some unused port on Salomon login node (for example 6000) and establish th

local $ ssh -R 6000:remote.host.com:1234 salomon.it4i.cz
```

-In this example, we establish port forwarding between port 6000 on Salomon and port 1234 on the remote.host.com. By accessing localhost:6000 on Salomon, an application will see response of remote.host.com:1234. The traffic will run via users local workstation. 
+In this example, we establish port forwarding between port 6000 on Salomon and port 1234 on remote.host.com. By accessing localhost:6000 on Salomon, an application will see the response of remote.host.com:1234. The traffic will run via the user's local workstation.

-Port forwarding may be done **using PuTTY** as well. On the PuTTY Configuration screen, load your Salomon configuration first. Then go to Connection->SSH->Tunnels to set up the port forwarding. Click Remote radio button. 
+Port forwarding may be done **using PuTTY** as well. On the PuTTY Configuration screen, load your Salomon configuration first. Then go to Connection->SSH->Tunnels to set up the port forwarding. Click the Remote radio button. 
Insert 6000 into the Source port textbox and remote.host.com:1234 into the Destination textbox. Click the Add button, then Open.

 Port forwarding may be established directly to the remote host. However, this requires that the user has ssh access to remote.host.com

@@ -154,7 +152,7 @@
 $ ssh -L 6000:localhost:1234 remote.host.com

 Note: Port number 6000 is chosen as an example only. Pick any free port.

-### Port forwarding from compute nodes
+### Port Forwarding From Compute Nodes

 Remote port forwarding from compute nodes allows applications running on the compute nodes to access hosts outside Salomon Cluster.

@@ -168,12 +166,12 @@
 $ ssh -TN -f -L 6000:localhost:6000 login1

 In this example, we assume that port forwarding from login1:6000 to remote.host.com:1234 has been established beforehand. By accessing localhost:6000, an application running on a compute node will see response of remote.host.com:1234

-### Using proxy servers
+### Using Proxy Servers

 Port forwarding is static; each single port is mapped to a particular port on a remote host. A connection to another remote host requires a new forward.

-!!! Note "Note"
-    Applications with inbuilt proxy support, experience unlimited access to remote hosts, via single proxy server.
+!!! note
+    Applications with built-in proxy support experience unlimited access to remote hosts via a single proxy server.

 To establish a local proxy server on your workstation, install and run SOCKS proxy server software. On Linux, the sshd daemon provides the functionality. To establish a SOCKS proxy server listening on port 1080 run:

@@ -189,15 +187,13 @@ Once the proxy server is running, establish ssh port forwarding from Salomon to

 local $ ssh -R 6000:localhost:1080 salomon.it4i.cz
 ```

-Now, configure the applications proxy settings to **localhost:6000**. Use port forwarding to access the [proxy server from compute nodes](#port-forwarding-from-compute-nodes) as well.
+Now, configure the application's proxy settings to **localhost:6000**. Use port forwarding to access the [proxy server from compute nodes](#port-forwarding-from-compute-nodes) as well.

-Graphical User Interface
-------------------------
+## Graphical User Interface

-- The [X Window system](../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/) is a principal way to get GUI access to the clusters.
-- The [Virtual Network Computing](../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/vnc/) is a graphical [desktop sharing](http://en.wikipedia.org/wiki/Desktop_sharing) system that uses the [Remote Frame Buffer protocol](http://en.wikipedia.org/wiki/RFB_protocol) to remotely control another [computer](http://en.wikipedia.org/wiki/Computer).
+* The [X Window system](../general/accessing-the-clusters/graphical-user-interface/x-window-system/) is a principal way to get GUI access to the clusters.
+* The [Virtual Network Computing](../general/accessing-the-clusters/graphical-user-interface/vnc/) is a graphical [desktop sharing](http://en.wikipedia.org/wiki/Desktop_sharing) system that uses the [Remote Frame Buffer protocol](http://en.wikipedia.org/wiki/RFB_protocol) to remotely control another [computer](http://en.wikipedia.org/wiki/Computer).

-VPN Access
-----------
+## VPN Access

-- Access to IT4Innovations internal resources via [VPN](../get-started-with-it4innovations/vpn-access/).
+* Access to IT4Innovations internal resources via [VPN](../general/accessing-the-clusters/vpn-access/).
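Tying together the compute-node forwarding and SOCKS proxy sections above, the following is a minimal end-to-end sketch of using the tunnel from a compute node. It assumes curl is installed on the node and that the forwarding chain (workstation SOCKS server on 1080, reverse forward to a login node on 6000) from the text above is already in place; remote.host.com is the same placeholder host used throughout this page.

```bash
# On the compute node: bring the login node's forwarded port 6000 to localhost
$ ssh -TN -f -L 6000:localhost:6000 login1

# Verify the proxied path works end to end (assumes curl is available;
# the request travels compute node -> login1 -> your workstation's SOCKS proxy)
$ curl --socks5 localhost:6000 http://remote.host.com/
```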
diff --git a/docs.it4i/salomon/software/ansys/ansys-cfx.md b/docs.it4i/salomon/software/ansys/ansys-cfx.md
index 9bd7ced93f1ec946a86e598d975cbbb35b7b552f..21ce8f93b16958a184d15af5235830e9d39406b9 100644
--- a/docs.it4i/salomon/software/ansys/ansys-cfx.md
+++ b/docs.it4i/salomon/software/ansys/ansys-cfx.md
@@ -1,7 +1,6 @@
-ANSYS CFX
-=========
+# ANSYS CFX

-[ANSYS CFX](http://www.ansys.com/Products/Simulation+Technology/Fluid+Dynamics/Fluid+Dynamics+Products/ANSYS+CFX) software is a high-performance, general purpose fluid dynamics program that has been applied to solve wide-ranging fluid flow problems for over 20 years. At the heart of ANSYS CFX is its advanced solver technology, the key to achieving reliable and accurate solutions quickly and robustly. The modern, highly parallelized solver is the foundation for an abundant choice of physical models to capture virtually any type of phenomena related to fluid flow. The solver and its many physical models are wrapped in a modern, intuitive, and flexible GUI and user environment, with extensive capabilities for customization and automation using session files, scripting and a powerful expression language.
+[ANSYS CFX](http://www.ansys.com/products/fluids/ansys-cfx) software is a high-performance, general purpose fluid dynamics program that has been applied to solve wide-ranging fluid flow problems for over 20 years. At the heart of ANSYS CFX is its advanced solver technology, the key to achieving reliable and accurate solutions quickly and robustly. The modern, highly parallelized solver is the foundation for an abundant choice of physical models to capture virtually any type of phenomena related to fluid flow. The solver and its many physical models are wrapped in a modern, intuitive, and flexible GUI and user environment, with extensive capabilities for customization and automation using session files, scripting and a powerful expression language.

 To run ANSYS CFX in batch mode you can utilize/modify the default cfx.pbs script and execute it via the qsub command.

@@ -35,7 +34,7 @@ procs_per_host=1
 hl=""
 for host in `cat $PBS_NODEFILE`
 do
- if [ "$hl" = "" ]
+ if [ "$hl" = "" ]
 then hl="$host:$procs_per_host"
 else hl="${hl}:$host:$procs_per_host"
 fi
@@ -48,7 +47,7 @@ echo Machines: $hl
 /ansys_inc/v145/CFX/bin/cfx5solve -def input.def -size 4 -size-ni 4x -part-large -start-method "Platform MPI Distributed Parallel" -par-dist $hl -P aa_r
 ```

-Header of the pbs file (above) is common and description can be find on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution/). SVS FEM recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources.
+The header of the pbs file (above) is common and its description can be found on [this site](../../job-submission-and-execution/). SVS FEM recommends specifying resources with the keywords nodes and ppn. These keywords directly set the number of nodes (computers) and cores per node (ppn) which will be utilized in the job. The rest of the code also assumes such a structure of allocated resources.

 Working directory has to be created before sending pbs job into the queue. Input file should be in working directory or full path to input file has to be specified.
>Input file has to be defined by common CFX def file which is attached to the cfx solver via parameter -def diff --git a/docs.it4i/salomon/software/ansys/ansys-fluent.md b/docs.it4i/salomon/software/ansys/ansys-fluent.md index 4d8aa00357dd32dd82262acb921683df5f25fbd2..33e711b285cc8066604c43ebb7c943dcb1294fb6 100644 --- a/docs.it4i/salomon/software/ansys/ansys-fluent.md +++ b/docs.it4i/salomon/software/ansys/ansys-fluent.md @@ -1,11 +1,10 @@ -ANSYS Fluent -============ +# ANSYS Fluent -[ANSYS Fluent](http://www.ansys.com/Products/Simulation+Technology/Fluid+Dynamics/Fluid+Dynamics+Products/ANSYS+Fluent) +[ANSYS Fluent](http://www.ansys.com/products/fluids/ansys-fluent) software contains the broad physical modeling capabilities needed to model flow, turbulence, heat transfer, and reactions for industrial applications ranging from air flow over an aircraft wing to combustion in a furnace, from bubble columns to oil platforms, from blood flow to semiconductor manufacturing, and from clean room design to wastewater treatment plants. Special models that give the software the ability to model in-cylinder combustion, aeroacoustics, turbomachinery, and multiphase systems have served to broaden its reach. 1. Common way to run Fluent over pbs file ------------------------------------------------------- + To run ANSYS Fluent in batch mode you can utilize/modify the default fluent.pbs script and execute it via the qsub command. ```bash @@ -57,18 +56,17 @@ Journal file with definition of the input geometry and boundary conditions and d The appropriate dimension of the problem has to be set by parameter (2d/3d). -2. Fast way to run Fluent from command line --------------------------------------------------------- +1. Fast way to run Fluent from command line ```bash fluent solver_version [FLUENT_options] -i journal_file -pbs ``` -This syntax will start the ANSYS FLUENT job under PBS Professional using the qsub command in a batch manner. When resources are available, PBS Professional will start the job and return a job ID, usually in the form of *job_ID.hostname*. This job ID can then be used to query, control, or stop the job using standard PBS Professional commands, such as qstat or qdel. The job will be run out of the current working directory, and all output will be written to the file fluent.o *job_ID*. +This syntax will start the ANSYS FLUENT job under PBS Professional using the qsub command in a batch manner. When resources are available, PBS Professional will start the job and return a job ID, usually in the form of _job_ID.hostname_. This job ID can then be used to query, control, or stop the job using standard PBS Professional commands, such as qstat or qdel. The job will be run out of the current working directory, and all output will be written to the file fluent.o _job_ID_. + +1. Running Fluent via user's config file -3. Running Fluent via user's config file ----------------------------------------- -The sample script uses a configuration file called pbs_fluent.conf if no command line arguments are present. This configuration file should be present in the directory from which the jobs are submitted (which is also the directory in which the jobs are executed). The following is an example of what the content of pbs_fluent.conf can be: +The sample script uses a configuration file called pbs_fluent.conf if no command line arguments are present. This configuration file should be present in the directory from which the jobs are submitted (which is also the directory in which the jobs are executed). 
The following is an example of what the content of pbs_fluent.conf can be:

 ```bash
 input="example_small.flin"
@@ -102,7 +100,7 @@ To run ANSYS Fluent in batch mode with user's config file you can utilize/modify

 cd $PBS_O_WORKDIR

 #We assume that if they didn’t specify arguments then they should use the
-#config file if [ "xx${input}${case}${mpp}${fluent_args}zz" = "xxzz" ]; then
+#config file if [ "xx${input}${case}${mpp}${fluent_args}zz" = "xxzz" ]; then
 if [ -f pbs_fluent.conf ]; then
 . pbs_fluent.conf
 else
@@ -143,8 +141,8 @@ To run ANSYS Fluent in batch mode with user's config file you can utilize/modify

 It runs the jobs out of the directory from which they are submitted (PBS_O_WORKDIR).

-4. Running Fluent in parralel
------------------------------
+1. Running Fluent in parallel
+
 Fluent could be run in parallel only under Academic Research license. To do so this ANSYS Academic Research license must be placed before ANSYS CFD license in user preferences. To make this change anslic_admin utility should be run

 ```bash
diff --git a/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md b/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md
index c2ce777868c34566878c2c7b20d520ddb45bee14..8646c26665ea9f10d6d70405e961f1e2efe7fbb9 100644
--- a/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md
+++ b/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md
@@ -1,7 +1,6 @@
-ANSYS LS-DYNA
-=============
+# ANSYS LS-DYNA

-**[ANSYSLS-DYNA](http://www.ansys.com/Products/Simulation+Technology/Structural+Mechanics/Explicit+Dynamics/ANSYS+LS-DYNA)** software provides convenient and easy-to-use access to the technology-rich, time-tested explicit solver without the need to contend with the complex input requirements of this sophisticated program. Introduced in 1996, ANSYS LS-DYNA capabilities have helped customers in numerous industries to resolve highly intricate design issues. ANSYS Mechanical users have been able take advantage of complex explicit solutions for a long time utilizing the traditional ANSYS Parametric Design Language (APDL) environment. These explicit capabilities are available to ANSYS Workbench users as well. The Workbench platform is a powerful, comprehensive, easy-to-use environment for engineering simulation. CAD import from all sources, geometry cleanup, automatic meshing, solution, parametric optimization, result visualization and comprehensive report generation are all available within a single fully interactive modern graphical user environment.
+**[ANSYS LS-DYNA](http://www.ansys.com/products/structures/ansys-ls-dyna)** software provides convenient and easy-to-use access to the technology-rich, time-tested explicit solver without the need to contend with the complex input requirements of this sophisticated program. Introduced in 1996, ANSYS LS-DYNA capabilities have helped customers in numerous industries to resolve highly intricate design issues. ANSYS Mechanical users have been able to take advantage of complex explicit solutions for a long time utilizing the traditional ANSYS Parametric Design Language (APDL) environment. These explicit capabilities are available to ANSYS Workbench users as well. The Workbench platform is a powerful, comprehensive, easy-to-use environment for engineering simulation. CAD import from all sources, geometry cleanup, automatic meshing, solution, parametric optimization, result visualization and comprehensive report generation are all available within a single fully interactive modern graphical user environment.
To run ANSYS LS-DYNA in batch mode you can utilize/modify the default ansysdyna.pbs script and execute it via the qsub command.

@@ -40,7 +39,7 @@ procs_per_host=1
 hl=""
 for host in `cat $PBS_NODEFILE`
 do
- if [ "$hl" = "" ]
+ if [ "$hl" = "" ]
 then hl="$host:$procs_per_host"
 else hl="${hl}:$host:$procs_per_host"
 fi
@@ -51,6 +50,6 @@ echo Machines: $hl
 /ansys_inc/v145/ansys/bin/ansys145 -dis -lsdynampp i=input.k -machines $hl
 ```

-Header of the pbs file (above) is common and description can be find on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources.
+The header of the pbs file (above) is common and its description can be found on [this site](../../job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends specifying resources with the keywords nodes and ppn. These keywords directly set the number of nodes (computers) and cores per node (ppn) which will be utilized in the job. The rest of the code also assumes such a structure of allocated resources.

 Working directory has to be created before sending pbs job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common LS-DYNA .**k** file which is attached to the ansys solver via parameter i=
diff --git a/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md b/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md
index 939a599b92f29b07e8f4836d604753c20e57592a..c1562c1c23ca09fe308536c45f1c903ab8384b3e 100644
--- a/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md
+++ b/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md
@@ -1,7 +1,6 @@
-ANSYS MAPDL
-===========
+# ANSYS MAPDL

-**[ANSYS Multiphysics](http://www.ansys.com/Products/Simulation+Technology/Structural+Mechanics/ANSYS+Multiphysics)**
+**[ANSYS Multiphysics](http://www.ansys.com/products/multiphysics)**
 software offers a comprehensive product solution for both multiphysics and single-physics analysis. The product includes structural, thermal, fluid and both high- and low-frequency electromagnetic analysis. The product also contains solutions for both direct and sequentially coupled physics problems including direct coupled-field elements and the ANSYS multi-field solver.

 To run ANSYS MAPDL in batch mode you can utilize/modify the default mapdl.pbs script and execute it via the qsub command.

@@ -36,7 +35,7 @@ procs_per_host=1
 hl=""
 for host in `cat $PBS_NODEFILE`
 do
- if [ "$hl" = "" ]
+ if [ "$hl" = "" ]
 then hl="$host:$procs_per_host"
 else hl="${hl}:$host:$procs_per_host"
 fi
diff --git a/docs.it4i/salomon/software/ansys/ansys.md b/docs.it4i/salomon/software/ansys/ansys.md
index 093bcf9567db939e38673fc9dd5373b1f3d14c37..f93524a3e580f8a5c83302f8d1cd9997bb68c2be 100644
--- a/docs.it4i/salomon/software/ansys/ansys.md
+++ b/docs.it4i/salomon/software/ansys/ansys.md
@@ -1,9 +1,8 @@
-Overview of ANSYS Products
-==========================
+# Overview of ANSYS Products

 **[SVS FEM](http://www.svsfem.cz/)** as **[ANSYS Channel partner](http://www.ansys.com/)** for Czech Republic provided all ANSYS licenses for ANSELM cluster and supports of all ANSYS Products (Multiphysics, Mechanical, MAPDL, CFX, Fluent, Maxwell, LS-DYNA...) to IT staff and ANSYS users.
If you encounter a problem with ANSYS functionality, please contact [hotline@svsfem.cz](mailto:hotline@svsfem.cz?subject=Ostrava%20-%20ANSELM)

-Anselm provides as commercial as academic variants. Academic variants are distinguished by "**Academic...**" word in the name of license or by two letter preposition "**aa_**" in the license feature name. Change of license is realized on command line respectively directly in user's pbs file (see individual products). [ More about licensing here](licensing/)
+Anselm provides both commercial and academic variants. Academic variants are distinguished by the word "**Academic...**" in the license name or by the two-letter prefix "**aa\_**" in the license feature name. The license is selected on the command line or directly in the user's pbs file (see individual products). [More about licensing here](licensing/)

 To load the latest version of any ANSYS product (Mechanical, Fluent, CFX, MAPDL,...) load the module:

@@ -14,4 +13,3 @@ To load the latest version of any ANSYS product (Mechanical, Fluent, CFX, MAPDL,

 ANSYS supports interactive regime, but due to assumed solution of extremely difficult tasks it is not recommended. If user needs to work in interactive regime we recommend to configure the RSM service on the client machine which allows to forward the solution to the Anselm directly from the client's Workbench project (see ANSYS RSM service).
-
diff --git a/docs.it4i/salomon/software/ansys/licensing.md b/docs.it4i/salomon/software/ansys/licensing.md
index 63842ea29dc85a0480bb5d34d8812d633f45844d..04ff6513349ccede25a0846dd21227251e954732 100644
--- a/docs.it4i/salomon/software/ansys/licensing.md
+++ b/docs.it4i/salomon/software/ansys/licensing.md
@@ -1,25 +1,24 @@
-Licensing and Available Versions
-================================
+# Licensing and Available Versions

-ANSYS licence can be used by:
------------------------------
-- all persons in the carrying out of the CE IT4Innovations Project (In addition to the primary licensee, which is VSB - Technical University of Ostrava, users are CE IT4Innovations third parties - CE IT4Innovations project partners, particularly the University of Ostrava, the Brno University of Technology - Faculty of Informatics, the Silesian University in Opava, Institute of Geonics AS CR.)
-- all persons who have a valid license
-- students of the Technical University
+## ANSYS Licence Can Be Used By:
+
+* all persons involved in carrying out the CE IT4Innovations Project (in addition to the primary licensee, which is VSB - Technical University of Ostrava, users are CE IT4Innovations third parties - CE IT4Innovations project partners, particularly the University of Ostrava, the Brno University of Technology - Faculty of Informatics, the Silesian University in Opava, Institute of Geonics AS CR.)
+* all persons who have a valid license
+* students of the Technical University
+
+## ANSYS Academic Research

-ANSYS Academic Research
------------------------
 The licence is intended to be used for science and research, publications, students’ projects (academic licence).

-ANSYS COM
----------
+## ANSYS COM
+
 The licence is intended to be used for science and research, publications, students’ projects, commercial research with no commercial use restrictions.

-Available Versions
-------------------
-- 16.1
-- 17.0
+## Available Versions
+
+* 16.1
+* 17.0
+
+## License Preferences

-License Preferences
--------------------
 Please [see this page to set license preferences](setting-license-preferences/).
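Since the licensing page above lists versions 16.1 and 17.0, a minimal sketch of selecting a specific version instead of the default follows. The exact module names are an assumption (the usual ANSYS/<version> naming); check `module avail` for the string installed on the cluster.

```bash
# List the ANSYS modules actually installed (exact names may differ)
$ module avail ANSYS

# Load a specific version rather than the default
$ module load ANSYS/17.0
```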
diff --git a/docs.it4i/salomon/software/ansys/setting-license-preferences.md b/docs.it4i/salomon/software/ansys/setting-license-preferences.md
index 44e0b8bde968c4336d5d7e8cb7e1625ff348cda3..fe14541d46b1fe4cab38eb7b883c58e40e03dd32 100644
--- a/docs.it4i/salomon/software/ansys/setting-license-preferences.md
+++ b/docs.it4i/salomon/software/ansys/setting-license-preferences.md
@@ -1,9 +1,8 @@
-Setting license preferences
-===========================
+# Setting License Preferences

 Some ANSYS tools allow you to explicitly specify usage of academic or commercial licenses in the command line (eg. ansys161 -p aa_r to select Academic Research license). However, we have observed that not all tools obey this option and choose commercial license.

-Thus you need to configure preferred license order with ANSLIC_ADMIN. Please follow these steps and move Academic Research license to the top or bottom of the list accordingly.
+Thus you need to configure the preferred license order with ANSLIC_ADMIN. Please follow these steps and move the Academic Research license to the top or bottom of the list accordingly.

 Launch the ANSLIC_ADMIN utility in a graphical environment:

@@ -21,4 +20,4 @@ ANSLIC_ADMIN Utility will be run

 ANSYS Academic Research license should be moved up to the top or down to the bottom of the list.

- \ No newline at end of file
+
diff --git a/docs.it4i/salomon/software/ansys/workbench.md b/docs.it4i/salomon/software/ansys/workbench.md
index af5c9f9ff002efff830d1ce687e961e03c92dac7..8ed07d789dea69798e68c177ac1612a3e391ec88 100644
--- a/docs.it4i/salomon/software/ansys/workbench.md
+++ b/docs.it4i/salomon/software/ansys/workbench.md
@@ -1,8 +1,7 @@
-Workbench
-=========
+# Workbench
+
+## Workbench Batch Mode

-Workbench Batch Mode
---------------------
 It is possible to run Workbench scripts in batch mode. You need to configure solvers of individual components to run in parallel mode. Open your project in Workbench. Then, for example, in Mechanical, go to Tools - Solve Process Settings ...

@@ -13,7 +12,7 @@ Enable Distribute Solution checkbox and enter number of cores (eg. 48 to run on

 -mpifile /path/to/my/job/mpifile.txt
 ```

-Where /path/to/my/job is the directory where your project is saved. We will create the file mpifile.txt programatically later in the batch script. For more information, refer to *ANSYS Mechanical APDL Parallel Processing* *Guide*.
+Where /path/to/my/job is the directory where your project is saved. We will create the file mpifile.txt programmatically later in the batch script. For more information, refer to the _ANSYS Mechanical APDL Parallel Processing Guide_.

 Now, save the project and close Workbench. We will use this script to launch the job:

diff --git a/docs.it4i/salomon/software/chemistry/molpro.md b/docs.it4i/salomon/software/chemistry/molpro.md
index 787f86054873db8961e5f7fd6798b564a79eda84..ab53760cda8c5efa186e93d7ab9d4b4032979f53 100644
--- a/docs.it4i/salomon/software/chemistry/molpro.md
+++ b/docs.it4i/salomon/software/chemistry/molpro.md
@@ -1,40 +1,39 @@
-Molpro
-======
+# Molpro

 Molpro is a complete system of ab initio programs for molecular electronic structure calculations.

-About Molpro
-------------
+## About Molpro
+
 Molpro is a software package used for accurate ab-initio quantum chemistry calculations. More information can be found at the [official webpage](http://www.molpro.net/).

-License
--------
+## License
+
 Molpro software package is available only to users that have a valid license.
Please contact support to enable access to Molpro if you have a valid license appropriate for running on our cluster (eg. academic research group licence, parallel execution).

 To run Molpro, you need to have a valid license token present in "$HOME/.molpro/token". You can download the token from [Molpro website](https://www.molpro.net/licensee/?portal=licensee).

-Installed version
------------------
+## Installed Version
+
 Currently installed on Anselm is version 2010.1, patch level 45, a parallel version compiled with Intel compilers and Intel MPI. Compilation parameters are default:

-|Parameter|Value|
-|---|---|
-|max number of atoms|200|
-|max number of valence orbitals|300|
-|max number of basis functions|4095|
-|max number of states per symmmetry|20|
-|max number of state symmetries|16|
-|max number of records|200|
-|max number of primitives|maxbfn x [2]|
+| Parameter                         | Value        |
+| --------------------------------- | ------------ |
+| max number of atoms               | 200          |
+| max number of valence orbitals    | 300          |
+| max number of basis functions     | 4095         |
+| max number of states per symmetry | 20           |
+| max number of state symmetries    | 16           |
+| max number of records             | 200          |
+| max number of primitives          | maxbfn x [2] |
+
+## Running

-Running
-------
 Molpro is compiled for parallel execution using MPI and OpenMP. By default, Molpro reads the number of allocated nodes from PBS and launches a data server on one node. On the remaining allocated nodes, compute processes are launched, one process per node, each with 16 threads. You can modify this behavior by using -n, -t and helper-server options. Please refer to the [Molpro documentation](http://www.molpro.net/info/2010.1/doc/manual/node9.html) for more details.

-!!! Note "Note"
-    The OpenMP parallelization in Molpro is limited and has been observed to produce limited scaling. We therefore recommend to use MPI parallelization only. This can be achieved by passing option mpiprocs=16:ompthreads=1 to PBS.
+!!! note
+    The OpenMP parallelization in Molpro is limited and has been observed to produce limited scaling. We therefore recommend using MPI parallelization only. This can be achieved by passing the option mpiprocs=16:ompthreads=1 to PBS.

 You are advised to use the -d option to point to a directory in [SCRATCH filesystem](../../storage/storage/). Molpro can produce a large amount of temporary data during its run, and it is important that these are placed in the fast scratch filesystem.

diff --git a/docs.it4i/salomon/software/chemistry/nwchem.md b/docs.it4i/salomon/software/chemistry/nwchem.md
index 3db648754f6ad50e0ae89758ae825c4cb20956d7..a26fc701ee44585dbab1f942685b92d9190adfa5 100644
--- a/docs.it4i/salomon/software/chemistry/nwchem.md
+++ b/docs.it4i/salomon/software/chemistry/nwchem.md
@@ -1,21 +1,17 @@
-NWChem
-======
+# NWChem

-**High-Performance Computational Chemistry**
+## Introduction

-Introduction
--------------------------
 NWChem aims to provide its users with computational chemistry tools that are scalable both in their ability to treat large scientific computational chemistry problems efficiently, and in their use of available parallel computing resources from high-performance parallel supercomputers to conventional workstation clusters.

 [Homepage](http://www.nwchem-sw.org/index.php/Main_Page)

-Installed versions
-------------------
+## Installed Versions

 The following versions are currently installed:

-- NWChem/6.3.revision2-2013-10-17-Python-2.7.8, current release.
Compiled with Intel compilers, MKL and Intel MPI
-- NWChem/6.5.revision26243-intel-2015b-2014-09-10-Python-2.7.8
+* NWChem/6.3.revision2-2013-10-17-Python-2.7.8, current release. Compiled with Intel compilers, MKL and Intel MPI
+* NWChem/6.5.revision26243-intel-2015b-2014-09-10-Python-2.7.8

 For a current list of installed versions, execute:

@@ -25,8 +21,8 @@ For a current list of installed versions, execute:

 We recommend using version 6.5. Version 6.3 fails on Salomon nodes with accelerator, because it attempts to communicate over the scif0 interface. In 6.5 this is avoided by setting ARMCI_OPENIB_DEVICE=mlx4_0; this setting is included in the module.

-Running
--------
+## Running
+
 NWChem is compiled for parallel MPI execution. Normal procedure for MPI jobs applies. Sample jobscript:

 ```bash
@@ -39,9 +35,9 @@ Running
     mpirun nwchem h2o.nw
 ```

-Options
---------------------
+## Options
+
 Please refer to [the documentation](http://www.nwchem-sw.org/index.php/Release62:Top-level) and in the input file set the following directives:

-- MEMORY : controls the amount of memory NWChem will use
-- SCRATCH_DIR : set this to a directory in [SCRATCH filesystem](../../storage/storage/) (or run the calculation completely in a scratch directory). For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, eg. "scf direct"
+* MEMORY : controls the amount of memory NWChem will use
+* SCRATCH_DIR : set this to a directory in [SCRATCH filesystem](../../storage/storage/) (or run the calculation completely in a scratch directory). For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, eg. "scf direct"
diff --git a/docs.it4i/salomon/software/chemistry/phono3py.md b/docs.it4i/salomon/software/chemistry/phono3py.md
index 5d5487f97eea389aacba3e57566e0ebf865fba13..3f747d23bc9775f80137c0d6e4f1b4821d97439b 100644
--- a/docs.it4i/salomon/software/chemistry/phono3py.md
+++ b/docs.it4i/salomon/software/chemistry/phono3py.md
@@ -1,21 +1,19 @@
-Phono3py
-========
+# Phono3py

-Introduction
--------------
-This GPL software calculates phonon-phonon interactions via the third order force constants. It allows to obtain lattice thermal conductivity, phonon lifetime/linewidth, imaginary part of self energy at the lowest order, joint density of states (JDOS) and weighted-JDOS. For details see Phys. Rev. B 91, 094306 (2015) and [http://atztogo.github.io/phono3py/index.html](http://atztogo.github.io/phono3py/index.html)
+## Introduction

-!!! Note "Note"
-    Load the phono3py/0.9.14-ictce-7.3.5-Python-2.7.9 module
+This GPL software calculates phonon-phonon interactions via the third order force constants. It allows one to obtain the lattice thermal conductivity, phonon lifetimes/linewidths, the imaginary part of the self energy at the lowest order, the joint density of states (JDOS) and weighted-JDOS. For details see Phys. Rev. B 91, 094306 (2015) and <http://atztogo.github.io/phono3py/index.html>
+
+!!! note
+    Load the phono3py/0.9.14-ictce-7.3.5-Python-2.7.9 module

 ```bash
 $ module load phono3py/0.9.14-ictce-7.3.5-Python-2.7.9
 ```

-Example of calculating thermal conductivity of Si using VASP code.
-------------------------------------------------------------------
+## Example of Calculating Thermal Conductivity of Si Using VASP Code
-### Calculating force constants +### Calculating Force Constants One needs to calculate second order and third order force constants using the diamond structure of silicon stored in [POSCAR](poscar-si) (the same form as in VASP) using single displacement calculations within supercell. @@ -29,17 +27,17 @@ $ cat POSCAR Si 8 Direct - 0.8750000000000000 0.8750000000000000 0.8750000000000000 - 0.8750000000000000 0.3750000000000000 0.3750000000000000 - 0.3750000000000000 0.8750000000000000 0.3750000000000000 - 0.3750000000000000 0.3750000000000000 0.8750000000000000 - 0.1250000000000000 0.1250000000000000 0.1250000000000000 - 0.1250000000000000 0.6250000000000000 0.6250000000000000 - 0.6250000000000000 0.1250000000000000 0.6250000000000000 - 0.6250000000000000 0.6250000000000000 0.1250000000000000 + 0.8750000000000000 0.8750000000000000 0.8750000000000000 + 0.8750000000000000 0.3750000000000000 0.3750000000000000 + 0.3750000000000000 0.8750000000000000 0.3750000000000000 + 0.3750000000000000 0.3750000000000000 0.8750000000000000 + 0.1250000000000000 0.1250000000000000 0.1250000000000000 + 0.1250000000000000 0.6250000000000000 0.6250000000000000 + 0.6250000000000000 0.1250000000000000 0.6250000000000000 + 0.6250000000000000 0.6250000000000000 0.1250000000000000 ``` -### Generating displacement using 2 x 2 x 2 supercell for both second and third order force constants +### Generating Displacement Using 2 by 2 by 2 Supercell for Both Second and Third Order Force Constants ```bash $ phono3py -d --dim="2 2 2" -c POSCAR @@ -49,15 +47,15 @@ $ phono3py -d --dim="2 2 2" -c POSCAR disp_fc3.yaml, and the structure input files with this displacements are POSCAR-00XXX, where the XXX=111. ```bash -disp_fc3.yaml POSCAR-00008 POSCAR-00017 POSCAR-00026 POSCAR-00035 POSCAR-00044 POSCAR-00053 POSCAR-00062 POSCAR-00071 POSCAR-00080 POSCAR-00089 POSCAR-00098 POSCAR-00107 -POSCAR POSCAR-00009 POSCAR-00018 POSCAR-00027 POSCAR-00036 POSCAR-00045 POSCAR-00054 POSCAR-00063 POSCAR-00072 POSCAR-00081 POSCAR-00090 POSCAR-00099 POSCAR-00108 -POSCAR-00001 POSCAR-00010 POSCAR-00019 POSCAR-00028 POSCAR-00037 POSCAR-00046 POSCAR-00055 POSCAR-00064 POSCAR-00073 POSCAR-00082 POSCAR-00091 POSCAR-00100 POSCAR-00109 -POSCAR-00002 POSCAR-00011 POSCAR-00020 POSCAR-00029 POSCAR-00038 POSCAR-00047 POSCAR-00056 POSCAR-00065 POSCAR-00074 POSCAR-00083 POSCAR-00092 POSCAR-00101 POSCAR-00110 -POSCAR-00003 POSCAR-00012 POSCAR-00021 POSCAR-00030 POSCAR-00039 POSCAR-00048 POSCAR-00057 POSCAR-00066 POSCAR-00075 POSCAR-00084 POSCAR-00093 POSCAR-00102 POSCAR-00111 -POSCAR-00004 POSCAR-00013 POSCAR-00022 POSCAR-00031 POSCAR-00040 POSCAR-00049 POSCAR-00058 POSCAR-00067 POSCAR-00076 POSCAR-00085 POSCAR-00094 POSCAR-00103 -POSCAR-00005 POSCAR-00014 POSCAR-00023 POSCAR-00032 POSCAR-00041 POSCAR-00050 POSCAR-00059 POSCAR-00068 POSCAR-00077 POSCAR-00086 POSCAR-00095 POSCAR-00104 -POSCAR-00006 POSCAR-00015 POSCAR-00024 POSCAR-00033 POSCAR-00042 POSCAR-00051 POSCAR-00060 POSCAR-00069 POSCAR-00078 POSCAR-00087 POSCAR-00096 POSCAR-00105 -POSCAR-00007 POSCAR-00016 POSCAR-00025 POSCAR-00034 POSCAR-00043 POSCAR-00052 POSCAR-00061 POSCAR-00070 POSCAR-00079 POSCAR-00088 POSCAR-00097 POSCAR-00106 +disp_fc3.yaml POSCAR-00008 POSCAR-00017 POSCAR-00026 POSCAR-00035 POSCAR-00044 POSCAR-00053 POSCAR-00062 POSCAR-00071 POSCAR-00080 POSCAR-00089 POSCAR-00098 POSCAR-00107 +POSCAR POSCAR-00009 POSCAR-00018 POSCAR-00027 POSCAR-00036 POSCAR-00045 POSCAR-00054 POSCAR-00063 POSCAR-00072 POSCAR-00081 POSCAR-00090 POSCAR-00099 POSCAR-00108 +POSCAR-00001 POSCAR-00010 POSCAR-00019 
POSCAR-00028 POSCAR-00037 POSCAR-00046 POSCAR-00055 POSCAR-00064 POSCAR-00073 POSCAR-00082 POSCAR-00091 POSCAR-00100 POSCAR-00109 +POSCAR-00002 POSCAR-00011 POSCAR-00020 POSCAR-00029 POSCAR-00038 POSCAR-00047 POSCAR-00056 POSCAR-00065 POSCAR-00074 POSCAR-00083 POSCAR-00092 POSCAR-00101 POSCAR-00110 +POSCAR-00003 POSCAR-00012 POSCAR-00021 POSCAR-00030 POSCAR-00039 POSCAR-00048 POSCAR-00057 POSCAR-00066 POSCAR-00075 POSCAR-00084 POSCAR-00093 POSCAR-00102 POSCAR-00111 +POSCAR-00004 POSCAR-00013 POSCAR-00022 POSCAR-00031 POSCAR-00040 POSCAR-00049 POSCAR-00058 POSCAR-00067 POSCAR-00076 POSCAR-00085 POSCAR-00094 POSCAR-00103 +POSCAR-00005 POSCAR-00014 POSCAR-00023 POSCAR-00032 POSCAR-00041 POSCAR-00050 POSCAR-00059 POSCAR-00068 POSCAR-00077 POSCAR-00086 POSCAR-00095 POSCAR-00104 +POSCAR-00006 POSCAR-00015 POSCAR-00024 POSCAR-00033 POSCAR-00042 POSCAR-00051 POSCAR-00060 POSCAR-00069 POSCAR-00078 POSCAR-00087 POSCAR-00096 POSCAR-00105 +POSCAR-00007 POSCAR-00016 POSCAR-00025 POSCAR-00034 POSCAR-00043 POSCAR-00052 POSCAR-00061 POSCAR-00070 POSCAR-00079 POSCAR-00088 POSCAR-00097 POSCAR-00106 ``` For each displacement the forces needs to be calculated, i.e. in form of the output file of VASP (vasprun.xml). For a single VASP calculations one needs [KPOINTS](KPOINTS), [POTCAR](POTCAR), [INCAR](INCAR) in your case directory (where you have POSCARS) and those 111 displacements calculations can be generated by [prepare.sh](prepare.sh) script. Then each of the single 111 calculations is submitted [run.sh](run.sh) by [submit.sh](submit.sh). @@ -65,14 +63,14 @@ For each displacement the forces needs to be calculated, i.e. in form of the out ```bash $./prepare.sh $ls -disp-00001 disp-00009 disp-00017 disp-00025 disp-00033 disp-00041 disp-00049 disp-00057 disp-00065 disp-00073 disp-00081 disp-00089 disp-00097 disp-00105 INCAR -disp-00002 disp-00010 disp-00018 disp-00026 disp-00034 disp-00042 disp-00050 disp-00058 disp-00066 disp-00074 disp-00082 disp-00090 disp-00098 disp-00106 KPOINTS -disp-00003 disp-00011 disp-00019 disp-00027 disp-00035 disp-00043 disp-00051 disp-00059 disp-00067 disp-00075 disp-00083 disp-00091 disp-00099 disp-00107 POSCAR -disp-00004 disp-00012 disp-00020 disp-00028 disp-00036 disp-00044 disp-00052 disp-00060 disp-00068 disp-00076 disp-00084 disp-00092 disp-00100 disp-00108 POTCAR -disp-00005 disp-00013 disp-00021 disp-00029 disp-00037 disp-00045 disp-00053 disp-00061 disp-00069 disp-00077 disp-00085 disp-00093 disp-00101 disp-00109 prepare.sh -disp-00006 disp-00014 disp-00022 disp-00030 disp-00038 disp-00046 disp-00054 disp-00062 disp-00070 disp-00078 disp-00086 disp-00094 disp-00102 disp-00110 run.sh -disp-00007 disp-00015 disp-00023 disp-00031 disp-00039 disp-00047 disp-00055 disp-00063 disp-00071 disp-00079 disp-00087 disp-00095 disp-00103 disp-00111 submit.sh -disp-00008 disp-00016 disp-00024 disp-00032 disp-00040 disp-00048 disp-00056 disp-00064 disp-00072 disp-00080 disp-00088 disp-00096 disp-00104 disp_fc3.yaml +disp-00001 disp-00009 disp-00017 disp-00025 disp-00033 disp-00041 disp-00049 disp-00057 disp-00065 disp-00073 disp-00081 disp-00089 disp-00097 disp-00105 INCAR +disp-00002 disp-00010 disp-00018 disp-00026 disp-00034 disp-00042 disp-00050 disp-00058 disp-00066 disp-00074 disp-00082 disp-00090 disp-00098 disp-00106 KPOINTS +disp-00003 disp-00011 disp-00019 disp-00027 disp-00035 disp-00043 disp-00051 disp-00059 disp-00067 disp-00075 disp-00083 disp-00091 disp-00099 disp-00107 POSCAR +disp-00004 disp-00012 disp-00020 disp-00028 disp-00036 disp-00044 disp-00052 disp-00060 
disp-00068 disp-00076 disp-00084 disp-00092 disp-00100 disp-00108 POTCAR
+disp-00005 disp-00013 disp-00021 disp-00029 disp-00037 disp-00045 disp-00053 disp-00061 disp-00069 disp-00077 disp-00085 disp-00093 disp-00101 disp-00109 prepare.sh
+disp-00006 disp-00014 disp-00022 disp-00030 disp-00038 disp-00046 disp-00054 disp-00062 disp-00070 disp-00078 disp-00086 disp-00094 disp-00102 disp-00110 run.sh
+disp-00007 disp-00015 disp-00023 disp-00031 disp-00039 disp-00047 disp-00055 disp-00063 disp-00071 disp-00079 disp-00087 disp-00095 disp-00103 disp-00111 submit.sh
+disp-00008 disp-00016 disp-00024 disp-00032 disp-00040 disp-00048 disp-00056 disp-00064 disp-00072 disp-00080 disp-00088 disp-00096 disp-00104 disp_fc3.yaml
 ```

 Tailor your run.sh script to fit your project and other needs, and submit all 111 calculations using the submit.sh script

@@ -81,8 +79,8 @@
 $ ./submit.sh
 ```

-Collecting results and post-processing with phono3py
---------------------------------------------------------------------------
+## Collecting Results and Post-Processing With Phono3py
+
 Once all jobs are finished and vasprun.xml is created in each disp-XXXXX directory the collection is done by

 ```bash
@@ -97,7 +95,7 @@ $ phono3py --dim="2 2 2" -c POSCAR

 resulting in `fc2.hdf5` and `fc3.hdf5`

-### Thermal conductivity
+### Thermal Conductivity

 The phonon lifetime calculations take some time, however they are independent of the grid points, so they can be split:

@@ -105,47 +103,47 @@ $ phono3py --fc3 --fc2 --dim="2 2 2" --mesh="9 9 9" --sigma 0.1 --wgp
 ```

-### Inspecting ir_grid_points.yaml
+### Inspecting ir_grid_points.yaml

 ```bash
 $ grep grid_point ir_grid_points.yaml
 num_reduced_ir_grid_points: 35
 ir_grid_points:  # [address, weight]
-- grid_point: 0
-- grid_point: 1
-- grid_point: 2
-- grid_point: 3
-- grid_point: 4
-- grid_point: 10
-- grid_point: 11
-- grid_point: 12
-- grid_point: 13
-- grid_point: 20
-- grid_point: 21
-- grid_point: 22
-- grid_point: 30
-- grid_point: 31
-- grid_point: 40
-- grid_point: 91
-- grid_point: 92
-- grid_point: 93
-- grid_point: 94
-- grid_point: 101
-- grid_point: 102
-- grid_point: 103
-- grid_point: 111
-- grid_point: 112
-- grid_point: 121
-- grid_point: 182
-- grid_point: 183
-- grid_point: 184
-- grid_point: 192
-- grid_point: 193
-- grid_point: 202
-- grid_point: 273
-- grid_point: 274
-- grid_point: 283
-- grid_point: 364
+- grid_point: 0
+- grid_point: 1
+- grid_point: 2
+- grid_point: 3
+- grid_point: 4
+- grid_point: 10
+- grid_point: 11
+- grid_point: 12
+- grid_point: 13
+- grid_point: 20
+- grid_point: 21
+- grid_point: 22
+- grid_point: 30
+- grid_point: 31
+- grid_point: 40
+- grid_point: 91
+- grid_point: 92
+- grid_point: 93
+- grid_point: 94
+- grid_point: 101
+- grid_point: 102
+- grid_point: 103
+- grid_point: 111
+- grid_point: 112
+- grid_point: 121
+- grid_point: 182
+- grid_point: 183
+- grid_point: 184
+- grid_point: 192
+- grid_point: 193
+- grid_point: 202
+- grid_point: 273
+- grid_point: 274
+- grid_point: 283
+- grid_point: 364
 ```

 one finds which grid points needed to be calculated, for instance using following

diff --git a/docs.it4i/salomon/software/compilers.md b/docs.it4i/salomon/software/compilers.md
index b14287af39e8f1f05176f7938bd11c6097e74c1a..8e62965ff71b3afbd4e178c5019a0101597401b5 100644
--- a/docs.it4i/salomon/software/compilers.md
+++ b/docs.it4i/salomon/software/compilers.md
@@ -1,34
+1,31 @@
-Compilers
-=========
+# Compilers

 Available compilers, including GNU, INTEL and UPC compilers

 There are several compilers for different programming languages available on the cluster:

-- C/C++
-- Fortran 77/90/95/HPF
-- Unified Parallel C
-- Java
+* C/C++
+* Fortran 77/90/95/HPF
+* Unified Parallel C
+* Java

 The C/C++ and Fortran compilers are provided by:

 Opensource:

-- GNU GCC
-- Clang/LLVM
+* GNU GCC
+* Clang/LLVM

 Commercial licenses:

-- Intel
-- PGI
+* Intel
+* PGI

-Intel Compilers
----------------
+## Intel Compilers

 For information about the usage of Intel Compilers and other Intel products, please read the [Intel Parallel studio](intel-suite/) page.

-PGI Compilers
--------------
+## PGI Compilers

 The Portland Group Cluster Development Kit (PGI CDK) is available.

@@ -55,8 +52,8 @@ PGDBG OpenMP/MPI debugger and PGPROF OpenMP/MPI profiler are available

 For more information, see the [PGI page](http://www.pgroup.com/products/pgicdk.htm).

-GNU
----
+## GNU
+
 For compatibility reasons there are still available the original (old 4.4.7-11) versions of GNU compilers as part of the OS. These are accessible in the search path by default. It is strongly recommended to use the up to date version which comes with the module GCC:

@@ -80,12 +77,12 @@ With the module loaded two environment variables are predefined. One for maximum

 For more information about the possibilities of the compilers, please see the man pages.

-Unified Parallel C
-------------------
+## Unified Parallel C
+
 UPC is supported by two compiler/runtime implementations:

-- GNU - SMP/multi-threading support only
-- Berkley - multi-node support as well as SMP/multi-threading support
+* GNU - SMP/multi-threading support only
+* Berkeley - multi-node support as well as SMP/multi-threading support

 ### GNU UPC Compiler

@@ -141,7 +138,10 @@ To use the Berkley UPC compiler and runtime environment to run the binaries use

 As default UPC network the "smp" is used. This is very quick and easy way for testing/debugging, but limited to one node only.

-For production runs, it is recommended to use the native InfiniBand implementation of UPC network "ibv". For testing/debugging using multiple nodes, the "mpi" UPC network is recommended. Please note, that the selection of the network is done at the compile time and not at runtime (as expected)!
+For production runs, it is recommended to use the native InfiniBand implementation of UPC network "ibv". For testing/debugging using multiple nodes, the "mpi" UPC network is recommended.
+
+!!! warning
+    Selection of the network is done at the compile time and not at runtime (as expected)!

 Example UPC code:

@@ -184,10 +184,10 @@ To run the example on two compute nodes using all 48 cores, with 48 threads, iss

 For more information see the man pages.

-##Java
+## Java

 For information how to use Java (runtime and/or compiler), please read the [Java page](java/).

-##NVIDIA CUDA
+## NVIDIA CUDA

-For information how to work with NVIDIA CUDA, please read the [NVIDIA CUDA page](../../anselm-cluster-documentation/software/nvidia-cuda/).
+For information how to work with NVIDIA CUDA, please read the [NVIDIA CUDA page](../../anselm/software/nvidia-cuda/).
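As a minimal illustration of the GNU section above, the following sketch shows swapping the old system GCC for the module version and compiling a program. The module name GCC comes from the text; the source file and optimization level are placeholders.

```bash
# Replace the old system GCC 4.4.7 with the up-to-date module version
$ module load GCC

# Confirm the module's gcc is now first in the search path
$ which gcc
$ gcc --version

# Compile with debug info and optimization (hello.c is a placeholder)
$ gcc -g -O2 -o hello hello.c
```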
diff --git a/docs.it4i/salomon/software/comsol/comsol-multiphysics.md b/docs.it4i/salomon/software/comsol/comsol-multiphysics.md
index a9f06a44239e9da93ec0df0e72ab79a38cc1cbe0..05a6d2944b2e8db354e134c8f506f87b70f0531a 100644
--- a/docs.it4i/salomon/software/comsol/comsol-multiphysics.md
+++ b/docs.it4i/salomon/software/comsol/comsol-multiphysics.md
@@ -1,26 +1,24 @@
-COMSOL Multiphysics®
-====================
+# COMSOL Multiphysics
+
+## Introduction

-Introduction
--------------------------
 [COMSOL](http://www.comsol.com) is a powerful environment for modelling and solving various engineering and scientific problems based on partial differential equations. COMSOL is designed to solve coupled or multiphysics phenomena. For many standard engineering problems COMSOL provides add-on products such as electrical, mechanical, fluid flow, and chemical applications.

-- [Structural Mechanics Module](http://www.comsol.com/structural-mechanics-module),
-- [Heat Transfer Module](http://www.comsol.com/heat-transfer-module),
-- [CFD Module](http://www.comsol.com/cfd-module),
-- [Acoustics Module](http://www.comsol.com/acoustics-module),
-- and [many others](http://www.comsol.com/products)
+* [Structural Mechanics Module](http://www.comsol.com/structural-mechanics-module),
+* [Heat Transfer Module](http://www.comsol.com/heat-transfer-module),
+* [CFD Module](http://www.comsol.com/cfd-module),
+* [Acoustics Module](http://www.comsol.com/acoustics-module),
+* and [many others](http://www.comsol.com/products)

 COMSOL also allows an interface support for equation-based modelling of partial differential equations.

-Execution
-----------------------
+## Execution

 On the clusters COMSOL is available in the latest stable version. There are two variants of the release:

-- **Non commercial** or so called >**EDU variant**>, which can be used for research and educational purposes.
+* **Non commercial** or so called **EDU variant**, which can be used for research and educational purposes.

-- **Commercial** or so called **COM variant**, which can used also for commercial activities. **COM variant** has only subset of features compared to the **EDU variant** available. More about licensing will be posted here soon.
+* **Commercial** or so called **COM variant**, which can also be used for commercial activities. The **COM variant** has only a subset of the features of the **EDU variant**. More about licensing will be posted here soon.

 To load COMSOL, load the module

@@ -34,7 +32,7 @@ By default the **EDU variant** will be loaded. If user needs other version or va

 $ module avail COMSOL
 ```

-If user needs to prepare COMSOL jobs in the interactive mode it is recommend to use COMSOL on the compute nodes via PBS Pro scheduler. In order run the COMSOL Desktop GUI on Windows is recommended to use the [Virtual Network Computing (VNC)](../../../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/).
+If the user needs to prepare COMSOL jobs in the interactive mode, it is recommended to use COMSOL on the compute nodes via the PBS Pro scheduler. In order to run the COMSOL Desktop GUI on Windows, it is recommended to use [Virtual Network Computing (VNC)](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/).

 ```bash
 $ xhost +
@@ -72,9 +70,9 @@ comsol -nn ${ntask} batch -configuration /tmp –mpiarg –rmk –mpiarg pbs -tm

 Working directory has to be created before sending the (comsol.pbs) job script into the queue.
Input file (name_input_f.mph) has to be in working directory or full path to input file has to be specified. The appropriate path to the temp directory of the job has to be set by command option (-tmpdir).

-LiveLink™* *for MATLAB®
--------------------------
-COMSOL is the software package for the numerical solution of the partial differential equations. LiveLink for MATLAB allows connection to the COMSOL®API (Application Programming Interface) with the benefits of the programming language and computing environment of the MATLAB.
+## LiveLink for MATLAB
+
+COMSOL is a software package for the numerical solution of partial differential equations. LiveLink for MATLAB allows connection to the COMSOL API (Application Programming Interface) with the benefits of the programming language and computing environment of MATLAB.

 LiveLink for MATLAB is available in both **EDU** and **COM** **variant** of the COMSOL release. On the clusters 1 commercial (**COM**) license and the 5 educational (**EDU**) licenses of LiveLink for MATLAB (please see the [ISV Licenses](../isv_licenses/)) are available. Following example shows how to start COMSOL model from MATLAB via LiveLink in the interactive mode.

diff --git a/docs.it4i/salomon/software/comsol/licensing-and-available-versions.md b/docs.it4i/salomon/software/comsol/licensing-and-available-versions.md
index e3a4950a772e6942dbd4caeabc2e5d6d885b3a9e..4358b930fedbfcdf3ea9277d2fa5c89e8a74ca37 100644
--- a/docs.it4i/salomon/software/comsol/licensing-and-available-versions.md
+++ b/docs.it4i/salomon/software/comsol/licensing-and-available-versions.md
@@ -1,23 +1,19 @@
-Licensing and Available Versions
-================================
+# Licensing and Available Versions

-Comsol licence can be used by:
------------------------------
+## Comsol Licence Can Be Used By:

-- all persons in the carrying out of the CE IT4Innovations Project (In addition to the primary licensee, which is VSB - Technical University of Ostrava, users are CE IT4Innovations third parties - CE IT4Innovations project partners, particularly the University of Ostrava, the Brno University of Technology - Faculty of Informatics, the Silesian University in Opava, Institute of Geonics AS CR.)
-- all persons who have a valid license
-- students of the Technical University
+* all persons involved in carrying out the CE IT4Innovations Project (in addition to the primary licensee, which is VSB - Technical University of Ostrava, users are CE IT4Innovations third parties - CE IT4Innovations project partners, particularly the University of Ostrava, the Brno University of Technology - Faculty of Informatics, the Silesian University in Opava, Institute of Geonics AS CR.)
+* all persons who have a valid license
+* students of the Technical University

-Comsol EDU Network Licence
--------------------------
+## Comsol EDU Network Licence

 The licence is intended to be used for science and research, publications, students’ projects, teaching (academic licence).

-Comsol COM Network Licence
--------------------------
+## Comsol COM Network Licence

-The licence intended to be used for science and research, publications, students’ projects, commercial research with no commercial use restrictions. Enables the solution of at least one job by one user in one program start.
+The licence is intended to be used for science and research, publications, students’ projects, and commercial research with no commercial use restrictions. It enables the solution of at least one job by one user in one program start.
-Available Versions
-------------------
-- ver. 51
\ No newline at end of file
+## Available Versions
+
+* ver. 51
diff --git a/docs.it4i/salomon/software/debuggers/Introduction.md b/docs.it4i/salomon/software/debuggers/Introduction.md
index c85157da67777964414ed2133e82df75fc1a797d..a5c9cfb60154fbaf13faebaf15a508597b40703f 100644
--- a/docs.it4i/salomon/software/debuggers/Introduction.md
+++ b/docs.it4i/salomon/software/debuggers/Introduction.md
@@ -1,13 +1,10 @@
-Debuggers and profilers summary
-===============================
+# Debuggers and Profilers Summary

-Introduction
-------------
+## Introduction

 We provide state of the art programs and tools to develop, profile and debug HPC codes at IT4Innovations. On these pages, we provide an overview of the profiling and debugging tools available on Anselm at IT4I.

-Intel debugger
---------------
+## Intel Debugger

 Intel debugger is no longer available since Parallel Studio version 2015

@@ -20,8 +17,8 @@ The intel debugger version 13.0 is available, via module intel. The debugger wor

 Read more at the [Intel Debugger](../intel-suite/intel-debugger/) page.

-Allinea Forge (DDT/MAP)
------------------------
+## Allinea Forge (DDT/MAP)
+
 Allinea DDT is a commercial debugger primarily for debugging parallel MPI or OpenMP programs. It also has support for GPU (CUDA) and Intel Xeon Phi accelerators. DDT provides all the standard debugging features (stack trace, breakpoints, watches, view variables, threads etc.) for every thread running as part of your program, or for every process - even if these processes are distributed across a cluster using an MPI implementation.

 ```bash
@@ -31,8 +28,8 @@ Allinea DDT, is a commercial debugger primarily for debugging parallel MPI or Op

 Read more at the [Allinea DDT](allinea-ddt/) page.

-Allinea Performance Reports
----------------------------
+## Allinea Performance Reports
+
 Allinea Performance Reports characterize the performance of HPC application runs. After executing your application through the tool, a synthetic HTML report is generated automatically, containing information about several metrics along with clear behavior statements and hints to help you improve the efficiency of your runs. Our license is limited to 64 MPI processes.

 ```bash
@@ -42,8 +39,8 @@ Allinea Performance Reports characterize the performance of HPC application runs

 Read more at the [Allinea Performance Reports](allinea-performance-reports/) page.

-RougeWave Totalview
-------------------
+## RogueWave TotalView
+
 TotalView is a source- and machine-level debugger for multi-process, multi-threaded programs. Its wide range of tools provides ways to analyze, organize, and test programs, making it easy to isolate and identify problems in individual threads and processes in programs of great complexity.

 ```bash
@@ -53,8 +50,8 @@ TotalView is a source- and machine-level debugger for multi-process, multi-threa

 Read more at the [Totalview](total-view/) page.

-Vampir trace analyzer
----------------------
+## Vampir Trace Analyzer
+
 Vampir is a GUI trace analyzer for traces in OTF format.

 ```bash
diff --git a/docs.it4i/salomon/software/debuggers/aislinn.md b/docs.it4i/salomon/software/debuggers/aislinn.md
index c2a9982448b0bee936940655f406615075d60301..e1dee28b8d6d78ef7be2371afb2f8884f2b5f364 100644
--- a/docs.it4i/salomon/software/debuggers/aislinn.md
+++ b/docs.it4i/salomon/software/debuggers/aislinn.md
@@ -1,15 +1,14 @@
-Aislinn
-=======
+# Aislinn

-- Aislinn is a dynamic verifier for MPI programs.
For a fixed input it covers all possible runs with respect to nondeterminism introduced by MPI. It allows to detect bugs (for sure) that occurs very rare in normal runs.
-- Aislinn detects problems like invalid memory accesses, deadlocks, misuse of MPI, and resource leaks.
-- Aislinn is open-source software; you can use it without any licensing limitations.
-- Web page of the project: <http://verif.cs.vsb.cz/aislinn/>
+* Aislinn is a dynamic verifier for MPI programs. For a fixed input it covers all possible runs with respect to the nondeterminism introduced by MPI. This allows it to reliably detect bugs that occur only very rarely in normal runs.
+* Aislinn detects problems like invalid memory accesses, deadlocks, misuse of MPI, and resource leaks.
+* Aislinn is open-source software; you can use it without any licensing limitations.
+* Web page of the project: <http://verif.cs.vsb.cz/aislinn/>

-!!! Note "Note"
-    Aislinn is software developed at IT4Innovations and some parts are still considered experimental. If you have any questions or experienced any problems, please contact the author: <stanislav.bohm@vsb.cz>.
+!!! note
+    Aislinn is software developed at IT4Innovations and some parts are still considered experimental. If you have any questions or experience any problems, please contact the author: <mailto:stanislav.bohm@vsb.cz>.

-### Usage
+## Usage

 Let us have the following program that contains a bug that is not manifested in all runs:

@@ -83,20 +82,21 @@ At the beginning of the report there are some basic summaries of the verificatio

 It shows us:
-    - Error occurs in process 0 in test.cpp on line 16.
-    - Stdout and stderr streams are empty. (The program does not write anything).
-    - The last part shows MPI calls for each process that occurs in the invalid run. The more detailed information about each call can be obtained by mouse cursor.
+
+* The error occurs in process 0 in test.cpp on line 16.
+* The stdout and stderr streams are empty. (The program does not write anything.)
+* The last part shows the MPI calls for each process that occur in the invalid run. More detailed information about each call can be obtained by hovering the mouse cursor over it.

 ### Limitations

 Since the verification is a non-trivial process there are some limitations.

-- The verified process has to terminate in all runs, i.e. we cannot answer the halting problem.
-- The verification is a computationally and memory demanding process. We put an effort to make it efficient and it is an important point for further research. However covering all runs will be always more demanding than techniques that examines only a single run. The good practise is to start with small instances and when it is feasible, make them bigger. The Aislinn is good to find bugs that are hard to find because they occur very rarely (only in a rare scheduling). Such bugs often do not need big instances.
-- Aislinn expects that your program is a "standard MPI" program, i.e. processes communicate only through MPI, the verified program does not interacts with the system in some unusual ways (e.g. opening sockets).
+* The verified process has to terminate in all runs, i.e. we cannot answer the halting problem.
+* The verification is a computationally and memory demanding process. We put effort into making it efficient, and it is an important point for further research. However, covering all runs will always be more demanding than techniques that examine only a single run. Good practice is to start with small instances and, when feasible, make them bigger.
The Aislinn is good to find bugs that are hard to find because they occur very rarely (only in a rare scheduling). Such bugs often do not need big instances. +* Aislinn expects that your program is a "standard MPI" program, i.e. processes communicate only through MPI, the verified program does not interacts with the system in some unusual ways (e.g. opening sockets). There are also some limitations bounded to the current version and they will be removed in the future: -- All files containing MPI calls have to be recompiled by MPI implementation provided by Aislinn. The files that does not contain MPI calls, they do not have to recompiled. Aislinn MPI implementation supports many commonly used calls from MPI-2 and MPI-3 related to point-to-point communication, collective communication, and communicator management. Unfortunately, MPI-IO and one-side communication is not implemented yet. -- Each MPI can use only one thread (if you use OpenMP, set OMP_NUM_THREADS to 1). -- There are some limitations for using files, but if the program just reads inputs and writes results, it is ok. +* All files containing MPI calls have to be recompiled by MPI implementation provided by Aislinn. The files that does not contain MPI calls, they do not have to recompiled. Aislinn MPI implementation supports many commonly used calls from MPI-2 and MPI-3 related to point-to-point communication, collective communication, and communicator management. Unfortunately, MPI-IO and one-side communication is not implemented yet. +* Each MPI can use only one thread (if you use OpenMP, set OMP_NUM_THREADS to 1). +* There are some limitations for using files, but if the program just reads inputs and writes results, it is ok. diff --git a/docs.it4i/salomon/software/debuggers/allinea-ddt.md b/docs.it4i/salomon/software/debuggers/allinea-ddt.md index 0693e6504c24fb5a2c6b69a76500f9ec36f4ed64..41dd4c6e8266e257a425c0e7a8b54330c38ccf04 100644 --- a/docs.it4i/salomon/software/debuggers/allinea-ddt.md +++ b/docs.it4i/salomon/software/debuggers/allinea-ddt.md @@ -1,5 +1,4 @@ -Allinea Forge (DDT,MAP) -======================= +# Allinea Forge (DDT,MAP) Allinea Forge consist of two tools - debugger DDT and profiler MAP. @@ -7,20 +6,19 @@ Allinea DDT, is a commercial debugger primarily for debugging parallel MPI or Op Allinea MAP is a profiler for C/C++/Fortran HPC codes. It is designed for profiling parallel code, which uses pthreads, OpenMP or MPI. -License and Limitations for Anselm Users ----------------------------------------- +## License and Limitations for Anselm Users + On Anselm users can debug OpenMP or MPI code that runs up to 64 parallel processes. In case of debugging GPU or Xeon Phi accelerated codes the limit is 8 accelerators. These limitation means that: -- 1 user can debug up 64 processes, or -- 32 users can debug 2 processes, etc. +* 1 user can debug up 64 processes, or +* 32 users can debug 2 processes, etc. In case of debugging on accelerators: -- 1 user can debug on up to 8 accelerators, or -- 8 users can debug on single accelerator. +* 1 user can debug on up to 8 accelerators, or +* 8 users can debug on single accelerator. -Compiling Code to run with DDT ------------------------------- +## Compiling Code to Run With DDT ### Modules @@ -45,24 +43,24 @@ $ mpicc -g -O0 -o test_debug test.c $ mpif90 -g -O0 -o test_debug test.f ``` -### Compiler flags +### Compiler Flags Before debugging, you need to compile your code with theses flags: -!!! Note "Note" - - **g** : Generates extra debugging information usable by GDB. 
-g3 includes even more debugging information. This option is available for GNU and INTEL C/C++ and Fortran compilers.
+!!! note
+    **-g** : Generates extra debugging information usable by GDB. -g3 includes even more debugging information. This option is available for GNU and INTEL C/C++ and Fortran compilers.
+
+    **-O0** : Suppress all optimizations.
-    - **O0** : Suppress all optimizations.

+## Starting a Job With DDT

-Starting a Job with DDT
-----------------------
-Be sure to log in with an X window forwarding enabled. This could mean using the -X in the ssh:
+Be sure to log in with X window forwarding enabled. This could mean using the -X option in ssh:

```bash
$ ssh -X username@anselm.it4i.cz
```

-Other options is to access login node using VNC. Please see the detailed information on how to [use graphic user interface on Anselm](https://docs.it4i.cz/anselm-cluster-documentation/software/debuggers/resolveuid/11e53ad0d2fd4c5187537f4baeedff33)
+Another option is to access the login node using VNC. Please see the detailed information on how to [use the graphic user interface on Anselm](/general/accessing-the-clusters/graphical-user-interface/x-window-system/)

 From the login node an interactive session **with X windows forwarding** (-X option) can be started by the following command:

@@ -86,8 +84,8 @@ To start the debugging directly without the submission window, user can specify

 ddt -start -np 4 ./hello_debug_impi
```

-Documentation
-------------
+## Documentation
+
 Users can find the original User Guide after loading the DDT module:

```bash
diff --git a/docs.it4i/salomon/software/debuggers/allinea-performance-reports.md b/docs.it4i/salomon/software/debuggers/allinea-performance-reports.md
index 6ab49b2d779ee27eef400e8ecbf227d58d01aa68..3d0826e994bb6434b9cd0cd100249393191c03d3 100644
--- a/docs.it4i/salomon/software/debuggers/allinea-performance-reports.md
+++ b/docs.it4i/salomon/software/debuggers/allinea-performance-reports.md
@@ -1,17 +1,15 @@
-Allinea Performance Reports
-===========================
+# Allinea Performance Reports

+## Introduction

-Introduction
-------------
 Allinea Performance Reports characterize the performance of HPC application runs. After executing your application through the tool, a synthetic HTML report is generated automatically, containing information about several metrics along with clear behavior statements and hints to help you improve the efficiency of your runs.

 Allinea Performance Reports is most useful in profiling MPI programs.

 Our license is limited to 64 MPI processes.

-Modules
--------
+## Modules
+
 Allinea Performance Reports version 6.0 is available

```bash
@@ -20,8 +18,8 @@ Allinea Performance Reports version 6.0 is available

 The module sets up environment variables, required for using the Allinea Performance Reports.

-Usage
-----
+## Usage

 Use the perf-report wrapper on your (MPI) program.

@@ -31,10 +28,10 @@ Instead of [running your MPI program the usual way](../mpi/mpi/), use the the pe

 $ perf-report mpirun ./mympiprog.x
```

-The mpi program will run as usual. The perf-report creates two additional files, in *.txt and *.html format, containing the performance report. Note that demanding MPI codes should be run within [ the queue system](../../resource-allocation-and-job-execution/job-submission-and-execution/).
+The MPI program will run as usual. The perf-report creates two additional files, in \*.txt and \*.html format, containing the performance report. 
Note that demanding MPI codes should be run within [the queue system](../../job-submission-and-execution/).
+
+## Example

-Example
-------
 In this example, we will be profiling the mympiprog.x MPI program, using Allinea performance reports. Assume that the code is compiled with Intel compilers and linked against the Intel MPI library:

 First, we allocate some nodes via the express queue:

@@ -58,4 +55,4 @@ Now lets profile the code:

 $ perf-report mpirun ./mympiprog.x
```

-Performance report files [mympiprog_32p*.txt](mympiprog_32p_2014-10-15_16-56.txt) and [mympiprog_32p*.html](mympiprog_32p_2014-10-15_16-56.html) were created. We can see that the code is very efficient on MPI and is CPU bounded.
+Performance report files [mympiprog_32p\*.txt](mympiprog_32p_2014-10-15_16-56.txt) and [mympiprog_32p\*.html](mympiprog_32p_2014-10-15_16-56.html) were created. We can see that the code is very efficient on MPI and is CPU bound.
diff --git a/docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md b/docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md
index 332601743958bf114e417f1ab2ce98d21034fa62..2fdbd18e166d3e553a8ad5719f7945f902cbd73c 100644
--- a/docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md
+++ b/docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md
@@ -1,26 +1,25 @@
-Intel VTune Amplifier XE
-========================
+# Intel VTune Amplifier XE

-Introduction
-------------
-Intel*® *VTune™ Amplifier, part of Intel Parallel studio, is a GUI profiling tool designed for Intel processors. It offers a graphical performance analysis of single core and multithreaded applications. A highlight of the features:
+## Introduction

-- Hotspot analysis
-- Locks and waits analysis
-- Low level specific counters, such as branch analysis and memory bandwidth
-- Power usage analysis - frequency and sleep states.
+Intel® VTune™ Amplifier, part of Intel Parallel Studio, is a GUI profiling tool designed for Intel processors. It offers a graphical performance analysis of single core and multithreaded applications. A highlight of the features:
+
+* Hotspot analysis
+* Locks and waits analysis
+* Low level specific counters, such as branch analysis and memory bandwidth
+* Power usage analysis - frequency and sleep states.

 

-Usage
-----
+## Usage
+
 To profile an application with VTune Amplifier, special kernel modules need to be loaded. The modules are not loaded on the login nodes, thus direct profiling on login nodes is not possible. By default, the kernel modules are not loaded on compute nodes either. In order to have the modules loaded, you need to specify the vtune=version PBS resource at job submit. The version is the same as for the environment module. For example, to use VTune/2016_update1:

```bash
$ qsub -q qexp -A OPEN-0-0 -I -l select=1,vtune=2016_update1
```

-After that, you can verify the modules sep*, pax and vtsspp are present in the kernel :
+After that, you can verify the modules sep\*, pax and vtsspp are present in the kernel:

```bash
$ lsmod | grep -e sep -e pax -e vtsspp
@@ -34,6 +33,7 @@ To launch the GUI, first load the module:
```bash
$ module add VTune/2016_update1
```
+
and launch the GUI:

```bash
@@ -44,8 +44,7 @@ The GUI will open in new window. Click on "New Project..." to create a new proje
To run a new analysis, click "New analysis...". You will see a list of possible analyses. Some of them will not be possible on the current CPU (e.g. 
Intel Atom analysis is not possible on a Sandy Bridge CPU); the GUI will show an error box if you select the wrong analysis. For example, select "Advanced Hotspots". Clicking on Start will start profiling of the application.

-Remote Analysis
---------------
+## Remote Analysis

 VTune Amplifier also allows a form of remote analysis. In this mode, data for analysis is collected from the command line without GUI, and the results are then loaded to GUI on another machine. This allows profiling without interactive graphical jobs. To perform a remote analysis, launch a GUI somewhere, open the new analysis window and then click the button "Command line" in the bottom right corner. It will show the command line needed to perform the selected analysis.

@@ -57,20 +56,20 @@ The command line will look like this:

 Copy the line to clipboard and then you can paste it in your jobscript or in command line. After the collection is run, open the GUI once again, click the menu button in the upper right corner, and select "Open > Result...". The GUI will load the results from the run.

-Xeon Phi
--------
+## Xeon Phi
+
 It is possible to analyze both native and offloaded Xeon Phi applications.

-### Native mode
+### Native Mode

-This mode is useful for native Xeon Phi applications launched directly on the card. In *Analysis Target* window, select *Intel Xeon Phi coprocessor (native), *choose path to the binary and MIC card to run on.
+This mode is useful for native Xeon Phi applications launched directly on the card. In the *Analysis Target* window, select *Intel Xeon Phi coprocessor (native)*, then choose the path to the binary and the MIC card to run on.

-### Offload mode
+### Offload Mode

-This mode is useful for applications that are launched from the host and use offload, OpenCL or mpirun. In *Analysis Target* window, select *Intel Xeon Phi coprocessor (native), *choose path to the binaryand MIC card to run on.
+This mode is useful for applications that are launched from the host and use offload, OpenCL or mpirun. In the *Analysis Target* window, select *Intel Xeon Phi coprocessor (native)*, then choose the path to the binary and the MIC card to run on.

-!!! Note "Note"
-    If the analysis is interrupted or aborted, further analysis on the card might be impossible and you will get errors like "ERROR connecting to MIC card". In this case please contact our support to reboot the MIC card.
+!!! note
+    If the analysis is interrupted or aborted, further analysis on the card might be impossible and you will get errors like "ERROR connecting to MIC card". In this case please contact our support to reboot the MIC card.

 You may also use remote analysis to collect data from the MIC and then analyze it in the GUI later:

@@ -88,8 +87,8 @@ Host launch:

 You can obtain this command line by pressing the "Command line..." button on the Analysis Type screen.

-References
---------
-1. <https://www.rcac.purdue.edu/tutorials/phi/PerformanceTuningXeonPhi-Tullos.pdf> Performance Tuning for Intel® Xeon Phi™ Coprocessors
-2. <https://software.intel.com/en-us/intel-vtune-amplifier-xe-support/documentation> >Intel® VTune™ Amplifier Support
-3. <https://software.intel.com/en-us/amplifier_help_linux>
+## References
+
+1. [Performance Tuning for Intel® Xeon Phi™ Coprocessors](https://www.rcac.purdue.edu/tutorials/phi/PerformanceTuningXeonPhi-Tullos.pdf)
+1. [Intel® VTune™ Amplifier Support](https://software.intel.com/en-us/intel-vtune-amplifier-xe-support/documentation)
+1. 
[https://software.intel.com/en-us/amplifier_help_linux](https://software.intel.com/en-us/amplifier_help_linux)
diff --git a/docs.it4i/salomon/software/debuggers/total-view.md b/docs.it4i/salomon/software/debuggers/total-view.md
index 7781ab41042c06cef7484f78c22a4d877bff4af4..f4f69278ff59e8f2cd35aad8b5c79bf78a4a0171 100644
--- a/docs.it4i/salomon/software/debuggers/total-view.md
+++ b/docs.it4i/salomon/software/debuggers/total-view.md
@@ -1,10 +1,9 @@
-Total View
-==========
+# Total View

 TotalView is a GUI-based source code multi-process, multi-thread debugger.

-License and Limitations for cluster Users
------------------------------------------
+## License and Limitations for Cluster Users
+
 On the cluster users can debug OpenMP or MPI code that runs up to 64 parallel processes. These limitations mean that:

```bash
@@ -16,8 +15,7 @@ Debugging of GPU accelerated codes is also supported.

 You can check the status of the licenses [here](https://extranet.it4i.cz/rsweb/anselm/license/totalview).

-Compiling Code to run with TotalView
------------------------------------
+## Compiling Code to Run With TotalView

 ### Modules

@@ -43,17 +41,17 @@ Compile the code:

 mpif90 -g -O0 -o test_debug test.f
```

-### Compiler flags
+### Compiler Flags

 Before debugging, you need to compile your code with these flags:

-!!! Note "Note"
-    **-g** : Generates extra debugging information usable by GDB. -g3 includes even more debugging information. This option is available for GNU and INTEL C/C++ and Fortran compilers.
+!!! note
+    **-g** : Generates extra debugging information usable by GDB. -g3 includes even more debugging information. This option is available for GNU and INTEL C/C++ and Fortran compilers.
+
+    **-O0** : Suppress all optimizations.

-    **-O0** : Suppress all optimizations.
+## Starting a Job With TotalView

-Starting a Job with TotalView
-----------------------------
 Be sure to log in with X window forwarding enabled. This could mean using the -X option in ssh:

```bash
@@ -70,7 +68,7 @@ From the login node an interactive session with X windows forwarding (-X option)

 Then launch the debugger with the totalview command followed by the name of the executable to debug.

-### Debugging a serial code
+### Debugging a Serial Code

 To debug a serial code use:

@@ -78,12 +76,12 @@ To debug a serial code use:

 totalview test_debug
```

-### Debugging a parallel code - option 1
+### Debugging a Parallel Code - Option 1

 To debug a parallel code compiled with **OpenMPI** you need to set up your TotalView environment:

-!!! Note "Note"
-    **Please note:** To be able to run parallel debugging procedure from the command line without stopping the debugger in the mpiexec source code you have to add the following function to your **~/.tvdrc** file:
+!!! hint
+    To be able to run the parallel debugging procedure from the command line without stopping the debugger in the mpiexec source code, you have to add the following function to your **~/.tvdrc** file.

```bash
 proc mpi_auto_run_starter {loaded_id} {
@@ -114,7 +112,9 @@ The source code of this function can be also found in

 You can also add only the following line to your ~/.tvdrc file instead of the entire function:

-**source /apps/all/OpenMPI/1.10.1-GNU-4.9.3-2.25/etc/openmpi-totalview.tcl**
+```bash
+source /apps/all/OpenMPI/1.10.1-GNU-4.9.3-2.25/etc/openmpi-totalview.tcl
+```

 You need to do this step only once. 
See also the [OpenMPI FAQ entry](https://www.open-mpi.org/faq/?category=running#run-with-tv)

@@ -132,7 +132,7 @@ At this point the main TotalView GUI window will appear and you can insert the b

 

-### Debugging a parallel code - option 2
+### Debugging a Parallel Code - Option 2

 Another option to start a new parallel debugging session from the command line is to let TotalView execute mpirun itself. In this case the user has to specify the MPI implementation used to compile the source code.

@@ -148,7 +148,6 @@ After running previous command you will see the same window as shown in the scre

 More information regarding the command line parameters of TotalView can be found in the TotalView Reference Guide, Chapter 7: TotalView Command Syntax.

-Documentation
-------------
+## Documentation

 [1] The [TotalView documentation](http://www.roguewave.com/support/product-documentation/totalview-family.aspx#totalview) web page is a good resource for learning more about some of the advanced TotalView features.
diff --git a/docs.it4i/salomon/software/debuggers/valgrind.md b/docs.it4i/salomon/software/debuggers/valgrind.md
index df3bda344fc9a0d41599a573e6405632ce2b6983..430118785a08bc43e67a4711396f9ac6b63c4afb 100644
--- a/docs.it4i/salomon/software/debuggers/valgrind.md
+++ b/docs.it4i/salomon/software/debuggers/valgrind.md
@@ -1,31 +1,30 @@
-Valgrind
-========
+# Valgrind
+
+## About Valgrind

-About Valgrind
--------------
 Valgrind is an open-source tool, used mainly for debugging memory-related problems, such as memory leaks, use of uninitialized memory etc. in C/C++ applications. The toolchain was however extended over time with more functionality, such as debugging of threaded applications and cache profiling, not limited only to C/C++.

 Valgrind is an extremely useful tool for debugging memory errors such as [off-by-one](http://en.wikipedia.org/wiki/Off-by-one_error). Valgrind uses a virtual machine and dynamic recompilation of binary code; because of that, you can expect that programs being debugged by Valgrind run 5-100 times slower.

 The main tools available in Valgrind are:

-- **Memcheck**, the original, must used and default tool. Verifies memory access in you program and can detect use of unitialized memory, out of bounds memory access, memory leaks, double free, etc.
-- **Massif**, a heap profiler.
-- **Hellgrind** and **DRD** can detect race conditions in multi-threaded applications.
-- **Cachegrind**, a cache profiler.
-- **Callgrind**, a callgraph analyzer.
-- For a full list and detailed documentation, please refer to the [official Valgrind documentation](http://valgrind.org/docs/).
+* **Memcheck**, the original, most used and default tool. Verifies memory access in your program and can detect use of uninitialized memory, out of bounds memory access, memory leaks, double free, etc.
+* **Massif**, a heap profiler.
+* **Helgrind** and **DRD** can detect race conditions in multi-threaded applications.
+* **Cachegrind**, a cache profiler.
+* **Callgrind**, a callgraph analyzer.
+* For a full list and detailed documentation, please refer to the [official Valgrind documentation](http://valgrind.org/docs/).
+
+## Installed Versions

-Installed versions
-----------------
 There are two versions of Valgrind available on the cluster.

-- Version 3.8.1, installed by operating system vendor in /usr/bin/valgrind. This version is available by default, without the need to load any module. This version however does not provide additional MPI support. 
Also, it does not support AVX2 instructions, debugging of an AVX2-enabled executable with this version will fail
-- Version 3.11.0 built by ICC with support for Intel MPI, available in module Valgrind/3.11.0-intel-2015b. After loading the module, this version replaces the default valgrind.
-- Version 3.11.0 built by GCC with support for Open MPI, module Valgrind/3.11.0-foss-2015b
+* Version 3.8.1, installed by the operating system vendor in /usr/bin/valgrind. This version is available by default, without the need to load any module. This version however does not provide additional MPI support. Also, it does not support AVX2 instructions; debugging of an AVX2-enabled executable with this version will fail.
+* Version 3.11.0 built by ICC with support for Intel MPI, available in module Valgrind/3.11.0-intel-2015b. After loading the module, this version replaces the default valgrind.
+* Version 3.11.0 built by GCC with support for Open MPI, module Valgrind/3.11.0-foss-2015b.
+
+## Usage

-Usage
----
 Compile the application which you want to debug as usual. It is advisable to add the compilation flags -g (to add debugging information to the binary so that you will see the original source code lines in the output) and -O0 (to disable compiler optimizations).

 For example, let's look at this C code, which has two problems:

@@ -55,7 +54,7 @@ Now, compile it with Intel compiler:

 Now, let's run it with Valgrind. The syntax is:

-valgrind [valgrind options] <your program binary> [your program options]
+`valgrind [valgrind options] <your program binary> [your program options]`

 If no Valgrind options are specified, Valgrind defaults to running the Memcheck tool. Please refer to the Valgrind documentation for a full description of command line options.

@@ -131,8 +130,8 @@ In the output we can see that Valgrind has detected both errors - the off-by-one

 Now we can see that the memory leak is due to the malloc() at line 6.

-Usage with MPI
--------------------------
+## Usage With MPI
+
 Although Valgrind is not primarily a parallel debugger, it can be used to debug parallel applications as well. When launching your parallel applications, prepend the valgrind command. For example:

```bash
@@ -160,7 +159,7 @@ so it is better to use the MPI-enabled valgrind from module. The MPI versions re

 $EBROOTVALGRIND/lib/valgrind/libmpiwrap-amd64-linux.so

-which must be included in the LD_PRELOAD environment variable.
+which must be included in the LD_PRELOAD environment variable.

 Let's look at this MPI example:
diff --git a/docs.it4i/salomon/software/debuggers/vampir.md b/docs.it4i/salomon/software/debuggers/vampir.md
index c19f105f006d40733b80443f42b11b119db6a626..99053546c14b43c51d5ab7728dfa3824f2016170 100644
--- a/docs.it4i/salomon/software/debuggers/vampir.md
+++ b/docs.it4i/salomon/software/debuggers/vampir.md
@@ -1,12 +1,11 @@
-Vampir
-======
+# Vampir

 Vampir is a commercial trace analysis and visualisation tool. It can work with traces in OTF and OTF2 formats. It does not have the functionality to collect traces; you need to use a trace collection tool (such as [Score-P](score-p/)) first to collect the traces.

 

-Installed versions
-----------------
+## Installed Versions
+
 Version 8.5.0 is currently installed as module Vampir/8.5.0:

```bash
@@ -14,11 +13,10 @@ Version 8.5.0 is currently installed as module Vampir/8.5.0 :
 $ vampir &
```

-User manual
---------
+## User Manual
+
 You can find the detailed user manual in PDF format in $EBROOTVAMPIR/doc/vampir-manual.pdf

-References
--------
-1. 
<https://www.vampir.eu> +## References +1. <https://www.vampir.eu> diff --git a/docs.it4i/salomon/software/index.md b/docs.it4i/salomon/software/index.md deleted file mode 100644 index da0befc72109dad8ce4f5c764a6eb40a79237bd2..0000000000000000000000000000000000000000 --- a/docs.it4i/salomon/software/index.md +++ /dev/null @@ -1,64 +0,0 @@ -Salomon Cluster Software -=== - -## [Modules](../../modules-salomon) -* List of Available Modules -## [Compilers](compilers) -* Available compilers, including GNU, INTEL and UPC compilers -## [Intel Xeon Phi](intel-xeon-phi) -* A guide to Intel Xeon Phi usage -## [Java](java) -* Java on the cluster -## [Operating System](operating-system) -* The operating system, deployed on Salomon cluster -## Intel Suite -* The Intel Parallel Studio XE -### [Introduction](intel-suite/intel-parallel-studio-introduction) -### [Intel MKL](intel-suite/intel-mkl) -### [Intel Compilers](intel-suite/intel-compilers) -### [Intel IPP](intel-suite/intel-integrated-performance-primitives) -### [Intel TBB](intel-suite/intel-tbb) -### [Intel Debugger](intel-suite/intel-debugger) -### [Intel Inspector](intel-suite/intel-inspector) -### [Intel Trace Analyzer and Collector](intel-suite/intel-trace-analyzer-and-collector) -### [Intel Advisor](intel-suite/intel-advisor) -## MPI -* Message Passing Interface libraries -### [Introduction](mpi/mpi) -### [MPI4Py (MPI for Python)](mpi/mpi4py-mpi-for-python) -### [Running Open MPI](mpi/Running_OpenMPI) -## Debuggers -* A collection of development tools -### [Introduction](debuggers/Introduction) -### [Valgrind](debuggers/valgrind) -### [Allinea Forge (DDT,MAP)](debuggers/allinea-ddt) -### [Total View](debuggers/total-view) -### [Intel VTune Amplifier XE](debuggers/intel-vtune-amplifier) -### [Aislinn](debuggers/aislinn) -### [Allinea Performance Reports](debuggers/allinea-performance-reports) -### [Vampir](debuggers/vampir) -## Numerical Languages -* Interpreted languages for numerical computations -### [Introduction](numerical-languages/introduction) -### [R](numerical-languages/r) -### [MATLAB](numerical-languages/matlab) -### [Octave](numerical-languages/octave) -## Chemistry -* Tools for computational chemistry -### [Molpro](chemistry/molpro) -### [Phono3py](chemistry/phono3py) -### [NWChem](chemistry/nwchem) -## COMSOL -* A finite element analysis, solver and Simulation software -### [COMSOL](comsol/comsol-multiphysics) -### [Licensing and Available Versions](comsol/licensing-and-available-versions) -## ANSYS -* An engineering simulation software -### [Introduction](ansys/ansys) -### [Workbench](ansys/workbench) -### [ANSYS CFX](ansys/ansys-cfx) -### [ANSYS LS-DYNA](ansys/ansys-ls-dyna) -### [ANSYS MAPDL](ansys/ansys-mechanical-apdl) -### [ANSYS Fluent](ansys/ansys-fluent) -### [Setting license preferences](ansys/licensing) -### [Licensing and Available Versions](ansys/setting-license-preferences) diff --git a/docs.it4i/salomon/software/intel-suite/intel-advisor.md b/docs.it4i/salomon/software/intel-suite/intel-advisor.md index cf25a765ce349510bd49faa4058543c87f489e2d..427f5c98cfccf29de4870043c08074ac1a246135 100644 --- a/docs.it4i/salomon/software/intel-suite/intel-advisor.md +++ b/docs.it4i/salomon/software/intel-suite/intel-advisor.md @@ -1,16 +1,15 @@ -Intel Advisor -============= +# Intel Advisor is tool aiming to assist you in vectorization and threading of your code. You can use it to profile your application and identify loops, that could benefit from vectorization and/or threading parallelism. 
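For readers who prefer to script the profiling step, here is a minimal, hedged sketch of collecting an Advisor survey from the command line rather than the GUI; the project directory and binary names are placeholders, and the exact options should be verified against the Advisor documentation:

```bash
# Hypothetical sketch: collect a survey analysis in batch mode.
# ./my_advisor_project and ./myprog.x are placeholder names.
$ module load Advisor/2016_update2
$ advixe-cl -collect survey -project-dir ./my_advisor_project -- ./myprog.x
# The collected result can later be opened in the GUI with advixe-gui.
```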
-Installed versions ------------------- +## Installed Versions + The following versions are currently available on Salomon as modules: 2016 Update 2 - Advisor/2016_update2 -Usage ------ +## Usage + Your program should be compiled with -g switch to include symbol names. You should compile with -O2 or higher to see code that is already vectorized by the compiler. Profiling is possible either directly from the GUI, or from command line. @@ -21,12 +20,12 @@ To profile from GUI, launch Advisor: $ advixe-gui ``` -Then select menu File -> New -> Project. Choose a directory to save project data to. After clicking OK, Project properties window will appear, where you can configure path to your binary, launch arguments, working directory etc. After clicking OK, the project is ready. +Then select menu File -> New -> Project. Choose a directory to save project data to. After clicking OK, Project properties window will appear, where you can configure path to your binary, launch arguments, working directory etc. After clicking OK, the project is ready. In the left pane, you can switch between Vectorization and Threading workflows. Each has several possible steps which you can execute by clicking Collect button. Alternatively, you can click on Command Line, to see the command line required to run the analysis directly from command line. -References ----------- -1. [Intel® Advisor 2015 Tutorial: Find Where to Add Parallelism - C++ Sample](https://software.intel.com/en-us/advisorxe_2015_tut_lin_c) -2. [Product page](https://software.intel.com/en-us/intel-advisor-xe) -3. [Documentation](https://software.intel.com/en-us/intel-advisor-2016-user-guide-linux) +## References + +1. [Intel® Advisor 2015 Tutorial: Find Where to Add Parallelism - C++ Sample](https://software.intel.com/en-us/intel-advisor-tutorial-vectorization-windows-cplusplus) +1. [Product page](https://software.intel.com/en-us/intel-advisor-xe) +1. [Documentation](https://software.intel.com/en-us/intel-advisor-2016-user-guide-linux) diff --git a/docs.it4i/salomon/software/intel-suite/intel-compilers.md b/docs.it4i/salomon/software/intel-suite/intel-compilers.md index 0b61d00afc3b7ecc7122d56994313bfa8dafdc6d..63a05bd91e15c04afa6a3cc8d21231ba030437bc 100644 --- a/docs.it4i/salomon/software/intel-suite/intel-compilers.md +++ b/docs.it4i/salomon/software/intel-suite/intel-compilers.md @@ -1,5 +1,4 @@ -Intel Compilers -=============== +# Intel Compilers The Intel compilers in multiple versions are available, via module intel. The compilers include the icc C and C++ compiler and the ifort fortran 77/90/95 compiler. @@ -29,9 +28,9 @@ The compiler recognizes the omp, simd, vector and ivdep pragmas for OpenMP paral Read more at <https://software.intel.com/en-us/intel-cplusplus-compiler-16.0-user-and-reference-guide> -Sandy Bridge/Ivy Bridge/Haswell binary compatibility ----------------------------------------------------- +## Sandy Bridge/Ivy Bridge/Haswell Binary Compatibility + Anselm nodes are currently equipped with Sandy Bridge CPUs, while Salomon compute nodes are equipped with Haswell based architecture. The UV1 SMP compute server has Ivy Bridge CPUs, which are equivalent to Sandy Bridge (only smaller manufacturing technology). The new processors are backward compatible with the Sandy Bridge nodes, so all programs that ran on the Sandy Bridge processors, should also run on the new Haswell nodes. To get optimal performance out of the Haswell processors a program should make use of the special AVX2 instructions for this processor. 
One can do this by recompiling codes with the compiler flags designated to invoke these instructions. For the Intel compiler suite, there are two ways of doing this:

-- Using compiler flag (both for Fortran and C): -xCORE-AVX2. This will create a binary with AVX2 instructions, specifically for the Haswell processors. Note that the executable will not run on Sandy Bridge/Ivy Bridge nodes.
-- Using compiler flags (both for Fortran and C): -xAVX -axCORE-AVX2. This will generate multiple, feature specific auto-dispatch code paths for Intel® processors, if there is a performance benefit. So this binary will run both on Sandy Bridge/Ivy Bridge and Haswell processors. During runtime it will be decided which path to follow, dependent on which processor you are running on. In general this will result in larger binaries.
+* Using the compiler flag (both for Fortran and C): -xCORE-AVX2. This will create a binary with AVX2 instructions, specifically for the Haswell processors. Note that the executable will not run on Sandy Bridge/Ivy Bridge nodes.
+* Using the compiler flags (both for Fortran and C): -xAVX -axCORE-AVX2. This will generate multiple, feature-specific auto-dispatch code paths for Intel® processors, if there is a performance benefit. So this binary will run both on Sandy Bridge/Ivy Bridge and Haswell processors. During runtime it will be decided which path to follow, depending on which processor you are running on. In general this will result in larger binaries.
diff --git a/docs.it4i/salomon/software/intel-suite/intel-debugger.md b/docs.it4i/salomon/software/intel-suite/intel-debugger.md
index 7452cbb501860d8117a480bc0d6e524c73e49311..d0fef6ab7fbe2e50e8e7f8238585521bb5cb9695 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-debugger.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-debugger.md
@@ -1,11 +1,10 @@
-Intel Debugger
-==============
+# Intel Debugger

 IDB is no longer available since Intel Parallel Studio 2015

-Debugging serial applications
----------------------------
-The intel debugger version 13.0 is available, via module intel. The debugger works for applications compiled with C and C++ compiler and the ifort fortran 77/90/95 compiler. The debugger provides java GUI environment. Use [X display](../../../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/) for running the GUI.
+## Debugging Serial Applications
+
+The Intel debugger version 13.0 is available via module intel. The debugger works for applications compiled with the C and C++ compiler and the ifort fortran 77/90/95 compiler. The debugger provides a Java GUI environment. Use [X display](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/) for running the GUI.

```bash
 $ module load intel/2014.06
@@ -19,7 +18,7 @@ The debugger may run in text mode. To debug in text mode, use

 $ idbc
```

-To debug on the compute nodes, module intel must be loaded. The GUI on compute nodes may be accessed using the same way as in [the GUI section](../../../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/)
+To debug on the compute nodes, module intel must be loaded. 
The GUI on compute nodes may be accessed the same way as in [the GUI section](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/)

 Example:

@@ -36,13 +35,13 @@ Example:

 In this example, we allocate 1 full compute node, compile program myprog.c with debugging options -O0 -g and run the idb debugger interactively on the myprog.x executable. The GUI access is via X11 port forwarding provided by the PBS workload manager.

-Debugging parallel applications
-------------------------------
+## Debugging Parallel Applications
+
 Intel debugger is capable of debugging multithreaded and MPI parallel programs as well.

-### Small number of MPI ranks
+### Small Number of MPI Ranks

-For debugging small number of MPI ranks, you may execute and debug each rank in separate xterm terminal (do not forget the [X display](../../../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/)). Using Intel MPI, this may be done in following way:
+For debugging a small number of MPI ranks, you may execute and debug each rank in a separate xterm terminal (do not forget the [X display](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/)). Using Intel MPI, this may be done in the following way:

```bash
 $ qsub -q qexp -l select=2:ncpus=24 -X -I
@@ -55,7 +54,7 @@ For debugging small number of MPI ranks, you may execute and debug each rank in

 In this example, we allocate 2 full compute nodes, run xterm on each node and start the idb debugger in command line mode, debugging two ranks of the mympiprog.x application. The xterm will pop up for each rank, with the idb prompt ready. The example is not limited to use of Intel MPI.

-### Large number of MPI ranks
+### Large Number of MPI Ranks

 Run the idb debugger from within the MPI debug option. This will cause the debugger to bind to all ranks and provide aggregated outputs across the ranks, pausing execution automatically just after startup. You may then set break points and step the execution manually. Using Intel MPI:

@@ -68,10 +67,10 @@ Run the idb debugger from within the MPI debug option. This will cause the debug

 $ mpirun -n 48 -idb ./mympiprog.x
```

-### Debugging multithreaded application
+### Debugging Multithreaded Application

 Run the idb debugger in GUI mode. The menu Parallel contains a number of tools for debugging multiple threads. One of the most useful tools is the **Serialize Execution** tool, which serializes execution of concurrent threads for easy orientation and identification of concurrency related bugs.

-Further information
------------------
+## Further Information
+
 An exhaustive manual on idb features and usage is published at the Intel website, <https://software.intel.com/sites/products/documentation/doclib/iss/2013/compiler/cpp-lin/>
diff --git a/docs.it4i/salomon/software/intel-suite/intel-inspector.md b/docs.it4i/salomon/software/intel-suite/intel-inspector.md
index 992b7bd15450d1d523b2f149d1fd1e0c2f99b206..6231a65347abc13d442aea0586d6003ac7d3c798 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-inspector.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-inspector.md
@@ -1,21 +1,20 @@
-Intel Inspector
-===============
+# Intel Inspector

 Intel Inspector is a dynamic memory and threading error checking tool for C/C++/Fortran applications. It can detect issues such as memory leaks, invalid memory references, uninitialized variables, race conditions, deadlocks etc. 
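Before the GUI and batch workflows described below, here is a hedged sketch of what a minimal batch-mode memory check could look like; the binary and result directory names are placeholders, and the analysis type codes should be verified against the Inspector documentation:

```bash
# Hypothetical sketch: run a basic memory-error analysis (mi1) from the command line.
# ./myprog.x and ./inspector_results are placeholder names.
$ module load Inspector/2016_update1
$ inspxe-cl -collect mi1 -result-dir ./inspector_results -- ./myprog.x
# The result can later be inspected in the GUI with inspxe-gui.
```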
-Installed versions
-----------------
+## Installed Versions
+
 The following versions are currently available on Salomon as modules:

 2016 Update 1 - Inspector/2016_update1

-Usage
----
+## Usage
+
 Your program should be compiled with the -g switch to include symbol names. Optimizations can be turned on.

 Debugging is possible either directly from the GUI, or from the command line.

-### GUI mode
+### GUI Mode

 To debug from the GUI, launch Inspector:

@@ -23,18 +22,18 @@ To debug from GUI, launch Inspector:

 $ inspxe-gui &
```

-Then select menu File -> New -> Project. Choose a directory to save project data to. After clicking OK, Project properties window will appear, where you can configure path to your binary, launch arguments, working directory etc. After clicking OK, the project is ready.
+Then select menu File -> New -> Project. Choose a directory to save project data to. After clicking OK, the Project properties window will appear, where you can configure the path to your binary, launch arguments, working directory etc. After clicking OK, the project is ready.

 In the main pane, you can start a predefined analysis type or define your own. Click Start to start the analysis. Alternatively, you can click on Command Line, to see the command line required to run the analysis directly from the command line.

-### Batch mode
+### Batch Mode
+
+Analysis can also be run from the command line in batch mode. Batch mode analysis is run with the command inspxe-cl. To obtain the required parameters, either consult the documentation, or configure the analysis in the GUI and then click the "Command Line" button in the lower right corner to see the respective command line.

-Analysis can be also run from command line in batch mode. Batch mode analysis is run with command inspxe-cl. To obtain the required parameters, either consult the documentation or you can configure the analysis in the GUI and then click "Command Line" button in the lower right corner to the respective command line.
+Results obtained from batch mode can then be viewed in the GUI by selecting File -> Open -> Result...

-Results obtained from batch mode can be then viewed in the GUI by selecting File -> Open -> Result...
+## References

-References
---------
-1. [Product page](https://software.intel.com/en-us/intel-inspector-xe)
-2. [Documentation and Release Notes](https://software.intel.com/en-us/intel-inspector-xe-support/documentation)
-3. [Tutorials](https://software.intel.com/en-us/articles/inspectorxe-tutorials)
+1. [Product page](https://software.intel.com/en-us/intel-inspector-xe)
+1. [Documentation and Release Notes](https://software.intel.com/en-us/intel-inspector-xe-support/documentation)
+1. [Tutorials](https://software.intel.com/en-us/articles/inspectorxe-tutorials)
diff --git a/docs.it4i/salomon/software/intel-suite/intel-integrated-performance-primitives.md b/docs.it4i/salomon/software/intel-suite/intel-integrated-performance-primitives.md
index b324e2339143e63a43e6de72a86cd0d83682b9db..ead2008dc115bd5b8d7d76a623e9fe22b9161d56 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-integrated-performance-primitives.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-integrated-performance-primitives.md
@@ -1,8 +1,7 @@
-Intel IPP
-=========
+# Intel IPP
+
+## Intel Integrated Performance Primitives

-Intel Integrated Performance Primitives
--------------------------------------
 Intel Integrated Performance Primitives, version 9.0.1, compiled for AVX2 vector instructions is available, via module ipp. 
The IPP is a very rich library of highly optimized algorithmic building blocks for media and data applications. This includes signal, image and frame processing algorithms, such as FFT, FIR, Convolution, Optical Flow, Hough transform, Sum, MinMax, as well as cryptographic functions, linear algebra functions and many more. Check out IPP before implementing own math functions for data processing, it is likely already there. @@ -13,8 +12,7 @@ Check out IPP before implementing own math functions for data processing, it is The module sets up environment variables, required for linking and running ipp enabled applications. -IPP example ------------ +## IPP Example ```cpp #include "ipp.h" @@ -75,8 +73,8 @@ You will need the ipp module loaded to run the ipp enabled executable. This may $ icc testipp.c -o testipp.x -Wl,-rpath=$LIBRARY_PATH -lippi -lipps -lippcore ``` -Code samples and documentation ------------------------------- +## Code Samples and Documentation + Intel provides number of [Code Samples for IPP](https://software.intel.com/en-us/articles/code-samples-for-intel-integrated-performance-primitives-library), illustrating use of IPP. Read full documentation on IPP [on Intel website,](http://software.intel.com/sites/products/search/search.php?q=&x=15&y=6&product=ipp&version=7.1&docos=lin) in particular the [IPP Reference manual.](http://software.intel.com/sites/products/documentation/doclib/ipp_sa/71/ipp_manual/index.htm) diff --git a/docs.it4i/salomon/software/intel-suite/intel-mkl.md b/docs.it4i/salomon/software/intel-suite/intel-mkl.md index 43a2ff1e310d33d6bc0c9d8984a71c5eb4681454..322492010827e5dc2cc63d6ccd7cb3452f1a4214 100644 --- a/docs.it4i/salomon/software/intel-suite/intel-mkl.md +++ b/docs.it4i/salomon/software/intel-suite/intel-mkl.md @@ -1,19 +1,17 @@ -Intel MKL -========= +# Intel MKL +## Intel Math Kernel Library -Intel Math Kernel Library -------------------------- Intel Math Kernel Library (Intel MKL) is a library of math kernel subroutines, extensively threaded and optimized for maximum performance. Intel MKL provides these basic math kernels: -- BLAS (level 1, 2, and 3) and LAPACK linear algebra routines, offering vector, vector-matrix, and matrix-matrix operations. -- The PARDISO direct sparse solver, an iterative sparse solver, and supporting sparse BLAS (level 1, 2, and 3) routines for solving sparse systems of equations. -- ScaLAPACK distributed processing linear algebra routines for Linux* and Windows* operating systems, as well as the Basic Linear Algebra Communications Subprograms (BLACS) and the Parallel Basic Linear Algebra Subprograms (PBLAS). -- Fast Fourier transform (FFT) functions in one, two, or three dimensions with support for mixed radices (not limited to sizes that are powers of 2), as well as distributed versions of these functions. -- Vector Math Library (VML) routines for optimized mathematical operations on vectors. -- Vector Statistical Library (VSL) routines, which offer high-performance vectorized random number generators (RNG) for several probability distributions, convolution and correlation routines, and summary statistics functions. -- Data Fitting Library, which provides capabilities for spline-based approximation of functions, derivatives and integrals of functions, and search. -- Extended Eigensolver, a shared memory version of an eigensolver based on the Feast Eigenvalue Solver. +* BLAS (level 1, 2, and 3) and LAPACK linear algebra routines, offering vector, vector-matrix, and matrix-matrix operations. 
+* The PARDISO direct sparse solver, an iterative sparse solver, and supporting sparse BLAS (level 1, 2, and 3) routines for solving sparse systems of equations.
+* ScaLAPACK distributed processing linear algebra routines for Linux and Windows operating systems, as well as the Basic Linear Algebra Communications Subprograms (BLACS) and the Parallel Basic Linear Algebra Subprograms (PBLAS).
+* Fast Fourier transform (FFT) functions in one, two, or three dimensions with support for mixed radices (not limited to sizes that are powers of 2), as well as distributed versions of these functions.
+* Vector Math Library (VML) routines for optimized mathematical operations on vectors.
+* Vector Statistical Library (VSL) routines, which offer high-performance vectorized random number generators (RNG) for several probability distributions, convolution and correlation routines, and summary statistics functions.
+* Data Fitting Library, which provides capabilities for spline-based approximation of functions, derivatives and integrals of functions, and search.
+* Extended Eigensolver, a shared memory version of an eigensolver based on the Feast Eigenvalue Solver.

 For details see the [Intel MKL Reference Manual](http://software.intel.com/sites/products/documentation/doclib/mkl_sa/11/mklman/index.htm).

@@ -31,16 +29,16 @@ Intel MKL library may be linked using any compiler. With intel compiler use -mkl

 The Intel MKL library provides a number of interfaces. The fundamental ones are LP64 and ILP64. The Intel MKL ILP64 libraries use the 64-bit integer type (necessary for indexing large arrays, with more than 2^31^-1 elements), whereas the LP64 libraries index arrays with the 32-bit integer type.

-|Interface|Integer type|
-|---|---|
-|LP64|32-bit, int, integer(kind=4), MPI_INT|
-|ILP64|64-bit, long int, integer(kind=8), MPI_INT64|
+| Interface | Integer type                                 |
+| --------- | -------------------------------------------- |
+| LP64      | 32-bit, int, integer(kind=4), MPI_INT        |
+| ILP64     | 64-bit, long int, integer(kind=8), MPI_INT64 |

 ### Linking

 Linking Intel MKL libraries may be complex. The Intel [mkl link line advisor](http://software.intel.com/en-us/articles/intel-mkl-link-line-advisor) helps. See also the [examples](intel-mkl/#examples) below.

-You will need the mkl module loaded to run the mkl enabled executable. This may be avoided, by compiling library search paths into the executable. Include rpath on the compile line:
+You will need the mkl module loaded to run the mkl enabled executable. This may be avoided by compiling the library search paths into the executable. Include rpath on the compile line:

```bash
 $ icc .... -Wl,-rpath=$LIBRARY_PATH ...
@@ -59,11 +57,11 @@ For this to work, the application must link the threaded MKL library (default).

 The application will run with 24 threads with affinity optimized for fine grain parallelization.

-Examples
----------
+## Examples
+
 A number of examples demonstrating use of the Intel MKL library and its linking are available on the clusters, in the $MKL_EXAMPLES directory. In the examples below, we demonstrate linking Intel MKL to an Intel and a GNU compiled program for multi-threaded matrix multiplication.

-### Working with examples
+### Working With Examples

```bash
 $ module load intel
@@ -74,9 +72,9 @@ Number of examples, demonstrating use of the Intel MKL library and its linking i
 $ make sointel64 function=cblas_dgemm
```

-In this example, we compile, link and run the cblas_dgemm example, demonstrating use of MKL example suite installed on clusters. 
+In this example, we compile, link and run the cblas_dgemm example, demonstrating use of the MKL example suite installed on the clusters.

-### Example: MKL and Intel compiler
+### Example: MKL and Intel Compiler

```bash
 $ module load intel
@@ -88,16 +86,16 @@ In this example, we compile, link and run the cblas_dgemm example, demonstratin
 $ ./cblas_dgemmx.x data/cblas_dgemmx.d
```

-In this example, we compile, link and run the cblas_dgemm example, demonstrating use of MKL with icc -mkl option. Using the -mkl option is equivalent to:
+In this example, we compile, link and run the cblas_dgemm example, demonstrating use of MKL with the icc -mkl option. Using the -mkl option is equivalent to:

```bash
 $ icc -w source/cblas_dgemmx.c source/common_func.c -o cblas_dgemmx.x -I$MKL_INC_DIR -L$MKL_LIB_DIR -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5
```

-In this example, we compile and link the cblas_dgemm example, using LP64 interface to threaded MKL and Intel OMP threads implementation.
+In this example, we compile and link the cblas_dgemm example, using the LP64 interface to threaded MKL and the Intel OMP threads implementation.

-### Example: Intel MKL and GNU compiler
+### Example: Intel MKL and GNU Compiler

```bash
 $ module load GCC
@@ -111,16 +109,16 @@ In this example, we compile and link the cblas_dgemm example, using LP64 interf
 $ ./cblas_dgemmx.x data/cblas_dgemmx.d
```

-In this example, we compile, link and run the cblas_dgemm example, using LP64 interface to threaded MKL and gnu OMP threads implementation.
+In this example, we compile, link and run the cblas_dgemm example, using the LP64 interface to threaded MKL and the GNU OMP threads implementation.
+
+## MKL and MIC Accelerators

-MKL and MIC accelerators
-----------------------
 The Intel MKL is capable of automatically offloading the computations to the MIC accelerator. See section [Intel Xeon Phi](../intel-xeon-phi/) for details.

-LAPACKE C Interface
-----------------
+## LAPACKE C Interface
+
 MKL includes the LAPACKE C Interface to LAPACK. For some reason, although Intel is the author of LAPACKE, the LAPACKE header files are not present in MKL. For this reason, we have prepared a LAPACKE module, which includes Intel's LAPACKE headers from official LAPACK, which you can use to compile code using the LAPACKE interface against MKL.

-Further reading
-------------
+## Further Reading
+
 Read more on the [Intel website](http://software.intel.com/en-us/intel-mkl), in particular the [MKL users guide](https://software.intel.com/en-us/intel-mkl/documentation/linux).
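To make the LAPACKE paragraph above concrete, here is a hedged sketch of compiling a C program that calls the LAPACKE interface against MKL; the source file name is a placeholder, and the exact include path depends on what the LAPACKE module exports:

```bash
# Hypothetical sketch: build a C program using lapacke.h against threaded MKL.
# mysolver.c is a placeholder source file that includes <lapacke.h>.
$ module load intel LAPACKE
$ icc -mkl mysolver.c -o mysolver.x
# If the LAPACKE headers are not found automatically, add an explicit
# -I<path-to-lapacke-headers> pointing at the headers from the LAPACKE module.
```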
diff --git a/docs.it4i/salomon/software/intel-suite/intel-parallel-studio-introduction.md b/docs.it4i/salomon/software/intel-suite/intel-parallel-studio-introduction.md index 1fc21927a1f199d775f1894c5405069a3ea50069..4b1c9308957a43fafafb8f5c1280c11ba2bf81a1 100644 --- a/docs.it4i/salomon/software/intel-suite/intel-parallel-studio-introduction.md +++ b/docs.it4i/salomon/software/intel-suite/intel-parallel-studio-introduction.md @@ -1,21 +1,19 @@ -Intel Parallel Studio -===================== +# Intel Parallel Studio The Salomon cluster provides following elements of the Intel Parallel Studio XE -|Intel Parallel Studio XE| -| -------------------------------------------------| -|Intel Compilers| -|Intel Debugger| -|Intel MKL Library| -|Intel Integrated Performance Primitives Library| -|Intel Threading Building Blocks Library| -|Intel Trace Analyzer and Collector| -|Intel Advisor| -|Intel Inspector| +Intel Parallel Studio XE -Intel compilers ---------------- +* Intel Compilers +* Intel Debugger +* Intel MKL Library +* Intel Integrated Performance Primitives Library +* Intel Threading Building Blocks Library +* Intel Trace Analyzer and Collector +* Intel Advisor +* Intel Inspector + +## Intel Compilers The Intel compilers version 131.3 are available, via module iccifort/2013.5.192-GCC-4.8.3. The compilers include the icc C and C++ compiler and the ifort fortran 77/90/95 compiler. @@ -27,11 +25,12 @@ The Intel compilers version 131.3 are available, via module iccifort/2013.5.192- Read more at the [Intel Compilers](intel-compilers/) page. -Intel debugger --------------- +## Intel Debugger + IDB is no longer available since Parallel Studio 2015. The intel debugger version 13.0 is available, via module intel. The debugger works for applications compiled with C and C++ compiler and the ifort fortran 77/90/95 compiler. The debugger provides java GUI environment. + ```bash $ module load intel $ idb @@ -39,8 +38,8 @@ The intel debugger version 13.0 is available, via module intel. The debugger wor Read more at the [Intel Debugger](intel-debugger/) page. -Intel Math Kernel Library -------------------------- +## Intel Math Kernel Library + Intel Math Kernel Library (Intel MKL) is a library of math kernel subroutines, extensively threaded and optimized for maximum performance. Intel MKL unites and provides these basic components: BLAS, LAPACK, ScaLapack, PARDISO, FFT, VML, VSL, Data fitting, Feast Eigensolver and many more. ```bash @@ -49,8 +48,8 @@ Intel Math Kernel Library (Intel MKL) is a library of math kernel subroutines, e Read more at the [Intel MKL](intel-mkl/) page. -Intel Integrated Performance Primitives ---------------------------------------- +## Intel Integrated Performance Primitives + Intel Integrated Performance Primitives, version 7.1.1, compiled for AVX is available, via module ipp. The IPP is a library of highly optimized algorithmic building blocks for media and data applications. This includes signal, image and frame processing algorithms, such as FFT, FIR, Convolution, Optical Flow, Hough transform, Sum, MinMax and many more. ```bash @@ -59,8 +58,8 @@ Intel Integrated Performance Primitives, version 7.1.1, compiled for AVX is avai Read more at the [Intel IPP](intel-integrated-performance-primitives/) page. -Intel Threading Building Blocks -------------------------------- +## Intel Threading Building Blocks + Intel Threading Building Blocks (Intel TBB) is a library that supports scalable parallel programming using standard ISO C++ code. 
It does not require special languages or compilers. It is designed to promote scalable data parallel programming. Additionally, it fully supports nested parallelism, so you can build larger parallel components from smaller parallel components. To use the library, you specify tasks, not threads, and let the library map tasks onto threads in an efficient manner.

```bash
diff --git a/docs.it4i/salomon/software/intel-suite/intel-tbb.md b/docs.it4i/salomon/software/intel-suite/intel-tbb.md
index 7d05e24e1241192b0a390337d407a2feed0887cd..94e32f39073b41801f20391b04cc5081f99649f7 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-tbb.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-tbb.md
@@ -1,8 +1,7 @@
-Intel TBB
-=========
+# Intel TBB
+
+## Intel Threading Building Blocks

-Intel Threading Building Blocks
------------------------------
 Intel Threading Building Blocks (Intel TBB) is a library that supports scalable parallel programming using standard ISO C++ code. It does not require special languages or compilers. To use the library, you specify tasks, not threads, and let the library map tasks onto threads in an efficient manner. The tasks are executed by a runtime scheduler and may be offloaded to the [MIC accelerator](../intel-xeon-phi/).

 Intel TBB version 4.3.5.187 is available on the cluster.

@@ -15,8 +14,8 @@ The module sets up environment variables, required for linking and running tbb e

 Link the tbb library, using -ltbb

-Examples
-------
+## Examples
+
 A number of examples demonstrating use of TBB and its built-in scheduler are available on Anselm, in the $TBB_EXAMPLES directory.

```bash
@@ -36,6 +35,6 @@ You will need the tbb module loaded to run the tbb enabled executable. This may

 $ icc -O2 -o primes.x main.cpp primes.cpp -Wl,-rpath=$LIBRARY_PATH -ltbb
```

-Further reading
------------
+## Further Reading
+
 Read more on the Intel website, <http://software.intel.com/sites/products/documentation/doclib/tbb_sa/help/index.htm>
diff --git a/docs.it4i/salomon/software/intel-suite/intel-trace-analyzer-and-collector.md b/docs.it4i/salomon/software/intel-suite/intel-trace-analyzer-and-collector.md
index e88fff56b1fbe779b4ee4d5e94b94df281145231..5d4513d306d1b9a4bf159c71231c9677cc2b8165 100644
--- a/docs.it4i/salomon/software/intel-suite/intel-trace-analyzer-and-collector.md
+++ b/docs.it4i/salomon/software/intel-suite/intel-trace-analyzer-and-collector.md
@@ -1,16 +1,15 @@
-Intel Trace Analyzer and Collector
-==================================
+# Intel Trace Analyzer and Collector

 Intel Trace Analyzer and Collector (ITAC) is a tool to collect and graphically analyze the behaviour of MPI applications. It helps you to analyze communication patterns of your application, identify hotspots, perform correctness checking (identifying deadlocks, data corruption etc.), and simulate how your application would run on a different interconnect.

 ITAC is an offline analysis tool - first you run your application to collect a trace file, then you can open the trace in a GUI analyzer to view it.

-Installed version
---------------
-Currently on Salomon is version 9.1.2.024 available as module itac/9.1.2.024
+## Installed Version
+
+Version 9.1.2.024 is currently available on Salomon as module itac/9.1.2.024
+
+## Collecting Traces

-Collecting traces
---------------
 ITAC can collect traces from applications that are using Intel MPI. 
To generate The trace will be saved in file myapp.stf in the current directory. -Viewing traces -------------- -To view and analyze the trace, open ITAC GUI in a [graphical environment](../../../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/): +## Viewing Traces + +To view and analyze the trace, open ITAC GUI in a [graphical environment](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/): ```bash $ module load itac/9.1.2.024 $ traceanalyzer ``` -The GUI will launch and you can open the produced *.stf file. +The GUI will launch and you can open the produced `*.stf` file. Please refer to the Intel documentation about usage of the GUI tool. -References ---------- -1. [Getting Started with Intel® Trace Analyzer and Collector](https://software.intel.com/en-us/get-started-with-itac-for-linux) -2. [Intel® Trace Analyzer and Collector - Documentation](http://Intel®%20Trace%20Analyzer%20and%20Collector%20-%20Documentation) +## References +1. [Getting Started with Intel® Trace Analyzer and Collector](https://software.intel.com/en-us/get-started-with-itac-for-linux) +1. [Intel® Trace Analyzer and Collector - Documentation](https://software.intel.com/en-us/intel-trace-analyzer) diff --git a/docs.it4i/salomon/software/intel-xeon-phi.md b/docs.it4i/salomon/software/intel-xeon-phi.md index 19ec77df6a20de50e769244affc8cc63391aceb1..26c87cb2aab21e606d205161f2a3b62bf4058d2c 100644 --- a/docs.it4i/salomon/software/intel-xeon-phi.md +++ b/docs.it4i/salomon/software/intel-xeon-phi.md @@ -1,12 +1,11 @@ -Intel Xeon Phi -============== +# Intel Xeon Phi -##A guide to Intel Xeon Phi usage +## Guide to Intel Xeon Phi Usage Intel Xeon Phi can be programmed in several modes. The default mode on Anselm is offload mode, but all modes described in this document are supported. -Intel Utilities for Xeon Phi ---------------------------- +## Intel Utilities for Xeon Phi + To get access to a compute node with Intel Xeon Phi accelerator, use the PBS interactive session ```bash @@ -89,8 +88,8 @@ The output of the "micinfo" utility executed on one of the Anselm node is as fol GDDR Voltage : 1501000 uV ``` -Offload Mode ------------- +## Offload Mode + To compile a code for Intel Xeon Phi an MPSS stack has to be installed on the machine where compilation is executed. Currently the MPSS stack is only installed on compute nodes equipped with accelerators. ```bash @@ -104,7 +103,10 @@ For debugging purposes it is also recommended to set environment variable "OFFLO export OFFLOAD_REPORT=3 ``` -A very basic example of code that employs offload programming technique is shown in the next listing. Please note that this code is sequential and utilizes only single core of the accelerator. +A very basic example of code that employs the offload programming technique is shown in the next listing. + +!!! note + This code is sequential and utilizes only a single core of the accelerator. ```bash $ vim source-offload.cpp @@ -230,16 +232,16 @@ During the compilation Intel compiler shows which loops have been vectorized in Some interesting compiler flags useful not only for code debugging are: -!!! Note "Note" - Debugging +!!! note + Debugging openmp_report[0|1|2] - controls the OpenMP parallelizer diagnostic level vec-report[0|1|2] - controls the compiler based vectorization diagnostic level Performance optimization xhost - FOR HOST ONLY - to generate AVX (Advanced Vector Extensions) instructions. 
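For illustration, a possible compile line combining these diagnostics (a hedged sketch only; flag spellings vary between Intel compiler versions, and `source-offload.cpp` is the listing shown earlier):

```bash
# -vec-report2 reports which loops were (not) vectorized,
# -openmp-report2 reports OpenMP parallelization diagnostics
$ icc source-offload.cpp -o bin-offload -vec-report2 -openmp-report2
```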
-Automatic Offload using Intel MKL Library ------------------------------------------ +## Automatic Offload Using Intel MKL Library + Intel MKL includes an Automatic Offload (AO) feature that enables computationally intensive MKL functions called in user code to benefit from attached Intel Xeon Phi coprocessors automatically and transparently. Behavior of the automatic offload mode is controlled by functions called within the program or by environment variables. A complete list of controls is listed [here](http://software.intel.com/sites/products/documentation/doclib/mkl_sa/11/mkl_userguide_lnx/GUID-3DC4FC7D-A1E4-423D-9C0C-06AB265FFA86.htm). @@ -256,9 +258,9 @@ or by setting environment variable $ export MKL_MIC_ENABLE=1 ``` -To get more information about automatic offload please refer to "[Using Intel® MKL Automatic Offload on Intel ® Xeon Phi™ Coprocessors](http://software.intel.com/sites/default/files/11MIC42_How_to_Use_MKL_Automatic_Offload_0.pdf)" white paper or [ Intel MKL documentation](https://software.intel.com/en-us/articles/intel-math-kernel-library-documentation). +To get more information about automatic offload please refer to the "[Using Intel® MKL Automatic Offload on Intel ® Xeon Phi™ Coprocessors](http://software.intel.com/sites/default/files/11MIC42_How_to_Use_MKL_Automatic_Offload_0.pdf)" white paper or the [Intel MKL documentation](https://software.intel.com/en-us/articles/intel-math-kernel-library-documentation). -### Automatic offload example +### Automatic Offload Example At first get an interactive PBS session on a node with MIC accelerator and load the "intel" module that automatically loads the "mkl" module as well. @@ -326,8 +328,8 @@ Following example show how to automatically offload an SGEMM (single precision - } ``` -!!! Note "Note" - Please note: This example is simplified version of an example from MKL. The expanded version can be found here: **$MKL_EXAMPLES/mic_ao/blasc/source/sgemm.c** +!!! note + This example is a simplified version of an example from MKL. The expanded version can be found here: **$MKL_EXAMPLES/mic_ao/blasc/source/sgemm.c** To compile a code using the Intel compiler use: @@ -357,8 +359,8 @@ The output of a code should look similar to following listing, where lines start Done ``` -Native Mode ----------- +## Native Mode + In the native mode a program is executed directly on Intel Xeon Phi without involvement of the host machine. Similarly to offload mode, the code is compiled on the host computer with Intel compilers. To compile a code, the user has to be connected to a compute node with a MIC and load the Intel compilers module. To get an interactive session on a compute node with an Intel Xeon Phi and load the module use the following commands: @@ -369,8 +371,8 @@ To compile a code user has to be connected to a compute with MIC and load Intel $ module load intel/13.5.192 ``` -!!! Note "Note" - Please note that particular version of the Intel module is specified. This information is used later to specify the correct library paths. +!!! note + A particular version of the Intel module is specified. This information is used later to specify the correct library paths. To produce a binary compatible with the Intel Xeon Phi architecture, the user has to specify the "-mmic" compiler flag. Two compilation examples are shown below. 
The first example shows how to compile OpenMP parallel code "vect-add.c" for host only: @@ -412,20 +414,19 @@ If the code is parallelized using OpenMP a set of additional libraries is requir mic0 $ export LD_LIBRARY_PATH=/apps/intel/composer_xe_2013.5.192/compiler/lib/mic:$LD_LIBRARY_PATH ``` -!!! Note "Note" - Please note that the path exported in the previous example contains path to a specific compiler (here the version is 5.192). This version number has to match with the version number of the Intel compiler module that was used to compile the code on the host computer. +!!! note + The exported path contains the path to a specific compiler (here the version is 5.192). This version number has to match the version number of the Intel compiler module that was used to compile the code on the host computer. For your information, the list of libraries and their location required for execution of an OpenMP parallel code on Intel Xeon Phi is: -!!! Note "Note" - /apps/intel/composer_xe_2013.5.192/compiler/lib/mic - - - libiomp5.so - - libimf.so - - libsvml.so - - libirng.so - - libintlc.so.5 +!!! note + /apps/intel/composer_xe_2013.5.192/compiler/lib/mic + - libiomp5.so + - libimf.so + - libsvml.so + - libirng.so + - libintlc.so.5 Finally, to run the compiled code use: @@ -433,8 +434,8 @@ Finally, to run the compiled code use: $ ~/path_to_binary/vect-add-mic ``` -OpenCL ------------------- +## OpenCL + OpenCL (Open Computing Language) is an open standard for general-purpose parallel programming for a diverse mix of multi-core CPUs, GPU coprocessors, and other parallel processors. OpenCL provides a flexible execution model and uniform programming environment for software developers to write portable code for systems running on both the CPU and graphics processors or accelerators like the Intel® Xeon Phi. On Anselm OpenCL is installed only on compute nodes with a MIC accelerator, therefore OpenCL code can be compiled only on these nodes. @@ -499,8 +500,8 @@ After executing the complied binary file, following output should be displayed. ... ``` -!!! Note "Note" - More information about this example can be found on Intel website: <http://software.intel.com/en-us/vcsource/samples/caps-basic/> +!!! note + More information about this example can be found on the Intel website: <http://software.intel.com/en-us/vcsource/samples/caps-basic/> The second example that can be found in the "/apps/intel/opencl-examples" directory is General Matrix Multiply. You can follow the same procedure to download the example to your directory and compile it. @@ -517,7 +518,7 @@ The compilation command for this example is: $ g++ cmdoptions.cpp gemm.cpp ../common/basic.cpp ../common/cmdparser.cpp ../common/oclobject.cpp -I../common -lOpenCL -o gemm -I/apps/intel/opencl/include/ ``` -To see the performance of Intel Xeon Phi performing the DGEMM run the example as follows: +To see the performance of Intel Xeon Phi performing the DGEMM run the example as follows: ```bash ./gemm -d 1 @@ -539,13 +540,12 @@ To see the performance of Intel Xeon Phi performing the DGEMM run the example as ... ``` -!!! Note "Note" - Please note: GNU compiler is used to compile the OpenCL codes for Intel MIC. You do not need to load Intel compiler module. +!!! hint + The GNU compiler is used to compile the OpenCL codes for Intel MIC. You do not need to load the Intel compiler module. 
-MPI ----------------- +## MPI -### Environment setup and compilation +### Environment Setup and Compilation Again, an MPI code for Intel Xeon Phi has to be compiled on a compute node with an accelerator and the MPSS software stack installed. To get to a compute node with an accelerator, use: @@ -598,18 +598,18 @@ An example of basic MPI version of "hello-world" example in C language, that can } ``` -### MPI programming models +### MPI Programming Models Intel MPI for the Xeon Phi coprocessors offers different MPI programming models: -!!! Note "Note" - **Host-only model** - all MPI ranks reside on the host. The coprocessors can be used by using offload pragmas. (Using MPI calls inside offloaded code is not supported.) +!!! note + **Host-only model** - all MPI ranks reside on the host. The coprocessors can be used by using offload pragmas. (Using MPI calls inside offloaded code is not supported.) - **Coprocessor-only model** - all MPI ranks reside only on the coprocessors. + **Coprocessor-only model** - all MPI ranks reside only on the coprocessors. - **Symmetric model** - the MPI ranks reside on both the host and the coprocessor. Most general MPI case. + **Symmetric model** - the MPI ranks reside on both the host and the coprocessor. Most general MPI case. -###Host-only model +### Host-Only Model In this case all environment variables are set by modules, so to execute the compiled MPI program on a single node, use: @@ -626,12 +626,12 @@ The output should be similar to: Hello world from process 0 of 4 on host cn207 ``` -### Coprocessor-only model +### Coprocessor-Only Model There are two ways to execute an MPI code on a single coprocessor: 1.) launch the program using "**mpirun**" from the coprocessor; or 2.) launch the task using "**mpiexec.hydra**" from a host. -**Execution on coprocessor** +#### Execution on Coprocessor Similarly to execution of OpenMP programs in native mode, since the environment modules are not supported on the MIC, the user has to set up paths to Intel MPI libraries and binaries manually. One time setup can be done by creating a "**.profile**" file in the user's home directory. This file sets up the environment on the MIC automatically once the user accesses the accelerator through SSH. @@ -649,11 +649,9 @@ Similarly to execution of OpenMP programs in native mode, since the environmenta export PATH=/apps/intel/impi/4.1.1.036/mic/bin/:$PATH ``` -!!! Note "Note" - Please note: - - - this file sets up both environmental variable for both MPI and OpenMP libraries. - - this file sets up the paths to a particular version of Intel MPI library and particular version of an Intel compiler. These versions have to match with loaded modules. +!!! note + * this file sets up environment variables for both MPI and OpenMP libraries. + * this file sets up the paths to a particular version of the Intel MPI library and a particular version of an Intel compiler. These versions have to match the loaded modules. To access a MIC accelerator located on a node that the user is currently connected to, use: @@ -682,7 +680,7 @@ The output should be similar to: Hello world from process 0 of 4 on host cn207-mic0 ``` -**Execution on host** +#### Execution on Host If the MPI program is launched from the host instead of the coprocessor, the environment variables are not set using the ".profile" file. Therefore the user has to specify library paths from the command line when calling "mpiexec". 
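For example, a sketch of such an mpiexec.hydra invocation (mirroring the mpirun command shown next; the library path follows the Intel MPI 4.1.1.036 layout used throughout this page):

```bash
$ mpiexec.hydra -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/ -host mic0 -n 4 ~/mpi-test-mic
```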
@@ -704,10 +702,9 @@ or using mpirun $ mpirun -genv LD_LIBRARY_PATH /apps/intel/impi/4.1.1.036/mic/lib/ -host mic0 -n 4 ~/mpi-test-mic ``` -!!! Note "Note" - Please note: - - the full path to the binary has to specified (here: "**>~/mpi-test-mic**") - - the LD_LIBRARY_PATH has to match with Intel MPI module used to compile the MPI code +!!! note + * the full path to the binary has to be specified (here: "**~/mpi-test-mic**") + * the LD_LIBRARY_PATH has to match the Intel MPI module used to compile the MPI code The output should be again similar to: @@ -718,15 +715,17 @@ The output should be again similar to: Hello world from process 0 of 4 on host cn207-mic0 ``` -!!! Note "Note" - Please note that the **"mpiexec.hydra"** requires a file the MIC filesystem. If the file is missing please contact the system administrators. A simple test to see if the file is present is to execute: +!!! hint + **"mpiexec.hydra"** requires a file on the MIC filesystem. If the file is missing, please contact the system administrators. + +A simple test to see if the file is present is to execute: ```bash $ ssh mic0 ls /bin/pmi_proxy /bin/pmi_proxy ``` -**Execution on host - MPI processes distributed over multiple accelerators on multiple nodes** +#### Execution on Host - MPI Processes Distributed Over Multiple Accelerators on Multiple Nodes To get access to multiple nodes with MIC accelerators, the user has to use PBS to allocate the resources. To start an interactive session that allocates 2 compute nodes = 2 MIC accelerators, run the qsub command with the following parameters: @@ -751,12 +750,11 @@ For example: This output means that the PBS allocated nodes cn204 and cn205, which means that the user has direct access to the "**cn204-mic0**" and "**cn205-mic0**" accelerators. -!!! Note "Note" - Please note: At this point user can connect to any of the allocated nodes or any of the allocated MIC accelerators using ssh: - - - to connect to the second node : ** $ ssh cn205** - - to connect to the accelerator on the first node from the first node: **$ ssh cn204-mic0** or **$ ssh mic0** - - to connect to the accelerator on the second node from the first node: **$ ssh cn205-mic0** +!!! note + At this point the user can connect to any of the allocated nodes or any of the allocated MIC accelerators using ssh: + - to connect to the second node: `$ ssh cn205` + - to connect to the accelerator on the first node from the first node: `$ ssh cn204-mic0` or `$ ssh mic0` + - to connect to the accelerator on the second node from the first node: `$ ssh cn205-mic0` At this point we expect that the correct modules are loaded and the binary is compiled. For parallel execution the mpiexec.hydra is used. Again the first step is to tell mpiexec that the MPI can be executed on MIC accelerators by setting up the environment variable "I_MPI_MIC" @@ -774,6 +772,7 @@ The launch the MPI program use: -host cn204-mic0 -n 4 ~/mpi-test-mic : -host cn205-mic0 -n 6 ~/mpi-test-mic ``` + or using mpirun: ```bash @@ -811,7 +810,7 @@ The same way MPI program can be executed on multiple hosts: : -host cn205 -n 6 ~/mpi-test ``` -###Symmetric model +### Symmetric Model In the symmetric mode, MPI programs are executed on both the host computer(s) and the MIC accelerator(s). Since the MIC has a different architecture and requires a different binary file produced by the Intel compiler, two different files have to be compiled before the MPI program is executed. 
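A minimal sketch of preparing the two binaries (assuming the `mpi-test.c` hello-world source used earlier on this page; `mpiicc` and the `-mmic` flag are described above):

```bash
$ mpiicc -o mpi-test mpi-test.c            # binary for the host processors
$ mpiicc -mmic -o mpi-test-mic mpi-test.c  # binary for the MIC coprocessor
```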
@@ -873,7 +872,7 @@ To run the MPI code using mpirun and the machine file "hosts_file_mix" use: A possible output of the MPI "hello-world" example executed on two hosts and two accelerators is: ```bash - Hello world from process 0 of 8 on host cn204 + Hello world from process 0 of 8 on host cn204 Hello world from process 1 of 8 on host cn204 Hello world from process 2 of 8 on host cn204-mic0 Hello world from process 3 of 8 on host cn204-mic0 @@ -883,22 +882,22 @@ A possible output of the MPI "hello-world" example executed on two hosts and two Hello world from process 7 of 8 on host cn205-mic0 ``` -!!! Note "Note" - Please note: At this point the MPI communication between MIC accelerators on different nodes uses 1Gb Ethernet only. +!!! note + At this point the MPI communication between MIC accelerators on different nodes uses 1Gb Ethernet only. -**Using the PBS automatically generated node-files** +#### Using the PBS Automatically Generated Node-Files PBS also generates a set of node-files that can be used instead of manually creating a new one every time. Three node-files are generated: -!!! Note "Note" - **Host only node-file:** +!!! note + **Host only node-file:** - /lscratch/${PBS_JOBID}/nodefile-cn MIC only node-file: - /lscratch/${PBS_JOBID}/nodefile-mic Host and MIC node-file: - /lscratch/${PBS_JOBID}/nodefile-mix -Please note each host or accelerator is listed only per files. User has to specify how many jobs should be executed per node using "-n" parameter of the mpirun command. +Each host or accelerator is listed only once per file. The user has to specify how many processes should be executed per node using the "-n" parameter of the mpirun command. + +## Optimization -Optimization ------------- -For more details about optimization techniques please read Intel document [Optimization and Performance Tuning for Intel® Xeon Phi™ Coprocessors](http://software.intel.com/en-us/articles/optimization-and-performance-tuning-for-intel-xeon-phi-coprocessors-part-1-optimization "http://software.intel.com/en-us/articles/optimization-and-performance-tuning-for-intel-xeon-phi-coprocessors-part-1-optimization") +For more details about optimization techniques please read the Intel document [Optimization and Performance Tuning for Intel® Xeon Phi™ Coprocessors](http://software.intel.com/en-us/articles/optimization-and-performance-tuning-for-intel-xeon-phi-coprocessors-part-1-optimization "http://software.intel.com/en-us/articles/optimization-and-performance-tuning-for-intel-xeon-phi-coprocessors-part-1-optimization") diff --git a/docs.it4i/salomon/software/java.md b/docs.it4i/salomon/software/java.md index 70b522d0b572bc9acda72317dba0baebe7465f85..703e53fc1093cf28aeb5c80b985174784e54ad90 100644 --- a/docs.it4i/salomon/software/java.md +++ b/docs.it4i/salomon/software/java.md @@ -1,7 +1,4 @@ -Java -==== - -**Java on the cluster** +# Java Java is available on the cluster. Activate Java by loading the Java module diff --git a/docs.it4i/salomon/software/mpi/Running_OpenMPI.md b/docs.it4i/salomon/software/mpi/Running_OpenMPI.md index d6656a1223ca0bfe00a90865795adead81303614..9aa54f09aa07ccde2daa1bfc5c6ff4daeab2b78b 100644 --- a/docs.it4i/salomon/software/mpi/Running_OpenMPI.md +++ b/docs.it4i/salomon/software/mpi/Running_OpenMPI.md @@ -1,11 +1,10 @@ -Running OpenMPI -=============== +# Running OpenMPI + +## OpenMPI Program Execution -OpenMPI program execution ------------------------- The OpenMPI programs may be executed only via the PBS Workload manager, by entering an appropriate queue. 
On the cluster, the **OpenMPI 1.8.6** is OpenMPI based MPI implementation. -### Basic usage +### Basic Usage Use the mpiexec to run the OpenMPI code. @@ -30,7 +29,7 @@ Example: Please be aware, that in this example, the directive **-pernode** is used to run only **one task per node**, which is normally an unwanted behaviour (unless you want to run hybrid code with just one MPI and 24 OpenMP tasks per node). In normal MPI programs **omit the -pernode directive** to run up to 24 MPI tasks per each node. In this example, we allocate 4 nodes via the express queue interactively. We set up the openmpi environment and interactively run the helloworld_mpi.x program. -Note that the executable helloworld_mpi.x must be available within the same path on all nodes. This is automatically fulfilled on the /home and /scratch filesystem. +Note that the executable helloworld_mpi.x must be available within the same path on all nodes. This is automatically fulfilled on the /home and /scratch filesystem. You need to preload the executable, if running on the local ramdisk /tmp filesystem @@ -51,7 +50,7 @@ MPI process mapping may be controlled by PBS parameters. The mpiprocs and ompthreads parameters allow for selection of number of running MPI processes per node as well as number of OpenMP threads per MPI process. -### One MPI process per node +### One MPI Process Per Node Follow this example to run one MPI process per node, 24 threads per process. @@ -65,7 +64,7 @@ Follow this example to run one MPI process per node, 24 threads per process. In this example, we demonstrate recommended way to run an MPI application, using 1 MPI processes per node and 24 threads per socket, on 4 nodes. -### Two MPI processes per node +### Two MPI Processes Per Node Follow this example to run two MPI processes per node, 8 threads per process. Note the options to mpiexec. @@ -79,7 +78,7 @@ Follow this example to run two MPI processes per node, 8 threads per process. No In this example, we demonstrate recommended way to run an MPI application, using 2 MPI processes per node and 12 threads per socket, each process and its threads bound to a separate processor socket of the node, on 4 nodes -### 24 MPI processes per node +### 24 MPI Processes Per Node Follow this example to run 24 MPI processes per node, 1 thread per process. Note the options to mpiexec. @@ -93,10 +92,10 @@ Follow this example to run 24 MPI processes per node, 1 thread per process. Note In this example, we demonstrate recommended way to run an MPI application, using 24 MPI processes per node, single threaded. Each process is bound to separate processor core, on 4 nodes. -### OpenMP thread affinity +### OpenMP Thread Affinity -!!! Note "Note" - Important! Bind every OpenMP thread to a core! +!!! note + Important! Bind every OpenMP thread to a core! In the previous two examples with one or two MPI processes per node, the operating system might still migrate OpenMP threads between cores. You might want to avoid this by setting these environment variable for GCC OpenMP: @@ -117,8 +116,8 @@ As of OpenMP 4.0 (supported by GCC 4.9 and later and Intel 14.0 and later) the f $ export OMP_PLACES=cores ``` -OpenMPI Process Mapping and Binding ------------------------------------------------- +## OpenMPI Process Mapping and Binding + The mpiexec allows for precise selection of how the MPI processes will be mapped to the computational nodes and how these processes will bind to particular processor sockets and cores. 
MPI process mapping may be specified by a hostfile or rankfile input to the mpiexec program. Although all implementations of MPI provide means for process mapping and binding, the following examples are valid for OpenMPI only. @@ -189,7 +188,7 @@ In this example we run 5 MPI processes (5 ranks) on four nodes. The rankfile def It is the user's responsibility to provide the correct number of ranks, sockets and cores. -### Bindings verification +### Bindings Verification In all cases, binding and threading may be verified by executing for example: @@ -199,15 +198,15 @@ In all cases, binding and threading may be verified by executing for example: $ mpiexec -bysocket -bind-to-socket echo $OMP_NUM_THREADS ``` -Changes in OpenMPI 1.8 ---------------------- +## Changes in OpenMPI 1.8 + Some options have changed in OpenMPI version 1.8. - |version 1.6.5 |version 1.8.1 | - | --- | --- | - |--bind-to-none |--bind-to none | - |--bind-to-core |--bind-to core | - |--bind-to-socket |--bind-to socket | - |-bysocket |--map-by socket | - |-bycore |--map-by core | - |-pernode |--map-by ppr:1:node | +| version 1.6.5 | version 1.8.1 | +| ---------------- | ------------------- | +| --bind-to-none | --bind-to none | +| --bind-to-core | --bind-to core | +| --bind-to-socket | --bind-to socket | +| -bysocket | --map-by socket | +| -bycore | --map-by core | +| -pernode | --map-by ppr:1:node | diff --git a/docs.it4i/salomon/software/mpi/mpi.md b/docs.it4i/salomon/software/mpi/mpi.md index e17e0a08c04bd1bc102ce0167049ce670d001aae..411d54ddabae7b32ef32f894f2cc466e93eeb866 100644 --- a/docs.it4i/salomon/software/mpi/mpi.md +++ b/docs.it4i/salomon/software/mpi/mpi.md @@ -1,17 +1,15 @@ -MPI -=== +# MPI -Setting up MPI Environment -------------------------- +## Setting Up MPI Environment The Salomon cluster provides several implementations of the MPI library: -|MPI Library|Thread support| -|---|---|--- -|**Intel MPI 4.1**|Full thread support up to, MPI_THREAD_MULTIPLE| -|**Intel MPI 5.0**|Full thread support up to, MPI_THREAD_MULTIPLE| -|OpenMPI 1.8.6|Full thread support up to, MPI_THREAD_MULTIPLE, MPI-3.0, support| -|SGI MPT 2.12 || +| MPI Library | Thread support | +| ----------------- | -------------------------------------------------------------- | +| **Intel MPI 4.1** | Full thread support up to MPI_THREAD_MULTIPLE | +| **Intel MPI 5.0** | Full thread support up to MPI_THREAD_MULTIPLE | +| OpenMPI 1.8.6 | Full thread support up to MPI_THREAD_MULTIPLE, MPI-3.0 support | +| SGI MPT 2.12 | | MPI libraries are activated via the environment modules. @@ -30,11 +28,10 @@ Look up section modulefiles/mpi in module avail There are default compilers associated with any particular MPI implementation. The defaults may be changed, the MPI libraries may be used in conjunction with any compiler. The defaults are selected via the modules in the following way - -|Module|MPI|Compiler suite| -|---|---| -|impi-5.0.3.048-iccifort- Intel MPI 5.0.3| 2015.3.187|| -| OpenMP-1.8.6-GNU-5.1.0-2 OpenMPI 1.8.6| .25|| +| Module | MPI | Compiler suite | +| ---------------------------------- | --------------- | -------------- | +| impi-5.0.3.048-iccifort-2015.3.187 | Intel MPI 5.0.3 | | +| OpenMPI-1.8.6-GNU-5.1.0-2.25 | OpenMPI 1.8.6 | | Examples: @@ -52,8 +49,8 @@ To use OpenMPI with the intel compiler suite, use In this example, the OpenMPI 1.8.6 using the Intel compilers is activated. The "iompi" toolchain is used. 
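As a quick sanity check that the intended toolchain is active, the OpenMPI compiler wrapper can report the underlying compiler (a hedged aside: `--showme` is the OpenMPI wrapper's introspection flag and does not apply to Intel MPI wrappers):

```bash
$ which mpicc      # confirm the wrapper comes from the loaded module
$ mpicc --showme   # OpenMPI only: print the underlying compiler and flags
```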
-Compiling MPI Programs ---------------------- +## Compiling MPI Programs + After setting up your MPI environment, compile your program using one of the MPI wrappers ```bash @@ -107,8 +104,8 @@ Compile the above example with $ mpicc helloworld_mpi.c -o helloworld_mpi.x ``` -Running MPI Programs -------------------- +## Running MPI Programs + The MPI program executable must be compatible with the loaded MPI module. Always compile and execute using the very same MPI module. @@ -116,21 +113,21 @@ It is strongly discouraged to mix mpi implementations. Linking an application wi The MPI program executable must be available within the same path on all nodes. This is automatically fulfilled on the /home and /scratch filesystem. You need to preload the executable, if running on the local scratch /lscratch filesystem. -### Ways to run MPI programs +### Ways to Run MPI Programs The optimal way to run an MPI program depends on its memory requirements, memory access pattern and communication pattern. Consider these ways to run an MPI program: -1. One MPI process per node, 24 threads per process -2. Two MPI processes per node, 12 threads per process -3. 24 MPI processes per node, 1 thread per process. +1. One MPI process per node, 24 threads per process +2. Two MPI processes per node, 12 threads per process +3. 24 MPI processes per node, 1 thread per process. **One MPI** process per node, using 24 threads, is most useful for memory demanding applications that make good use of processor cache memory and are not memory bound. This is also a preferred way for communication intensive applications as one process per node enjoys full bandwidth access to the network interface. **Two MPI** processes per node, using 12 threads each, bound to a processor socket is most useful for memory bandwidth bound applications such as BLAS1 or FFT, with scalable memory demand. However, note that the two processes will share access to the network interface. The 12 threads and socket binding should ensure maximum memory access bandwidth and minimize communication, migration and numa effect overheads. -!!! Note "Note" - Important! Bind every OpenMP thread to a core! +!!! note + Important! Bind every OpenMP thread to a core! In the previous two cases with one or two MPI processes per node, the operating system might still migrate OpenMP threads between cores. You want to avoid this by setting the KMP_AFFINITY or GOMP_CPU_AFFINITY environment variables. diff --git a/docs.it4i/salomon/software/mpi/mpi4py-mpi-for-python.md b/docs.it4i/salomon/software/mpi/mpi4py-mpi-for-python.md index 490b2cfc89ae0ccac7425460d5bae362bc3ab834..160478b6ed3c4dbfaf7226759fab0fd8fb9ddc67 100644 --- a/docs.it4i/salomon/software/mpi/mpi4py-mpi-for-python.md +++ b/docs.it4i/salomon/software/mpi/mpi4py-mpi-for-python.md @@ -1,26 +1,25 @@ -MPI4Py (MPI for Python) -======================= +# MPI4Py (MPI for Python) OpenMPI interface to Python -Introduction ------------- +## Introduction + MPI for Python provides bindings of the Message Passing Interface (MPI) standard for the Python programming language, allowing any Python program to exploit multiple processors. This package is constructed on top of the MPI-1/2 specifications and provides an object oriented interface which closely follows MPI-2 C++ bindings. 
It supports point-to-point (sends, receives) and collective (broadcasts, scatters, gathers) communications of any picklable Python object, as well as optimized communications of Python object exposing the single-segment buffer interface (NumPy arrays, builtin bytes/string/array objects). On Anselm MPI4Py is available in standard Python modules. -Modules -------- +## Modules + MPI4Py is build for OpenMPI. Before you start with MPI4Py you need to load Python and OpenMPI modules. You can use toolchain, that loads Python and OpenMPI at once. ```bash $ module load Python/2.7.9-foss-2015g ``` -Execution ---------- +## Execution + You need to import MPI to your python program. Include the following line to the python script: ```bash @@ -39,10 +38,9 @@ For example $ mpiexec python hello_world.py ``` -Examples --------- +## Examples -### Hello world! +### Hello World! ```cpp from mpi4py import MPI @@ -54,7 +52,7 @@ Examples comm.Barrier() # wait for everybody to synchronize ``` -###Collective Communication with NumPy arrays +### Collective Communication With NumPy Arrays ```cpp from __future__ import division @@ -93,4 +91,4 @@ Execute the above code as: $ mpiexec --map-by core --bind-to core python hello_world.py ``` -In this example, we run MPI4Py enabled code on 4 nodes, 24 cores per node (total of 96 processes), each python process is bound to a different core. More examples and documentation can be found on [MPI for Python webpage](https://pythonhosted.org/mpi4py/usrman/index.md). +In this example, we run MPI4Py enabled code on 4 nodes, 24 cores per node (total of 96 processes), each python process is bound to a different core. More examples and documentation can be found on [MPI for Python webpage](https://pypi.python.org/pypi/mpi4py). diff --git a/docs.it4i/salomon/software/numerical-languages/introduction.md b/docs.it4i/salomon/software/numerical-languages/introduction.md index fdd9c0b404da6bc03bd3d5b607b5219957f75eab..50f083a91c52acc731fcbd0abe849904df757221 100644 --- a/docs.it4i/salomon/software/numerical-languages/introduction.md +++ b/docs.it4i/salomon/software/numerical-languages/introduction.md @@ -1,14 +1,13 @@ -Numerical languages -=================== +# Numerical languages Interpreted languages for numerical computations and analysis -Introduction ------------- +## Introduction + This section contains a collection of high-level interpreted languages, primarily intended for numerical computations. -Matlab ------- +## Matlab + MATLAB®^ is a high-level language and interactive environment for numerical computation, visualization, and programming. ```bash @@ -18,8 +17,8 @@ MATLAB®^ is a high-level language and interactive environment for numerical com Read more at the [Matlab page](matlab/). -Octave ------- +## Octave + GNU Octave is a high-level interpreted language, primarily intended for numerical computations. The Octave language is quite similar to Matlab so that most programs are easily portable. ```bash @@ -29,8 +28,7 @@ GNU Octave is a high-level interpreted language, primarily intended for numerica Read more at the [Octave page](octave/). -R ---- +## R The R is an interpreted language and environment for statistical computing and graphics. 
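By analogy with the Matlab and Octave entries above, the R entry closes with the same pattern (a sketch; `module load R` is the command shown on the R page itself):

```bash
$ module load R
```

Read more at the [R page](r/).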
diff --git a/docs.it4i/salomon/software/numerical-languages/matlab.md b/docs.it4i/salomon/software/numerical-languages/matlab.md index 7eebf17ad0d31a7737dddb75923a9a9244bf98c3..aec28baaedbec6491cfe8ba14a7442368dbdec17 100644 --- a/docs.it4i/salomon/software/numerical-languages/matlab.md +++ b/docs.it4i/salomon/software/numerical-languages/matlab.md @@ -1,12 +1,11 @@ -Matlab -====== +# Matlab + +## Introduction -Introduction ------------- Matlab is available in versions R2015a and R2015b. There are always two variants of the release: -- Non commercial or so called EDU variant, which can be used for common research and educational purposes. -- Commercial or so called COM variant, which can used also for commercial activities. The licenses for commercial variant are much more expensive, so usually the commercial variant has only subset of features compared to the EDU available. +* Non commercial or so called EDU variant, which can be used for common research and educational purposes. +* Commercial or so called COM variant, which can used also for commercial activities. The licenses for commercial variant are much more expensive, so usually the commercial variant has only subset of features compared to the EDU available. To load the latest version of Matlab load the module @@ -22,9 +21,9 @@ By default the EDU variant is marked as default. If you need other version or va If you need to use the Matlab GUI to prepare your Matlab programs, you can use Matlab directly on the login nodes. But for all computations use Matlab on the compute nodes via PBS Pro scheduler. -If you require the Matlab GUI, please follow the general informations about [running graphical applications](../../../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/). +If you require the Matlab GUI, please follow the general information about [running graphical applications](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/). -Matlab GUI is quite slow using the X forwarding built in the PBS (qsub -X), so using X11 display redirection either via SSH or directly by xauth (please see the "GUI Applications on Compute Nodes over VNC" part [here](../../../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/)) is recommended. +Matlab GUI is quite slow using the X forwarding built in the PBS (qsub -X), so using X11 display redirection either via SSH or directly by xauth (please see the "GUI Applications on Compute Nodes over VNC" part [here](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/)) is recommended. To run Matlab with GUI, use @@ -40,8 +39,8 @@ To run Matlab in text mode, without the Matlab Desktop GUI environment, use plots, images, etc... will be still available. -Running parallel Matlab using Distributed Computing Toolbox / Engine ------------------------------------------------------------------------- +## Running Parallel Matlab Using Distributed Computing Toolbox / Engine + Distributed toolbox is available only for the EDU variant The MPIEXEC mode available in previous versions is no longer available in MATLAB 2015. Also, the programming interface has changed. Refer to [Release Notes](http://www.mathworks.com/help/distcomp/release-notes.html#buanp9e-1). @@ -58,15 +57,15 @@ To use Distributed Computing, you first need to setup a parallel profile. 
We hav SalomonPBSPro ``` -Or in the GUI, go to tab HOME -> Parallel -> Manage Cluster Profiles..., click Import and navigate to : +Or in the GUI, go to tab HOME -> Parallel -> Manage Cluster Profiles..., click Import and navigate to : /apps/all/MATLAB/2015b-EDU/SalomonPBSPro.settings With the new mode, MATLAB itself launches the workers via PBS, so you can either use interactive mode or a batch mode on one node, but the actual parallel processing will be done in a separate job started by MATLAB itself. Alternatively, you can use "local" mode to run parallel code on just a single node. -### Parallel Matlab interactive session +### Parallel Matlab Interactive Session -Following example shows how to start interactive session with support for Matlab GUI. For more information about GUI based applications on Anselm see [this page](../../../get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/). +Following example shows how to start interactive session with support for Matlab GUI. For more information about GUI based applications on Anselm see [this page](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/). ```bash $ xhost + @@ -85,7 +84,7 @@ Once the access to compute nodes is granted by PBS, user can load following modu r1i0n17$ matlab & ``` -### Parallel Matlab batch job in Local mode +### Parallel Matlab Batch Job in Local Mode To run matlab in batch mode, write an matlab script, then write a bash jobscript and execute via the qsub command. By default, matlab will execute one matlab worker instance per allocated core. @@ -120,7 +119,7 @@ Submit the jobscript using qsub $ qsub ./jobscript ``` -### Parallel Matlab Local mode program example +### Parallel Matlab Local Mode Program Example The last part of the configuration is done directly in the user Matlab script before Distributed Computing Toolbox is started. @@ -130,7 +129,8 @@ The last part of the configuration is done directly in the user Matlab script be This script creates scheduler object "cluster" of type "local" that starts workers locally. -Please note: Every Matlab script that needs to initialize/use matlabpool has to contain these three lines prior to calling parpool(sched, ...) function. +!!! hint + Every Matlab script that needs to initialize/use matlabpool has to contain these three lines prior to calling parpool(sched, ...) function. The last step is to start matlabpool with "cluster" object and correct number of workers. We have 24 cores per node, so we start 24 workers. @@ -173,7 +173,7 @@ The complete example showing how to use Distributed Computing Toolbox in local m You can copy and paste the example in a .m file and execute. Note that the parpool size should correspond to **total number of cores** available on allocated nodes. -### Parallel Matlab Batch job using PBS mode (workers spawned in a separate job) +### Parallel Matlab Batch Job Using PBS Mode (Workers Spawned in a Separate Job) This mode uses PBS scheduler to launch the parallel pool. It uses the SalomonPBSPro profile that needs to be imported to Cluster Manager, as mentioned before. This methodod uses MATLAB's PBS Scheduler interface - it spawns the workers in a separate job submitted by MATLAB using qsub. @@ -209,11 +209,12 @@ Note that we first construct a cluster object using the imported profile, then s You can start this script using batch mode the same way as in Local mode example. 
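For instance, such a batch start could look like the following sketch (`-nodisplay` and `-r` are standard MATLAB command-line switches; `matlabcode.m` is a placeholder script name):

```bash
$ matlab -nodisplay -r matlabcode
```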
-### Parallel Matlab Batch with direct launch (workers spawned within the existing job) +### Parallel Matlab Batch With Direct Launch (Workers Spawned Within the Existing Job) This method is a "hack" invented by us to emulate the mpiexec functionality found in previous MATLAB versions. We leverage the MATLAB Generic Scheduler interface, but instead of submitting the workers to PBS, we launch the workers directly within the running job, thus we avoid the issues with master script and workers running in separate jobs (issues with license not available, waiting for the worker's job to spawn etc.) -Please note that this method is experimental. +!!! warning + This method is experimental. For this method, you need to use SalomonDirect profile, import it using [the same way as SalomonPBSPro](matlab.md#running-parallel-matlab-using-distributed-computing-toolbox---engine) @@ -244,35 +245,35 @@ This is an example of m-script using direct mode: delete(pool) ``` -### Non-interactive Session and Licenses +### Non-Interactive Session and Licenses -If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least the " -l __feature__matlab__MATLAB=1" for EDU variant of Matlab. More information about how to check the license features states and how to request them with PBS Pro, please [look here](../../../anselm-cluster-documentation/software/isv_licenses/). +If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least the `-l __feature__matlab__MATLAB=1` for EDU variant of Matlab. More information about how to check the license features states and how to request them with PBS Pro, please [look here](../../../anselm/software/isv_licenses/). The licensing feature of PBS is currently disabled. -In case of non-interactive session please read the [following information](../../../anselm-cluster-documentation/software/isv_licenses/) on how to modify the qsub command to test for available licenses prior getting the resource allocation. +In case of non-interactive session please read the [following information](../../../anselm/software/isv_licenses/) on how to modify the qsub command to test for available licenses prior getting the resource allocation. -### Matlab Distributed Computing Engines start up time +### Matlab Distributed Computing Engines Start Up Time Starting Matlab workers is an expensive process that requires certain amount of time. For your information please see the following table: - |compute nodes|number of workers|start-up time[s]| - |---|---|---| - |16|384|831| - |8|192|807| - |4|96|483| - |2|48|16| +| compute nodes | number of workers | start-up time[s] | +| ------------- | ----------------- | ---------------- | +| 16 | 384 | 831 | +| 8 | 192 | 807 | +| 4 | 96 | 483 | +| 2 | 48 | 16 | + +## MATLAB on UV2000 -MATLAB on UV2000 ------------------ UV2000 machine available in queue "qfat" can be used for MATLAB computations. This is a SMP NUMA machine with large amount of RAM, which can be beneficial for certain types of MATLAB jobs. CPU cores are allocated in chunks of 8 for this machine. You can use MATLAB on UV2000 in two parallel modes: -### Threaded mode +### Threaded Mode -Since this is a SMP machine, you can completely avoid using Parallel Toolbox and use only MATLAB's threading. MATLAB will automatically detect the number of cores you have allocated and will set maxNumCompThreads accordingly and certain operations, such as fft, , eig, svd, etc. 
will be automatically run in threads. The advantage of this mode is that you don't need to modify your existing sequential codes. +Since this is an SMP machine, you can completely avoid using Parallel Toolbox and use only MATLAB's threading. MATLAB will automatically detect the number of cores you have allocated and will set maxNumCompThreads accordingly and certain operations, such as fft, eig, svd, etc. will be automatically run in threads. The advantage of this mode is that you don't need to modify your existing sequential codes. -### Local cluster mode +### Local Cluster Mode You can also use Parallel Toolbox on UV2000. Use [local cluster mode](matlab/#parallel-matlab-batch-job-in-local-mode); the "SalomonPBSPro" profile will not work. diff --git a/docs.it4i/salomon/software/numerical-languages/octave.md b/docs.it4i/salomon/software/numerical-languages/octave.md index a73c43bb1b818b6c1f47760d13e18a796ae20452..6461bc4cc003b806d0f75320d58d5c9009ab5b8b 100644 --- a/docs.it4i/salomon/software/numerical-languages/octave.md +++ b/docs.it4i/salomon/software/numerical-languages/octave.md @@ -1,16 +1,15 @@ -Octave ====== +# Octave GNU Octave is a high-level interpreted language, primarily intended for numerical computations. It provides capabilities for the numerical solution of linear and nonlinear problems, and for performing other numerical experiments. It also provides extensive graphics capabilities for data visualization and manipulation. Octave is normally used through its interactive command line interface, but it can also be used to write non-interactive programs. The Octave language is quite similar to Matlab so that most programs are easily portable. Read more on <http://www.gnu.org/software/octave/> Two versions of octave are available on the cluster, via module - |Status | Version | module| -|---|---| - |**Stable** | Octave 3.8.2 | Octave| +| Status | Version | module | +| ---------- | ------------ | ------ | +| **Stable** | Octave 3.8.2 | Octave | ```bash - $ module load Octave + $ module load Octave ``` Octave on the cluster is linked to the highly optimized MKL mathematical library. This provides threaded parallelization to many octave kernels, notably the linear algebra subroutines. Octave runs these heavy calculation kernels without any penalty. By default, octave would parallelize to 24 threads. You may control the threads by setting the OMP_NUM_THREADS environment variable. @@ -30,7 +29,7 @@ To run octave in batch mode, write an octave script, then write a bash jobscript mkdir -p /scratch/work/user/$USER/$PBS_JOBID cd /scratch/work/user/$USER/$PBS_JOBID || exit - # copy input file to scratch + # copy input file to scratch cp $PBS_O_WORKDIR/octcode.m . # load octave module @@ -46,12 +45,12 @@ To run octave in batch mode, write an octave script, then write a bash jobscript exit ``` -This script may be submitted directly to the PBS workload manager via the qsub command. The inputs are in octcode.m file, outputs in output.out file. See the single node jobscript example in the [Job execution section](../../resource-allocation-and-job-execution/). +This script may be submitted directly to the PBS workload manager via the qsub command. The inputs are in the octcode.m file, outputs in the output.out file. See the single node jobscript example in the [Job execution section](../../). The octave C compiler mkoctfile calls GNU gcc 4.8.1 for compiling native C code. This is very useful for running native C subroutines in the octave environment. 
```bash - $ mkoctfile -v + $ mkoctfile -v ``` Octave may use MPI for interprocess communication. This functionality is currently not supported on the cluster. In case you require the octave interface to MPI, please contact our [cluster support](https://support.it4i.cz/rt/). diff --git a/docs.it4i/salomon/software/numerical-languages/r.md b/docs.it4i/salomon/software/numerical-languages/r.md index 2c1f610d2c35ded501c8fe862d5ebd8052b97080..6a01926e1b69bdd97d695d19b7a056419408acde 100644 --- a/docs.it4i/salomon/software/numerical-languages/r.md +++ b/docs.it4i/salomon/software/numerical-languages/r.md @@ -1,8 +1,7 @@ -R -=== +# R + +## Introduction -Introduction ------------- The R is a language and environment for statistical computing and graphics. R provides a wide variety of statistical (linear and nonlinear modelling, classical statistical tests, time-series analysis, classification, clustering, ...) and graphical techniques, and is highly extensible. One of R's strengths is the ease with which well-designed publication-quality plots can be produced, including mathematical symbols and formulae where needed. Great care has been taken over the defaults for the minor design choices in graphics, but the user retains full control. @@ -13,25 +12,24 @@ Extensive support for parallel computing is available within R. Read more on <http://www.r-project.org/>, <http://cran.r-project.org/doc/manuals/r-release/R-lang.html> -Modules ------- -**The R version 3.1.1 is available on the cluster, along with GUI interface Rstudio** +## Modules + +The R version 3.1.1 is available on the cluster, along with the GUI interface Rstudio -|Application|Version|module| -|---|---| -|**R**|R 3.1.1|R/3.1.1-intel-2015b| -|**Rstudio**|Rstudio 0.98.1103|Rstudio| +| Application | Version | module | +| ----------- | ----------------- | ------------------- | +| **R** | R 3.1.1 | R/3.1.1-intel-2015b | +| **Rstudio** | Rstudio 0.98.1103 | Rstudio | ```bash $ module load R ``` -Execution --------- +## Execution The R on Anselm is linked to the highly optimized MKL mathematical library. This provides threaded parallelization to many R kernels, notably the linear algebra subroutines. The R runs these heavy calculation kernels without any penalty. By default, the R would parallelize to 24 threads. You may control the threads by setting the OMP_NUM_THREADS environment variable. -### Interactive execution +### Interactive Execution To run R interactively, using the Rstudio GUI, log in with the ssh -X parameter for X11 forwarding. Run rstudio: @@ -40,7 +38,7 @@ To run R interactively, using Rstudio GUI, log in with ssh -X parameter for X11 $ rstudio ``` -### Batch execution +### Batch Execution To run R in batch mode, write an R script, then write a bash jobscript and execute via the qsub command. By default, R will use 24 threads when running MKL kernels. @@ -68,14 +66,14 @@ Example jobscript: exit ``` -This script may be submitted directly to the PBS workload manager via the qsub command. The inputs are in rscript.R file, outputs in routput.out file. See the single node jobscript example in the [Job execution section](../../resource-allocation-and-job-execution/job-submission-and-execution/). +This script may be submitted directly to the PBS workload manager via the qsub command. The inputs are in the rscript.R file, outputs in the routput.out file. See the single node jobscript example in the [Job execution section](../../job-submission-and-execution/). + +## Parallel R + +Parallel execution of R may be achieved in many ways. 
One approach is the implied parallelization due to linked libraries or specially enabled functions, as [described above](r/#interactive-execution). In the following sections, we focus on explicit parallelization, where parallel constructs are directly stated within the R script. -Parallel R ---------- -Parallel execution of R may be achieved in many ways. One approach is the implied parallelization due to linked libraries or specially enabled functions, as [described above](r/#interactive-execution). In the following sections, we focus on explicit parallelization, where parallel constructs are directly stated within the R script. +## Package Parallel -Package parallel -------------------- The package parallel provides support for parallel computation, including by forking (taken from package multicore), by sockets (taken from package snow) and random-number generation. The package is activated this way: @@ -98,9 +96,9 @@ Download the package [parallell](package-parallel-vignette.pdf) vignette. The forking is the most simple to use. The forking family of functions provides a parallelized, drop-in replacement for the serial apply() family of functions. !!! warning - Forking via package parallel provides functionality similar to OpenMP construct omp parallel for + Forking via package parallel provides functionality similar to OpenMP construct omp parallel for - Only cores of single node can be utilized this way! + Only cores of single node can be utilized this way! Forking example: @@ -144,8 +142,8 @@ The above example is the classic parallel example for calculating the number π. Every evaluation of the integrand function runs in parallel on a different process. -Package Rmpi ------------ +## Package Rmpi + The package Rmpi provides an interface (wrapper) to MPI APIs. It also provides an interactive R slave environment. On the cluster, Rmpi provides interface to the [OpenMPI](../mpi/Running_OpenMPI/). @@ -161,7 +159,7 @@ When using package Rmpi, both openmpi and R modules must be loaded Rmpi may be used in three basic ways. The static approach is identical to executing any other MPI program. In addition, there is Rslaves dynamic MPI approach and the mpi.apply approach. In the following section, we will use the number π integration example to illustrate all these concepts. -### static Rmpi +### Static Rmpi Static Rmpi programs are executed via mpiexec, like any other MPI programs. The number of processes is static - given at the launch time. @@ -217,7 +215,7 @@ The above is the static MPI example for calculating the number π. Note the **li $ mpirun R --slave --no-save --no-restore -f pi3.R ``` -### dynamic Rmpi +### Dynamic Rmpi Dynamic Rmpi programs are executed by calling the R directly. The OpenMPI module must still be loaded. The R slave processes will be spawned by a function call within the Rmpi program. @@ -349,7 +347,7 @@ mpi.apply Rmpi example: mpi.quit() ``` -The above is the mpi.apply MPI example for calculating the number π. Only the slave processes carry out the calculation. Note the **mpi.parSapply(), ** function call. The package parallel [example](r/#package-parallel)[above](r/#package-parallel) may be trivially adapted (for much better performance) to this structure using the mclapply() in place of mpi.parSapply(). +The above is the mpi.apply MPI example for calculating the number π. Only the slave processes carry out the calculation. Note the **mpi.parSapply()** function call. 
The package parallel [example](r/#package-parallel) [above](r/#package-parallel) may be trivially adapted (for much better performance) to this structure using the mclapply() in place of mpi.parSapply().

Execute the example as:

@@ -357,12 +355,12 @@ Execute the example as:

$ mpirun -np 1 R --slave --no-save --no-restore -f pi3parSapply.R
```

-Combining parallel and Rmpi
---------------------------
+## Combining Parallel and Rmpi
+
Currently, the two packages cannot be combined for hybrid calculations.

-Parallel execution
------------------
+## Parallel Execution
+
The R parallel jobs are executed via the PBS queue system exactly like any other parallel jobs. Users must create an appropriate jobscript and submit it via **qsub**.

Example jobscript for [static Rmpi](r/#static-rmpi) parallel R execution, running 1 process per core:

@@ -373,7 +371,7 @@ Example jobscript for [static Rmpi](r/#static-rmpi) parallel R execution, runnin

#PBS -N Rjob
#PBS -l select=100:ncpus=24:mpiprocs=24:ompthreads=1

-    # change to scratch directory
+    # change to scratch directory
SCRDIR=/scratch/work/user/$USER/myjob
cd $SCRDIR || exit

@@ -394,10 +392,10 @@ Example jobscript for [static Rmpi](r/#static-rmpi) parallel R execution, runnin

exit
```

-For more information about jobscripts and MPI execution refer to the [Job submission](../../resource-allocation-and-job-execution/job-submission-and-execution/) and general [MPI](../mpi/mpi/) sections.
+For more information about jobscripts and MPI execution refer to the [Job submission](../../job-submission-and-execution/) and general [MPI](../mpi/mpi/) sections.
+
+## Xeon Phi Offload

-Xeon Phi Offload
----------------
By leveraging MKL, R can accelerate certain computations, most notably linear algebra operations, on the Xeon Phi accelerator by using Automated Offload. To use MKL Automated Offload, you need to first set this environment variable before R execution:

```bash
diff --git a/docs.it4i/salomon/software/operating-system.md b/docs.it4i/salomon/software/operating-system.md
index cff49140243e91eab6d31415051aa2a808c2110c..f68a9a97aac216dd727e0973d3ac56754726b90a 100644
--- a/docs.it4i/salomon/software/operating-system.md
+++ b/docs.it4i/salomon/software/operating-system.md
@@ -1,6 +1,5 @@
-Operating System
-================
+# Operating System

-The operating system on Salomon is Linux - **CentOS 6.x**
+The operating system on Salomon is Linux - [**CentOS 6.x**](https://en.wikipedia.org/wiki/CentOS)

-The CentOS Linux distribution is a stable, predictable, manageable and reproducible platform derived from the sources of Red Hat Enterprise Linux (RHEL).
\ No newline at end of file
+The CentOS Linux distribution is a stable, predictable, manageable and reproducible platform derived from the sources of Red Hat Enterprise Linux (RHEL).
diff --git a/docs.it4i/salomon/storage.md b/docs.it4i/salomon/storage.md
index c15ea2ec39fe110508afb90a75c8149bc523c991..8c3e651bca8dc33cc6fcb6283d6cd9778a4fd7dd 100644
--- a/docs.it4i/salomon/storage.md
+++ b/docs.it4i/salomon/storage.md
@@ -1,75 +1,73 @@
-Storage
-=======
+# Storage

-Introduction
-------------
+## Introduction

There are two main shared file systems on the Salomon cluster, the [HOME](#home) and [SCRATCH](#shared-filesystems). All login and compute nodes may access the same data on the shared file systems. Compute nodes are also equipped with local (non-shared) scratch, ramdisk and tmp file systems.

-Policy (in a nutshell)
----------------------
+## Policy (In a Nutshell)
+
!!! note
-    * Use [HOME](#home) for your most valuable data and programs.
-    * Use [WORK](#work) for your large project files.
-    * Use [TEMP](#temp) for large scratch data.
+    * Use [HOME](#home) for your most valuable data and programs.
+    * Use [WORK](#work) for your large project files.
+    * Use [TEMP](#temp) for large scratch data.
+
!!! warning
-    Do not use for [archiving](#archiving)!
+    Do not use for [archiving](#archiving)!

-Archiving
-------------
+## Archiving

Please don't use the shared file systems as a backup for large amounts of data or as a long-term archiving means. The academic staff and students of research institutions in the Czech Republic can use the [CESNET storage service](#cesnet-data-storage), which is available via SSHFS.

-Shared File systems
----------------------
+## Shared File Systems
+
The Salomon computer provides two main shared file systems, the [HOME file system](#home-filesystem) and the [SCRATCH file system](#scratch-filesystem). The SCRATCH file system is partitioned into [WORK and TEMP workspaces](#shared-workspaces). The HOME file system is realized as a tiered NFS disk storage. The SCRATCH file system is realized as a parallel Lustre file system. Both shared file systems are accessible via the InfiniBand network. Extended ACLs are provided on both HOME/SCRATCH file systems for the purpose of sharing data with other users using fine-grained control.

-###HOME file system
+### HOME File System

The HOME file system is realized as a tiered file system, exported via NFS. The first tier has a capacity of 100 TB, the second tier 400 TB. The file system is available on all login and computational nodes. The HOME file system hosts the [HOME workspace](#home).

-###SCRATCH file system
+### SCRATCH File System

-The architecture of Lustre on Salomon is composed of two metadata servers (MDS) and six data/object storage servers (OSS). Accessible capacity is 1.69 PB, shared among all users. The SCRATCH file system hosts the [WORK and TEMP workspaces](#shared-workspaces).
+The architecture of Lustre on Salomon is composed of two metadata servers (MDS) and six data/object storage servers (OSS). Accessible capacity is 1.69 PB, shared among all users. The SCRATCH file system hosts the [WORK and TEMP workspaces](#shared-workspaces).

Configuration of the SCRATCH Lustre storage

-- SCRATCH Lustre object storage
-    - Disk array SFA12KX
-    - 540 x 4 TB SAS 7.2krpm disk
-    - 54 x OST of 10 disks in RAID6 (8+2)
-    - 15 x hot-spare disk
-    - 4 x 400 GB SSD cache
-- SCRATCH Lustre metadata storage
-    - Disk array EF3015
-    - 12 x 600 GB SAS 15 krpm disk
+* SCRATCH Lustre object storage
+    * Disk array SFA12KX
+    * 540 x 4 TB SAS 7.2krpm disk
+    * 54 x OST of 10 disks in RAID6 (8+2)
+    * 15 x hot-spare disk
+    * 4 x 400 GB SSD cache
+* SCRATCH Lustre metadata storage
+    * Disk array EF3015
+    * 12 x 600 GB SAS 15 krpm disk

-### Understanding the Lustre File systems
+### Understanding the Lustre File Systems

-(source <http://www.nas.nasa.gov>)
+(source: <http://www.nas.nasa.gov>)

A user file on the Lustre file system can be divided into multiple chunks (stripes) and stored across a subset of the object storage targets (OSTs) (disks). The stripes are distributed among the OSTs in a round-robin fashion to ensure load balancing.

-When a client (a compute node from your job) needs to create or access a file, the client queries the metadata server ( MDS) and the metadata target ( MDT) for the layout and location of the [file's stripes](http://www.nas.nasa.gov/hecc/support/kb/Lustre_Basics_224.html#striping). Once the file is opened and the client obtains the striping information, the MDS is no longer involved in the file I/O process. The client interacts directly with the object storage servers (OSSes) and OSTs to perform I/O operations such as locking, disk allocation, storage, and retrieval.
+When a client (a compute node from your job) needs to create or access a file, the client queries the metadata server (MDS) and the metadata target (MDT) for the layout and location of the [file's stripes](http://www.nas.nasa.gov/hecc/support/kb/Lustre_Basics_224.html#striping). Once the file is opened and the client obtains the striping information, the MDS is no longer involved in the file I/O process. The client interacts directly with the object storage servers (OSSes) and OSTs to perform I/O operations such as locking, disk allocation, storage, and retrieval.

If multiple clients try to read and write the same part of a file at the same time, the Lustre distributed lock manager enforces coherency so that all clients see consistent results.

There is a default stripe configuration for the Salomon Lustre file systems. However, users can set the following stripe parameters for their own directories or files to get optimum I/O performance:

1. stripe_size: the size of the chunk in bytes; specify with k, m, or g to use units of KB, MB, or GB, respectively; the size must be an even multiple of 65,536 bytes; default is 1 MB for all Salomon Lustre file systems
-2. stripe_count the number of OSTs to stripe across; default is 1 for Salomon Lustre file systems one can specify -1 to use all OSTs in the file system.
-3. stripe_offset The index of the OST where the first stripe is to be placed; default is -1 which results in random selection; using a non-default value is NOT recommended.
+1. stripe_count: the number of OSTs to stripe across; default is 1 for Salomon Lustre file systems; one can specify -1 to use all OSTs in the file system.
+1. stripe_offset: the index of the OST where the first stripe is to be placed; default is -1, which results in random selection; using a non-default value is NOT recommended.

-!!! Note "Note"
-    Setting stripe size and stripe count correctly for your needs may significantly impact the I/O performance you experience.
+!!! note
+    Setting stripe size and stripe count correctly for your needs may significantly impact the I/O performance you experience.

Use the lfs getstripe command to view the stripe parameters. Use the lfs setstripe command to set the stripe parameters and obtain optimal I/O performance. The correct stripe setting depends on your needs and file access patterns.

```bash
-$ lfs getstripe dir|filename
-$ lfs setstripe -s stripe_size -c stripe_count -o stripe_offset dir|filename
+$ lfs getstripe <dir|filename>
+$ lfs setstripe -s stripe_size -c stripe_count -o stripe_offset <dir|filename>
```

Example:

@@ -96,22 +94,22 @@ $ man lfs
```

### Hints on Lustre Striping

-!!! Note "Note"
-    Increase the stripe_count for parallel I/O to the same file.
+!!! note
+    Increase the stripe_count for parallel I/O to the same file.

When multiple processes are writing blocks of data to the same file in parallel, the I/O performance for large files will improve when the stripe_count is set to a larger value. The stripe count sets the number of OSTs the file will be written to. By default, the stripe count is set to 1. While this default setting provides for efficient access of metadata (for example to support the ls -l command), large files should use stripe counts of greater than 1. This will increase the aggregate I/O bandwidth by using multiple OSTs in parallel instead of just one. A rule of thumb is to use a stripe count approximately equal to the number of gigabytes in the file. Another good practice is to make the stripe count an integral factor of the number of processes performing the write in parallel, so that you achieve load balance among the OSTs. For example, set the stripe count to 16 instead of 15 when you have 64 processes performing the writes.
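As an illustration of the rule of thumb above, a target directory can be striped before the parallel write starts. A minimal sketch using the lfs commands described earlier; the directory path and the stripe count of 16 are illustrative only:

```bash
# spread files created in this directory over 16 OSTs (path is illustrative)
$ lfs setstripe -c 16 /scratch/temp/$USER/parallel-output

# verify the resulting striping parameters
$ lfs getstripe /scratch/temp/$USER/parallel-output
```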
-!!! Note "Note"
-    Using a large stripe size can improve performance when accessing very large files
+!!! note
+    Using a large stripe size can improve performance when accessing very large files.

A large stripe size allows each client to have exclusive access to its own part of a file. However, it can be counterproductive in some cases if it does not match your I/O pattern. The choice of stripe size has no effect on a single-stripe file.

Read more at <http://wiki.lustre.org/manual/LustreManual20_HTML/ManagingStripingFreeSpace.html>

-Disk usage and quota commands
------------------------------------------
+## Disk Usage and Quota Commands
+
User quotas on the Lustre file systems (SCRATCH) can be checked and reviewed using the following command:

```bash

@@ -123,11 +121,11 @@ Example for Lustre SCRATCH directory:

```bash
$ lfs quota /scratch
Disk quotas for user user001 (uid 1234):
-    Filesystem kbytes quota limit grace files quota limit grace
-    /scratch 8 0 100000000000 - 3 0 0 -
+    Filesystem kbytes quota limit grace files quota limit grace
+    /scratch 8 0 100000000000 * 3 0 0 -
Disk quotas for group user001 (gid 1234):
Filesystem kbytes quota limit grace files quota limit grace
-    /scratch 8 0 0 - 3 0 0 -
+    /scratch 8 0 0 * 3 0 0 -
```

In this example, we view a current quota limit of 100 TB, with 8 KB currently used by user001.

@@ -143,9 +141,9 @@ Example output:

```bash
$ quota
Disk quotas for user vop999 (uid 1025):
-    Filesystem blocks quota limit grace files quota limit grace
+    Filesystem blocks quota limit grace files quota limit grace
home-nfs-ib.salomon.it4i.cz:/home
-    28 0 250000000 10 0 500000
+    28 0 250000000 10 0 500000
```

To better understand where exactly the space is used, you can use the following command:

@@ -178,8 +176,8 @@ $ man lfs

$ man du
```

-Extended Access Control List (ACL)
----------------------------------
+## Extended Access Control List (ACL)
+
Extended ACLs provide another security mechanism besides the standard POSIX ACLs, which are defined by three entries (for owner/group/others). Extended ACLs have more than the three basic entries. In addition, they also contain a mask entry and may contain any number of named user and named group entries.

ACLs on a Lustre file system work exactly like ACLs on any Linux file system. They are manipulated with the standard tools in the standard manner. Below, we create a directory and allow a specific user access.

@@ -188,7 +186,7 @@ ACLs on a Lustre file system work exactly like ACLs on any Linux file system. Th
[vop999@login1.salomon ~]$ umask 027
[vop999@login1.salomon ~]$ mkdir test
[vop999@login1.salomon ~]$ ls -ld test
-drwxr-x--- 2 vop999 vop999 4096 Nov 5 14:17 test
+drwxr-x--- 2 vop999 vop999 4096 Nov 5 14:17 test
[vop999@login1.salomon ~]$ getfacl test
# file: test
# owner: vop999

@@ -199,7 +197,7 @@ other::---

[vop999@login1.salomon ~]$ setfacl -m user:johnsm:rwx test
[vop999@login1.salomon ~]$ ls -ld test
-drwxrwx---+ 2 vop999 vop999 4096 Nov 5 14:17 test
+drwxrwx---+ 2 vop999 vop999 4096 Nov 5 14:17 test
[vop999@login1.salomon ~]$ getfacl test
# file: test
# owner: vop999

@@ -213,123 +211,120 @@ other::---

The default ACL mechanism can be used to replace setuid/setgid permissions on directories. Setting a default ACL on a directory (-d flag to setfacl) will cause the ACL permissions to be inherited by any newly created file or subdirectory within the directory. Refer to this page for more information on Linux ACL:

-[http://www.vanemery.com/Linux/ACL/POSIX_ACL_on_Linux.html ](http://www.vanemery.com/Linux/ACL/POSIX_ACL_on_Linux.html)
+[http://www.vanemery.com/Linux/ACL/POSIX_ACL_on_Linux.html](http://www.vanemery.com/Linux/ACL/POSIX_ACL_on_Linux.html)
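Continuing the session above, inheritance could be set up like this. A minimal sketch; the johnsm entry and the test directory reuse the names from the example, and the subdirectory is illustrative:

```bash
# add a default ACL entry; new content created under test will inherit it
[vop999@login1.salomon ~]$ setfacl -d -m user:johnsm:rwx test

# a newly created subdirectory picks up the inherited entry
[vop999@login1.salomon ~]$ mkdir test/subdir
[vop999@login1.salomon ~]$ getfacl test/subdir
```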
-Shared Workspaces
---------------------
+## Shared Workspaces

-###HOME
+### Home

Users' home directories /home/username reside on the HOME file system. Accessible capacity is 0.5 PB, shared among all users. Individual users are restricted by file system usage quotas, set to 250 GB per user. If 250 GB should prove insufficient for a particular user, please contact [support](https://support.it4i.cz/rt); the quota may be lifted upon request.

-!!! Note "Note"
-    The HOME file system is intended for preparation, evaluation, processing and storage of data generated by active Projects.
+!!! note
+    The HOME file system is intended for preparation, evaluation, processing and storage of data generated by active Projects.

-The HOME should not be used to archive data of past Projects or other unrelated data.
+The HOME should not be used to archive data of past Projects or other unrelated data.

-The files on HOME will not be deleted until end of the [users lifecycle](../get-started-with-it4innovations/obtaining-login-credentials/obtaining-login-credentials/).
+The files on HOME will not be deleted until the end of the [user's lifecycle](../general/obtaining-login-credentials/obtaining-login-credentials/).

The workspace is backed up, such that it can be restored in case of catastrophic failure resulting in significant data loss. This backup, however, is not intended to restore old versions of user data or to restore (accidentally) deleted files.

-|HOME workspace||
-|---|---|
-|Accesspoint|/home/username|
-|Capacity|0.5 PB|
-|Throughput|6 GB/s|
-|User quota|250 GB|
-|Protocol|NFS, 2-Tier|
+| HOME workspace | |
+| -------------- | -------------- |
+| Accesspoint | /home/username |
+| Capacity | 0.5 PB |
+| Throughput | 6 GB/s |
+| User quota | 250 GB |
+| Protocol | NFS, 2-Tier |

-### WORK
+### Work

The WORK workspace resides on the SCRATCH file system. Users may create subdirectories and files in the directories **/scratch/work/user/username** and **/scratch/work/project/projectid**. The /scratch/work/user/username is private to the user, much like the home directory. The /scratch/work/project/projectid is accessible to all users involved in project projectid.

-!!! Note "Note"
-    The WORK workspace is intended to store users project data as well as for high performance access to input and output files. All project data should be removed once the project is finished. The data on the WORK workspace are not backed up.
+!!! note
+    The WORK workspace is intended to store users' project data as well as for high performance access to input and output files. All project data should be removed once the project is finished. The data on the WORK workspace are not backed up.

-    Files on the WORK file system are **persistent** (not automatically deleted) throughout duration of the project.
+    Files on the WORK file system are **persistent** (not automatically deleted) throughout the duration of the project.

The WORK workspace is hosted on the SCRATCH file system. The SCRATCH is realized as a Lustre parallel file system and is available from all login and computational nodes. Default stripe size is 1 MB, stripe count is 1. There are 54 OSTs dedicated to the SCRATCH file system.

-!!! Note "Note"
-    Setting stripe size and stripe count correctly for your needs may significantly impact the I/O performance you experience.
+!!! note
+    Setting stripe size and stripe count correctly for your needs may significantly impact the I/O performance you experience.

-|WORK workspace||
-|---|---|
-|Accesspoints|/scratch/work/user/username, /scratch/work/user/projectid|
-|Capacity |1.6 PB|
-|Throughput|30 GB/s|
-|User quota|100 TB|
-|Default stripe size|1 MB|
-|Default stripe count|1|
-|Number of OSTs|54|
-|Protocol|Lustre|
+| WORK workspace | |
+| -------------------- | ------------------------------------------------------------- |
+| Accesspoints | /scratch/work/user/username, /scratch/work/project/projectid |
+| Capacity | 1.6 PB |
+| Throughput | 30 GB/s |
+| User quota | 100 TB |
+| Default stripe size | 1 MB |
+| Default stripe count | 1 |
+| Number of OSTs | 54 |
+| Protocol | Lustre |

-### TEMP
+### Temp

The TEMP workspace resides on the SCRATCH file system. The TEMP workspace accesspoint is /scratch/temp. Users may freely create subdirectories and files on the workspace. Accessible capacity is 1.6 PB, shared among all users on TEMP and WORK. Individual users are restricted by file system usage quotas, set to 100 TB per user. The purpose of this quota is to prevent runaway programs from filling the entire file system and denying service to other users. If 100 TB should prove insufficient for a particular user, please contact [support](https://support.it4i.cz/rt); the quota may be lifted upon request.

-!!! Note "Note"
-    The TEMP workspace is intended for temporary scratch data generated during the calculation as well as for high performance access to input and output files. All I/O intensive jobs must use the TEMP workspace as their working directory.
+!!! note
+    The TEMP workspace is intended for temporary scratch data generated during the calculation as well as for high performance access to input and output files. All I/O intensive jobs must use the TEMP workspace as their working directory.

-    Users are advised to save the necessary data from the TEMP workspace to HOME or WORK after the calculations and clean up the scratch files.
+    Users are advised to save the necessary data from the TEMP workspace to HOME or WORK after the calculations and clean up the scratch files.

Files on the TEMP file system that are **not accessed for more than 90 days** will be automatically **deleted**.

The TEMP workspace is hosted on the SCRATCH file system. The SCRATCH is realized as a Lustre parallel file system and is available from all login and computational nodes. Default stripe size is 1 MB, stripe count is 1. There are 54 OSTs dedicated to the SCRATCH file system.

-!!! Note "Note"
-    Setting stripe size and stripe count correctly for your needs may significantly impact the I/O performance you experience.
-
-|TEMP workspace||
-|---|---|
-|Accesspoint|/scratch/temp|
-|Capacity|1.6 PB|
-|Throughput|30 GB/s|
-|User quota|100 TB|
-|Default stripe size|1 MB|
-|Default stripe count|1|
-|Number of OSTs|54|
-|Protocol|Lustre|
-
-RAM disk
---------
+!!! note
+    Setting stripe size and stripe count correctly for your needs may significantly impact the I/O performance you experience.
+
+| TEMP workspace | |
+| -------------------- | ------------- |
+| Accesspoint | /scratch/temp |
+| Capacity | 1.6 PB |
+| Throughput | 30 GB/s |
+| User quota | 100 TB |
+| Default stripe size | 1 MB |
+| Default stripe count | 1 |
+| Number of OSTs | 54 |
+| Protocol | Lustre |
+
+## RAM Disk
+
Every computational node is equipped with a file system realized in memory, the so-called RAM disk.

-!!! Note "Note"
-    Use RAM disk in case you need really fast access to your data of limited size during your calculation. Be very careful, use of RAM disk file system is at the expense of operational memory.
+!!! note
+    Use the RAM disk in case you need really fast access to data of limited size during your calculation. Be very careful, use of the RAM disk file system is at the expense of operational memory.

The local RAM disk is mounted as /ramdisk and is accessible to the user at the /ramdisk/$PBS_JOBID directory.

The local RAM disk file system is intended for temporary scratch data generated during the calculation as well as for high performance access to input and output files. The size of the RAM disk file system is limited. Be very careful, use of the RAM disk file system is at the expense of operational memory. It is not recommended to allocate a large amount of memory and use a large amount of data in the RAM disk file system at the same time.

-!!! Note "Note"
-    The local RAM disk directory /ramdisk/$PBS_JOBID will be deleted immediately after the calculation end. Users should take care to save the output data from within the jobscript.
+!!! note
+    The local RAM disk directory /ramdisk/$PBS_JOBID will be deleted immediately after the calculation ends. Users should take care to save the output data from within the jobscript.
-|RAM disk||
-|---|---|
-|Mountpoint| /ramdisk|
-|Accesspoint| /ramdisk/$PBS_JOBID|
-|Capacity|120 GB|
-|Throughput|over 1.5 GB/s write, over 5 GB/s read, single thread, over 10 GB/s write, over 50 GB/s read, 16 threads|
-|User quota|none|
+| RAM disk | |
+| ----------- | --------------------------------------------------------------------------------------------------------- |
+| Mountpoint | /ramdisk |
+| Accesspoint | /ramdisk/$PBS_JOBID |
+| Capacity | 120 GB |
+| Throughput | over 1.5 GB/s write, over 5 GB/s read, single thread; over 10 GB/s write, over 50 GB/s read, 16 threads |
+| User quota | none |

+## Summary

-Summary
-------
+| Mountpoint | Usage | Protocol | Net Capacity | Throughput | Limitations | Access | Services |
+| ------------- | ------------------------------ | ----------- | ------------ | ---------- | ------------ | ----------------------- | -------------------------------- |
+| /home | home directory | NFS, 2-Tier | 0.5 PB | 6 GB/s | Quota 250 GB | Compute and login nodes | backed up |
+| /scratch/work | large project files | Lustre | 1.69 PB | 30 GB/s | Quota | Compute and login nodes | none |
+| /scratch/temp | job temporary data | Lustre | 1.69 PB | 30 GB/s | Quota 100 TB | Compute and login nodes | files older than 90 days removed |
+| /ramdisk | job temporary data, node local | local | 120 GB | 90 GB/s | none | Compute nodes | purged after job ends |

-|Mountpoint|Usage|Protocol|Net|Capacity|Throughput|Limitations|Access|
-|---|---|
-| /home|home directory|NFS, 2-Tier|0.5 PB|6 GB/s|Quota 250GB|Compute and login nodes|backed up|
-|/scratch/work|large project files|Lustre|1.69 PB|30 GB/s|Quota|Compute and login nodes|none|
-|/scratch/temp|job temporary data|Lustre|1.69 PB|30 GB/s|Quota 100 TB|Compute and login nodes|files older 90 days removed|
-|/ramdisk|job temporary data, node local|local|120GB|90 GB/s|none|Compute nodes|purged after job ends|

+## CESNET Data Storage

-CESNET Data Storage
------------
Do not use the shared file systems at IT4Innovations as a backup for large amounts of data or for long-term archiving purposes.

-!!! Note "Note"
-    The IT4Innovations does not provide storage capacity for data archiving. Academic staff and students of research institutions in the Czech Republic can use [CESNET Storage service](https://du.cesnet.cz/).
+!!! note
+    IT4Innovations does not provide storage capacity for data archiving. Academic staff and students of research institutions in the Czech Republic can use the [CESNET Storage service](https://du.cesnet.cz/).

The CESNET Storage service can be used for research purposes, mainly by academic staff and students of research institutions in the Czech Republic.

@@ -337,28 +332,27 @@ User of data storage CESNET (DU) association can become organizations or an indi

Users may only use the CESNET data storage for data transfer and storage associated with activities in science, research, development, the spread of education, culture and prosperity. In detail, see “Acceptable Use Policy CESNET Large Infrastructure (Acceptable Use Policy, AUP)”.

-The service is documented at <https://du.cesnet.cz/wiki/doku.php/en/start>. For special requirements please contact directly CESNET Storage Department via e-mail [du-support(at)cesnet.cz](mailto:du-support@cesnet.cz).
+The service is documented [here](https://du.cesnet.cz/en/start). For special requirements, please contact the CESNET Storage Department directly via e-mail [du-support(at)cesnet.cz](mailto:du-support@cesnet.cz).

The procedure to obtain CESNET access is quick and trouble-free.

(source [https://du.cesnet.cz/](https://du.cesnet.cz/wiki/doku.php/en/start "CESNET Data Storage"))

-CESNET storage access
---------------------
+## CESNET Storage Access

-### Understanding CESNET storage
+### Understanding CESNET Storage

-!!! Note "Note"
-    It is very important to understand the CESNET storage before uploading data. Please read <https://du.cesnet.cz/en/navody/home-migrace-plzen/start> first.
+!!! note
+    It is very important to understand the CESNET storage before uploading data. Please read <https://du.cesnet.cz/en/navody/home-migrace-plzen/start> first.

Once registered for CESNET Storage, you may [access the storage](https://du.cesnet.cz/en/navody/faq/start) in a number of ways. We recommend the SSHFS and RSYNC methods.

### SSHFS Access

-!!! Note "Note"
-    SSHFS: The storage will be mounted like a local hard drive
+!!! note
+    SSHFS: The storage will be mounted like a local hard drive.

-The SSHFS provides a very convenient way to access the CESNET Storage. The storage will be mounted onto a local directory, exposing the vast CESNET Storage as if it was a local removable hard drive. Files can be than copied in and out in a usual fashion.
+The SSHFS provides a very convenient way to access the CESNET Storage. The storage will be mounted onto a local directory, exposing the vast CESNET Storage as if it was a local removable hard drive. Files can then be copied in and out in the usual fashion.

First, create the mount point

@@ -398,16 +392,16 @@ Once done, please remember to unmount the storage

$ fusermount -u cesnet
```

-### Rsync access
+### Rsync Access

-!!! Note "Note"
-    Rsync provides delta transfer for best performance, can resume interrupted transfers
+!!! note
+    Rsync provides delta transfer for best performance and can resume interrupted transfers.

Rsync is a fast and extraordinarily versatile file copying tool. It is famous for its delta-transfer algorithm, which reduces the amount of data sent over the network by sending only the differences between the source files and the existing files in the destination. Rsync is widely used for backups and mirroring and as an improved copy command for everyday use.

Rsync finds files that need to be transferred using a "quick check" algorithm (by default) that looks for files that have changed in size or in last-modified time. Any changes in the other preserved attributes (as requested by options) are made on the destination file directly when the quick check indicates that the file's data does not need to be updated.

-More about Rsync at <https://du.cesnet.cz/en/navody/rsync/start#pro_bezne_uzivatele>
+More about Rsync can be found [here](https://du.cesnet.cz/en/navody/rsync/start#pro_bezne_uzivatele).

Transfer large files to/from CESNET storage, assuming membership in the Storage VO
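A minimal sketch of such a transfer; the hostname and remote directory below are hypothetical placeholders, not the actual CESNET endpoints (which are not part of this excerpt):

```bash
# push one large file; repeated runs re-send only changed blocks
# (host and remote path are hypothetical placeholders)
$ rsync --progress bigfile.tar username@cesnet-storage-host:my_vo_directory/

# pull it back the same way
$ rsync --progress username@cesnet-storage-host:my_vo_directory/bigfile.tar .
```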
diff --git a/docs.it4i/software/bio-gentoo.md b/docs.it4i/software/bioinformatics.md
similarity index 67%
rename from docs.it4i/software/bio-gentoo.md
rename to docs.it4i/software/bioinformatics.md
index c96a6bf0646740a11d8508bd11bc61b86fe65d0d..76991fe7810ea45fdf7a77ed1cd03adf20a79152 100644
--- a/docs.it4i/software/bio-gentoo.md
+++ b/docs.it4i/software/bioinformatics.md
@@ -1,26 +1,21 @@
-Bioinformatics Applications
-==========================
+# Bioinformatics Applications

-Introduction
-------------
+## Introduction

-In addition to the many applications available through modules (deployed through EasyBuild packaging system) we provide an alternative source of applications on our clusters inferred from Gentoo Linux ( www.gentoo.org ). The user's environment is setup through a script which returns a bash instance to the user (you can think of it a starting a whole virtual machine but inside your current namespace) . The applications were optimized by gcc compiler for the SandyBridge and IvyBridge platforms. The binaries use paths from /apps/gentoo prefix to find the required runtime dependencies, config files, etc. The Gentoo Linux is a standalone installation not even relying on the glibc provided by host operating system (Redhat). The trick which allowed us to install Gentoo Linux on the host Redhat system is called Gentoo::RAP and uses a modified loader with a hardcoded path ( https://wiki.gentoo.org/wiki/Prefix/libc ).
+In addition to the many applications available through modules (deployed through the EasyBuild packaging system), we provide an alternative source of applications on our clusters inferred from [Gentoo Linux](https://www.gentoo.org/). The user's environment is set up through a script which returns a bash instance to the user (you can think of it as starting a whole virtual machine, but inside your current namespace). The applications were optimized by the gcc compiler for the SandyBridge and IvyBridge platforms. The binaries use paths from the /apps/gentoo prefix to find the required runtime dependencies, config files, etc. The Gentoo Linux is a standalone installation, not even relying on the glibc provided by the host operating system (RedHat). The trick which allowed us to install Gentoo Linux on the host RedHat system is called Gentoo::RAP and uses a modified loader with a hardcoded path ([link](https://wiki.gentoo.org/wiki/Prefix/libc)).

-
-Starting the environment
-------------------------
+## Starting the Environment

```bash
-$ /apps/gentoo/startprefix
+mmokrejs@login2~$ /apps/gentoo/startprefix
```

-Starting PBS jobs using the applications
-----------------------------------------
+## Starting PBS Jobs Using the Applications

Create a template file which can be used as an argument to the qsub command. Notably, the 'PBS -S' line specifies the full PATH to the Bourne shell of the Gentoo Linux environment.

```bash
-$ cat myjob.pbs
+mmokrejs@login2~$ cat myjob.pbs
#PBS -S /apps/gentoo/bin/sh
#PBS -l nodes=1:ppn=16,walltime=12:00:00
#PBS -q qfree

@@ -40,19 +35,17 @@ $ qsub myjob.pbs

$ qstat
```

-Reading manual pages for installed applications
------------------------------------------------
+## Reading Manual Pages for Installed Applications

```bash
-$ man -M /apps/gentoo/usr/share/man bwa
-$ man -M /apps/gentoo/usr/share/man samtools
+mmokrejs@login2~$ man -M /apps/gentoo/usr/share/man bwa
+mmokrejs@login2~$ man -M /apps/gentoo/usr/share/man samtools
```

-Listing of bioinformatics applications
---------------------------------------
+## Listing of Bioinformatics Applications

```bash
-mmokrejs@login2 ~ $ grep biology /scratch/mmokrejs/gentoo_rap/installed.txt
+mmokrejs@login2~$ grep biology /scratch/mmokrejs/gentoo_rap/installed.txt
sci-biology/ANGLE-bin-20080813-r1
sci-biology/AlignGraph-9999
sci-biology/Atlas-Link-0.01-r1

@@ -180,7 +173,7 @@ sci-biology/zmsort-110625
```

```bash
-mmokrejs@login2 ~ $ grep sci-libs /scratch/mmokrejs/gentoo_rap/installed.txt
+mmokrejs@login2~$ grep sci-libs /scratch/mmokrejs/gentoo_rap/installed.txt
sci-libs/amd-2.3.1
sci-libs/blas-reference-20151113-r1
sci-libs/camd-2.3.1

@@ -208,53 +201,35 @@ sci-libs/qrupdate-1.1.2-r1
sci-libs/scikits-0.1-r1
sci-libs/suitesparseconfig-4.2.1
sci-libs/umfpack-5.6.2
-mmokrejs@login2 ~ $
-
- error-correctors
- aligners
- clusterers
- assemblers
- scaffolders
- motif searching
- ORF/gene prediction/genome annotation
- genotype/haplotype/popullation genetics
- phylogenetics
- transcriptome analysis
- utilities
- GUI
- libraries
```

-Classification of applications
------------------------------
-
-|Applications for bioinformatics at IT4I |
-|---|---|
-|error-correctors|6|
-|aligners|20|
-|clusterers|5|
-|assemblers|9|
-|scaffolders|6|
-|motif searching|6|
-|ORF/gene prediction/genome annotation|13|
-|genotype/haplotype/popullation genetics|3|
-|phylogenetics|1|
-|transcriptome analysis|2|
-|utilities|15|
-|GUI|3|
-|libraries|4|
-|**Total**|**93**|
+## Classification of Applications
+
+| Applications for bioinformatics at IT4I | |
+| ---------------------------------------- | ------ |
+| error-correctors | 6 |
+| aligners | 20 |
+| clusterers | 5 |
+| assemblers | 9 |
+| scaffolders | 6 |
+| motif searching | 6 |
+| ORF/gene prediction/genome annotation | 13 |
+| genotype/haplotype/population genetics | 3 |
+| phylogenetics | 1 |
+| transcriptome analysis | 2 |
+| utilities | 15 |
+| GUI | 3 |
+| libraries | 4 |
+| **Total** | **93** |

-
-Other applications available through Gentoo Linux
--------------------------------------------------
+## Other Applications Available Through Gentoo Linux

Gentoo Linux allows compilation of its applications from source code, using compiler and optimization flags set to the user's wish. This facilitates the creation of optimized binaries for the host platform. Users may also use several versions of gcc, python and other tools.
```bash
-$ gcc-config -l
-$ java-config -L
-$ eselect
+mmokrejs@login2~$ gcc-config -l
+mmokrejs@login2~$ java-config -L
+mmokrejs@login2~$ eselect
```
diff --git a/docs.it4i/software/eb.md b/docs.it4i/software/eb.md
new file mode 100644
index 0000000000000000000000000000000000000000..cde35d8897f81963bc1db77a0412c023b6a7c7a0
--- /dev/null
+++ b/docs.it4i/software/eb.md
@@ -0,0 +1 @@
+# EasyBuild
diff --git a/docs.it4i/software/lmod.md b/docs.it4i/software/lmod.md
new file mode 100644
index 0000000000000000000000000000000000000000..3ddd5cc1d1951de11047ea7cdfca91198d11aa19
--- /dev/null
+++ b/docs.it4i/software/lmod.md
@@ -0,0 +1,353 @@
+# Lmod Environment
+
+Lmod is a modules tool, a modern alternative to the outdated and no longer actively maintained Tcl-based environment modules tool.
+
+Detailed documentation on Lmod is available [here](http://lmod.readthedocs.io).
+
+!!! warning
+    All the new modules will be available in the Lmod environment only.
+
+## Important Dates
+
+| Date       | Action                    |
+| ---------- | ------------------------- |
+| 2017-02-01 | Testing phase             |
+| 2017-03-01 | Global deployment of Lmod |
+
+## How to Activate the Testing Lmod Environment?
+
+Create a folder or file `.lmod` in your home folder, then log out and log in again. The new Lmod environment will be active from then on.
+
+```bash
+$ mkdir ~/.lmod
+$ logout
+Connection to login4.salomon.it4i.cz closed.
+
+local~$ ssh vop999@login.it4i.cz
+    _____       _
+   / ____|     | |
+  | (___   __ _| | ___  _ __ ___   ___  _ __
+   \___ \ / _` | |/ _ \| '_ ` _ \ / _ \| '_ \
+   ____) | (_| | | (_) | | | | | | (_) | | | |
+  |_____/ \__,_|_|\___/|_| |_| |_|\___/|_| |_|
+
+                   http://www.it4i.cz/?lang=en
+
+$
+$ ml
+No modules loaded
+```
+
+## Benefits
+
+* significantly more responsive module commands, in particular module avail (ml av)
+* easier to use interface
+* module files can be written in either Tcl or Lua syntax (and both types of modules can be mixed together)
+
+## Introduction
+
+Below you will find more details and examples.
+
+| command                  | equivalent/explanation                                       |
+| ------------------------ | ------------------------------------------------------------ |
+| ml                       | module list                                                   |
+| ml GCC/6.2.0-2.27        | module load GCC/6.2.0-2.27                                    |
+| ml -GCC/6.2.0-2.27       | module unload GCC/6.2.0-2.27                                  |
+| ml purge                 | module unload all modules                                     |
+| ml av                    | module avail                                                  |
+| ml show GCC/6.2.0-2.27   | module show GCC/6.2.0-2.27                                    |
+| ml spider gcc            | searches (case-insensitive) for gcc in all available modules  |
+| ml spider GCC/6.2.0-2.27 | show all information about the module GCC/6.2.0-2.27         |
+| ml save mycollection     | stores the currently loaded modules to a collection           |
+| ml restore mycollection  | restores a previously stored collection of modules            |
+
+## Listing Loaded Modules
+
+To get an overview of the currently loaded modules, use module list or ml (without specifying extra arguments).
+
+```bash
+$ ml
+Currently Loaded Modules:
+  1) EasyBuild/3.0.0 (S)   2) lmod/7.2.2
+  Where:
+   S:  Module is Sticky, requires --force to unload or purge
+```
+
+!!! tip
+    For more details on sticky modules, see the section on [ml purge](#resetting-by-unloading-all-modules).
+
+## Searching for Available Modules
+
+To get an overview of all available modules, you can use ml avail or simply ml av:
+
+```bash
+$ ml av
+---------------------------------------- /apps/modules/compiler ----------------------------------------------
+   GCC/5.2.0    GCCcore/6.2.0 (D)    icc/2013.5.192    ifort/2013.5.192    LLVM/3.9.0-intel-2017.00 (D)
+   ...          ...
+
+---------------------------------------- /apps/modules/devel -------------------------------------------------
+   Autoconf/2.69-foss-2015g    CMake/3.0.0-intel-2016.01    M4/1.4.17-intel-2016.01    pkg-config/0.27.1-foss-2015g
+   Autoconf/2.69-foss-2016a    CMake/3.3.1-foss-2015g       M4/1.4.17-intel-2017.00    pkg-config/0.27.1-intel-2015b
+   ...                         ...
+```
+
+In the current module naming scheme, each module name consists of two parts:
+
+* the part before the first /, corresponding to the software name
+* the remainder, corresponding to the software version, the compiler toolchain that was used to install the software, and a possible version suffix
+
+!!! tip
+    The (D) indicates that this particular version of the module is the default, but we strongly recommend not to rely on this, as the default can change at any point. Usually, the default will point to the latest version available.
+
+## Searching for Modules
+
+If you just provide a software name, for example gcc, it prints an overview of all available modules for GCC.
+
+```bash
+$ ml spider gcc
+---------------------------------------------------------------------------------
+  GCC:
+---------------------------------------------------------------------------------
+    Description:
+      The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Java, and Ada, as well as libraries for these languages (libstdc++, libgcj,...). - Homepage: http://gcc.gnu.org/
+
+     Versions:
+        GCC/4.4.7-system
+        GCC/4.7.4
+        GCC/4.8.3
+        GCC/4.9.2-binutils-2.25
+        GCC/4.9.2
+        GCC/4.9.3-binutils-2.25
+        GCC/4.9.3
+        GCC/4.9.3-2.25
+        GCC/5.1.0-binutils-2.25
+        GCC/5.2.0
+        GCC/5.3.0-binutils-2.25
+        GCC/5.3.0-2.25
+        GCC/5.3.0-2.26
+        GCC/5.3.1-snapshot-20160419-2.25
+        GCC/5.4.0-2.26
+        GCC/6.2.0-2.27
+
+     Other possible modules matches:
+        GCCcore
+---------------------------------------------------------------------------------
+  To find other possible module matches do:
+      module -r spider '.*GCC.*'
+---------------------------------------------------------------------------------
+  For detailed information about a specific "GCC" module (including how to load the modules) use the module's full name.
+  For example:
+     $ module spider GCC/6.2.0-2.27
+---------------------------------------------------------------------------------
+```
+
+!!! tip
+    Spider is case-insensitive.
+
+If you use spider on a full module name like GCC/6.2.0-2.27, it will tell you on which cluster(s) that module is available:
+
+```bash
+$ module spider GCC/6.2.0-2.27
+--------------------------------------------------------------------------------------------------------------
+  GCC: GCC/6.2.0-2.27
+--------------------------------------------------------------------------------------------------------------
+    Description:
+      The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Java, and Ada, as well as libraries for these languages (libstdc++, libgcj,...). - Homepage: http://gcc.gnu.org/
+
+    This module can be loaded directly: module load GCC/6.2.0-2.27
+
+    Help:
+       The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Java, and Ada,
+       as well as libraries for these languages (libstdc++, libgcj,...). - Homepage: http://gcc.gnu.org/
+```
+
+This tells you what the module contains and a URL to the homepage of the software.
+
+## Available Modules for a Particular Software Package
+
+To check which modules are available for a particular software package, you can provide the software name to ml av.
+For example, to check which versions of git are available:
+
+```bash
+$ ml av git
+
+-------------------------------------- /apps/modules/tools ----------------------------------------
+   git/2.8.0-GNU-4.9.3-2.25    git/2.8.0-intel-2017.00    git/2.9.0    git/2.9.2    git/2.11.0 (D)
+
+  Where:
+   D:  Default Module
+
+Use "module spider" to find all possible modules.
+Use "module keyword key1 key2 ..." to search for all possible modules matching any of the "keys".
+```
+
+!!! tip
+    The specified software name is treated case-insensitively.
+
+Lmod does a partial match on the module name, so sometimes you need to use / to indicate the end of the software name you are interested in:
+
+```bash
+$ ml av GCC/
+
+------------------------------------------ /apps/modules/compiler -------------------------------------------
+GCC/4.4.7-system    GCC/4.8.3    GCC/4.9.2    GCC/4.9.3    GCC/5.1.0-binutils-2.25    GCC/5.3.0-binutils-2.25    GCC/5.3.0-2.26    GCC/5.4.0-2.26    GCC/4.7.4    GCC/4.9.2-binutils-2.25    GCC/4.9.3-binutils-2.25    GCC/4.9.3-2.25    GCC/5.2.0    GCC/5.3.0-2.25    GCC/6.2.0-2.27 (D)
+
+  Where:
+   D:  Default Module
+
+Use "module spider" to find all possible modules.
+Use "module keyword key1 key2 ..." to search for all possible modules matching any of the "keys".
+```
+
+## Inspecting a Module
+
+To see how a module would change the environment, use ml show:
+
+```bash
+$ ml show Python/3.5.2
+
+help([[Python is a programming language that lets you work more quickly and integrate your systems more effectively. - Homepage: http://python.org/]])
+whatis("Description: Python is a programming language that lets you work more quickly and integrate your systems more effectively. - Homepage: http://python.org/")
+conflict("Python")
+load("bzip2/1.0.6")
+load("zlib/1.2.8")
+load("libreadline/6.3")
+load("ncurses/5.9")
+load("SQLite/3.8.8.1")
+load("Tk/8.6.3")
+load("GMP/6.0.0a")
+load("XZ/5.2.2")
+prepend_path("CPATH","/apps/all/Python/3.5.2/include")
+prepend_path("LD_LIBRARY_PATH","/apps/all/Python/3.5.2/lib")
+prepend_path("LIBRARY_PATH","/apps/all/Python/3.5.2/lib")
+prepend_path("MANPATH","/apps/all/Python/3.5.2/share/man")
+prepend_path("PATH","/apps/all/Python/3.5.2/bin")
+prepend_path("PKG_CONFIG_PATH","/apps/all/Python/3.5.2/lib/pkgconfig")
+setenv("EBROOTPYTHON","/apps/all/Python/3.5.2")
+setenv("EBVERSIONPYTHON","3.5.2")
+setenv("EBDEVELPYTHON","/apps/all/Python/3.5.2/easybuild/Python-3.5.2-easybuild-devel")
+setenv("EBEXTSLISTPYTHON","setuptools-20.1.1,pip-8.0.2,nose-1.3.7")
+```
+
+!!! tip
+    Note that both the direct changes to the environment as well as other modules that will be loaded are shown.
+
+If you're not sure what all of this means: don't worry, you don't have to know; just try loading the module and using the software.
+
+## Loading Modules
+
+To effectively apply the changes to the environment that are specified by a module, use ml and specify the name of the module.
+For example, to set up your environment to use intel:
+
+```bash
+$ ml intel/2017.00
+$ ml
+Currently Loaded Modules:
+  1) GCCcore/5.4.0
+  2) binutils/2.26-GCCcore-5.4.0                 (H)
+  3) icc/2017.0.098-GCC-5.4.0-2.26
+  4) ifort/2017.0.098-GCC-5.4.0-2.26
+  5) iccifort/2017.0.098-GCC-5.4.0-2.26
+  6) impi/2017.0.098-iccifort-2017.0.098-GCC-5.4.0-2.26
+  7) iimpi/2017.00-GCC-5.4.0-2.26
+  8) imkl/2017.0.098-iimpi-2017.00-GCC-5.4.0-2.26
+  9) intel/2017.00
+
+  Where:
+   H:  Hidden Module
+```
+
+!!! tip
+    Note that even though we only loaded a single module, the output of ml shows that a whole bunch of modules were loaded, which are required dependencies for intel/2017.00.
+
+## Conflicting Modules
+
+!!! warning
+    It is important to note that **only modules that are compatible with each other can be loaded together**. In particular, modules must be installed either with the same toolchain as the modules that are already loaded, or with a compatible (sub)toolchain.
+
+For example, once you have loaded one or more modules that were installed with the intel/2017.00 toolchain, all other modules that you load should have been installed with the same toolchain.
+
+In addition, only **one single version** of each software package can be loaded at a particular time. For example, once you have the Python/3.5.2-intel-2017.00 module loaded, you cannot load a different version of Python in the same session/job script, neither directly, nor indirectly as a dependency of another module you want to load.
+
+## Unloading Modules
+
+To revert the changes to the environment that were made by a particular module, you can use ml -<modname>.
+For example:
+
+```bash
+$ ml
+Currently Loaded Modules:
+  1) EasyBuild/3.0.0 (S)   2) lmod/7.2.2
+$ which gcc
+/usr/bin/gcc
+$ ml GCC/
+$ ml
+Currently Loaded Modules:
+  1) EasyBuild/3.0.0 (S)   2) lmod/7.2.2   3) GCCcore/6.2.0   4) binutils/2.27-GCCcore-6.2.0 (H)   5) GCC/6.2.0-2.27
+$ which gcc
+/apps/all/GCCcore/6.2.0/bin/gcc
+$ ml -GCC
+$ ml
+Currently Loaded Modules:
+  1) EasyBuild/3.0.0 (S)   2) lmod/7.2.2   3) GCCcore/6.2.0   4) binutils/2.27-GCCcore-6.2.0 (H)
+$ which gcc
+/usr/bin/gcc
+```
+
+## Resetting by Unloading All Modules
+
+To reset your environment back to a clean state, you can use ml purge or ml purge --force:
+
+```bash
+$ ml
+Currently Loaded Modules:
+  1) EasyBuild/3.0.0 (S)   2) lmod/7.2.2   3) GCCcore/6.2.0   4) binutils/2.27-GCCcore-6.2.0 (H)
+$ ml purge
+The following modules were not unloaded:
+  (Use "module --force purge" to unload all):
+  1) EasyBuild/3.0.0
+$ ml
+Currently Loaded Modules:
+  1) EasyBuild/3.0.0 (S)
+$ ml purge --force
+$ ml
+No modules loaded
+```
+
+As such, you should not (re)load the cluster module anymore after running ml purge.
+
+## Module Collections
+
+If you have a set of modules that you need to load often, you can save these in a collection (this only works with Lmod).
+
+First, load all the modules you need, for example:
+
+```bash
+ml intel/2017.00 Python/3.5.2-intel-2017.00
+```
+
+Now store them in a collection using ml save:
+
+```bash
+$ ml save my-collection
+```
+
+Later, for example in a job script, you can reload all these modules with ml restore:
+
+```bash
+$ ml restore my-collection
+```
+
+With ml savelist you can get a list of all saved collections:
+
+```bash
+$ ml savelist
+Named collection list:
+  1) my-collection
+  2) my-test-collection
+```
+
+To inspect a collection, use ml describe.
+
+To remove a module collection, remove the corresponding entry in $HOME/.lmod.d.
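Both operations in one short sketch, reusing the my-collection example from above; the rm path follows the $HOME/.lmod.d location just described:

```bash
# show the modules stored in a saved collection
$ ml describe my-collection

# delete the collection by removing its entry
$ rm ~/.lmod.d/my-collection
```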
diff --git a/docs.it4i/software/orca.md b/docs.it4i/software/orca.md
index 93e6473431ea7172c6cde0a1f16d383023c82b00..8fcfd69bfb44f9f978b18d8b8ac4e82a71653f36 100644
--- a/docs.it4i/software/orca.md
+++ b/docs.it4i/software/orca.md
@@ -1,10 +1,8 @@
-ORCA
-====
+# ORCA

ORCA is a flexible, efficient and easy-to-use general purpose tool for quantum chemistry with specific emphasis on spectroscopic properties of open-shell molecules. It features a wide variety of standard quantum chemical methods ranging from semiempirical methods to DFT to single- and multireference correlated ab initio methods. It can also treat environmental and relativistic effects.

-Making orca available
---------------------
+## Making ORCA Available

The following module command makes the latest version of ORCA available to your session:

@@ -12,7 +10,7 @@ The following module command makes the latest version of orca available to your

$ module load ORCA/3_0_3-linux_x86-64
```

-**Dependency**
+### Dependency

```bash
$ module list

@@ -30,21 +28,20 @@ Currently Loaded Modulefiles:

11) ORCA/3_0_3-linux_x86-64
```

-Example single core job
-----------------------
+## Example Single Core Job

Create a file called orca_serial.inp that contains the following ORCA commands:

-```
-# My first ORCA calculation :-)
-#
-# Taken from the Orca manual
-# https://orcaforum.cec.mpg.de/OrcaManual.pdf
-! HF SVP
-* xyz 0 1
- C 0 0 0
- O 0 0 1.13
-*
+```
+    # My first ORCA calculation :-)
+    #
+    # Taken from the Orca manual
+    # https://orcaforum.cec.mpg.de/OrcaManual.pdf
+    ! HF SVP
+    * xyz 0 1
+    C 0 0 0
+    O 0 0 1.13
+    *
```

Create a Sun Grid Engine submission file called submit_serial.sh that looks like this

@@ -92,13 +89,10 @@ TOTAL RUN TIME: 0 days 0 hours 0 minutes 2 seconds 796 msec

qsub: job 196821.isrv5 completed
```

-Register as a user
------------------
+## Register as a User

You are encouraged to register as a user of ORCA [here](https://orcaforum.cec.mpg.de/) in order to take advantage of updates, announcements and also of the users forum.

-Documentation
-------------
+## Documentation

A comprehensive [.pdf](https://orcaforum.cec.mpg.de/OrcaManual.pdf) manual is available online.
-
diff --git a/docs.it4i/src/mympiprog_32p_2014-10-15_16-56.html b/docs.it4i/src/mympiprog_32p_2014-10-15_16-56.html
new file mode 100644
index 0000000000000000000000000000000000000000..ce60070a9ee25a91973a577fd048d88f31d4680e
--- /dev/null
+++ b/docs.it4i/src/mympiprog_32p_2014-10-15_16-56.html
@@ -0,0 +1,610 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<title>mympiprog.x - Performance Report</title>
+<style type="text/css">
+body {
+    font-family: "Lucida Grande", "Lucida Sans Unicode", "Bitstream Vera Sans",sans-serif;
+    font-size: 1em;
+    color: #2c1a0a;
+}
+div#content {
+    width: 900px;
+    margin-left: auto;
+    margin-right: auto;
+}
+.header {
+    padding-top: 16px;
+}
+.header_left {
+    float:left;
+    width: 532px;
+}
+.logo {
+    float: left;
+}
+.logo img { height: 150px; }
+#time_radar {
+    float: right;
+}
+#time_radar .legend_CPU { fill: #4fd32e; font-size: 1.5em; }
+#time_radar .legend_MPI { fill: #409ded; font-size: 1.5em; }
+#time_radar .legend_IO { fill: #ed8140; font-size: 1.5em; }
+.clear {
+    clear: both;
+}
+table {
+    border-spacing: 0;
+}
+td {
+    padding-left: 0;
+    padding-right: 16px;
+    padding-top: 1px;
+    padding-bottom: 1px;
+}
+#error {
+    border: 1px solid;
+    margin: 16px 0px;
+    padding: 16px 16px 16px 16px;
+    color: #C80000;
+    background-color: #FFC0C0;
+}
+#error p {
+    margin: 8px;
+}
+.application_details {
+    margin-top: 8px;
+    margin-left: 16px;
+    margin-right: 16px;
+}
+.application_details .details_key { width: 100px; color: #bdc4d5;}
+.application_details table {
+    table-layout: fixed;
+    width: 100%;
+    color: #37537b;
+}
+.application_details #cmdline {
+    word-wrap:
break-word; +} +.application_details #exe_path { + word-wrap: break-word; +} +.summary { +} +hr { + margin-top: 32px; + margin-bottom: 32px; + visibility: hidden; +} +.summary .heading { + font-family: inherit; + font-size: 1.8em; + padding-bottom: 8px; +} +#summary_cpu_class { + font-weight: bold; +} +#summary_mpi_class { + font-weight: bold; +} +#summary_io_class { + font-weight: bold; +} +.overview_general_advice p { + margin-top: 8px; + margin-bottom: 8px; +} +.subsections { + margin-bottom: 32px; +} +.subsections .heading { + font-family: inherit; + font-size: 2em; + padding-bottom: 8px; +} +.subsections .heading_cpu { + padding-bottom: 8px; +} +.subsections .heading_mpi { + padding-bottom: 8px; +} +.subsections .heading_ram { + padding-bottom: 8px; +} +.subsections .heading_io { + padding-bottom: 8px; +} +.subsections .explanation { + font-size: 0.9em; + color: #404040; + padding-top: 8px; +} + +.ltcol, .ctcol { float: left; width: 436px; padding-right: 16px; } +.rtcol { float: right; width: 436px; } + +.heading_cpu { color: #4fd32e; font-size: 1.5em; } +.heading_mpi { color: #409ded; font-size: 1.5em; } +.heading_ram { color: #ed4040; font-size: 1.5em; } +.heading_io { color: #ed8140; font-size: 1.5em; } +.cpu_span { color: #4fd32e; } +.mpi_span { color: #409ded; } +.ram { color: #ed4040; } +.io_span { color: #ed8140; } +.bar_graph { width: 200px; } +#cpu_bar { background-color: #4fd32e; width: 0; height: 2em; } +#mpi_bar { background-color: #409ded; width: 0; height: 2em; } +#io_bar { background-color: #ed8140; width: 0; height: 2em; } +.summary_table { padding-top: 16px; padding-bottom: 8px; } +.summary_table td p { margin: 0px; margin-bottom: 4px; } +.summary_table td { padding-bottom: 8px; } +.summary_table .details { font-size: 0.9em; } +.balanced_span { color: #bb58d6; } +.right_cell { text-align: right; } + +#cpu_chart { padding-top: 8px; } +#cpu_chart td { font-size: 0.9em; padding-bottom: 8px; } +.mini_bar_graph { width: 50px; } +#cpu_num_bar { background-color: #4fd32e; width: 0; height: 1em; } +#cpu_vec_bar { background-color: #3c9f23; width: 0; height: 1em; } +#cpu_mem_bar { background-color: #266516; width: 0; height: 1em; } +#cpu_other_bar { background-color: #808080; width: 0; height: 1em; } +.cpu_num_span { color: #4fd32e; } +.cpu_vec_span { color: #3c9f23; } +.cpu_mem_span { color: #266516; } +.cpu_other_span { color: #808080; } + +#mpi_chart { padding-top: 8px; } +#mpi_chart td { font-size: 0.9em; padding-bottom: 8px; } +#mpi_col_bar { background-color: #409ded; width: 0; height: 1em; } +#mpi_p2p_bar { background-color: #2f73ad; width: 0; height: 1em; } +#mpi_colrate_bar { background-color: #255a87; width: 0; height: 1em; } +#mpi_p2prate_bar { background-color: #1f4a70; width: 0; height: 1em; } +.mpi_col_span { color: #409ded; } +.mpi_p2p_span { color: #2f73ad; } +.mpi_colrate_span { color: #255a87; } +.mpi_p2prate_span { color: #1f4a70; } + +#ram_chart { padding-top: 8px; } +#ram_chart td { font-size: 0.9em; padding-bottom: 8px; } +#ram_mean_bar { background-color: #ed4040; width: 0; height: 1em; } +#ram_peak_bar { background-color: #b53131; width: 0; height: 1em; } +#ram_node_bar { background-color: #742020; width: 0; height: 1em; } +.ram_mean_span { color: #ed4040; } +.ram_peak_span { color: #b53131; } +.ram_node_span { color: #742020; } + +#io_chart { padding-top: 8px; } +#io_chart td { font-size: 0.9em; padding-bottom: 8px; } +#io_read_bar { background-color: #ed8140; width: 0; height: 1em; } +#io_write_bar { background-color: #a95e0b; width: 0; height: 1em; } 
+#io_readrate_bar { background-color: #9b7c14; width: 0; height: 1em; } +#io_writerate_bar { background-color: #7b6210; width: 0; height: 1em; } +.io_read_span { color: #ed8140; } +.io_write_span { color: #a95e0b; } +.io_readrate_span { color: #9b7c14; } +.io_writerate_span { color: #7b6210; } + +</style> +<script type="text/javascript">d3=function(){function n(n){return null!=n&&!isNaN(n)}function t(n){return n.length}function e(n){for(var t=1;n*t%1;)t*=10;return t}function r(n,t){try{for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}catch(r){n.prototype=t}}function u(){}function i(){}function o(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function a(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.substring(1);for(var e=0,r=la.length;r>e;++e){var u=la[e]+t;if(u in n)return u}}function c(){}function s(){}function l(n){function t(){for(var t,r=e,u=-1,i=r.length;++u<i;)(t=r[u].on)&&t.apply(this,arguments);return n}var e=[],r=new u;return t.on=function(t,u){var i,o=r.get(t);return arguments.length<2?o&&o.on:(o&&(o.on=null,e=e.slice(0,i=e.indexOf(o)).concat(e.slice(i+1)),r.remove(t)),u&&e.push(r.set(t,{on:u})),n)},t}function f(){$o.event.preventDefault()}function h(){for(var n,t=$o.event;n=t.sourceEvent;)t=n;return t}function g(n){for(var t=new s,e=0,r=arguments.length;++e<r;)t[arguments[e]]=l(t);return t.of=function(e,r){return function(u){try{var i=u.sourceEvent=$o.event;u.target=n,$o.event=u,t[u.type].apply(e,r)}finally{$o.event=i}}},t}function p(n){return ha(n,ma),n}function v(n){return"function"==typeof n?n:function(){return ga(n,this)}}function d(n){return"function"==typeof n?n:function(){return pa(n,this)}}function m(n,t){function e(){this.removeAttribute(n)}function r(){this.removeAttributeNS(n.space,n.local)}function u(){this.setAttribute(n,t)}function i(){this.setAttributeNS(n.space,n.local,t)}function o(){var e=t.apply(this,arguments);null==e?this.removeAttribute(n):this.setAttribute(n,e)}function a(){var e=t.apply(this,arguments);null==e?this.removeAttributeNS(n.space,n.local):this.setAttributeNS(n.space,n.local,e)}return n=$o.ns.qualify(n),null==t?n.local?r:e:"function"==typeof t?n.local?a:o:n.local?i:u}function y(n){return n.trim().replace(/\s+/g," ")}function x(n){return new RegExp("(?:^|\\s+)"+$o.requote(n)+"(?:\\s+|$)","g")}function M(n,t){function e(){for(var e=-1;++e<u;)n[e](this,t)}function r(){for(var e=-1,r=t.apply(this,arguments);++e<u;)n[e](this,r)}n=n.trim().split(/\s+/).map(_);var u=n.length;return"function"==typeof t?r:e}function _(n){var t=x(n);return function(e,r){if(u=e.classList)return r?u.add(n):u.remove(n);var u=e.getAttribute("class")||"";r?(t.lastIndex=0,t.test(u)||e.setAttribute("class",y(u+" "+n))):e.setAttribute("class",y(u.replace(t," ")))}}function b(n,t,e){function r(){this.style.removeProperty(n)}function u(){this.style.setProperty(n,t,e)}function i(){var r=t.apply(this,arguments);null==r?this.style.removeProperty(n):this.style.setProperty(n,r,e)}return null==t?r:"function"==typeof t?i:u}function w(n,t){function e(){delete this[n]}function r(){this[n]=t}function u(){var e=t.apply(this,arguments);null==e?delete this[n]:this[n]=e}return null==t?e:"function"==typeof t?u:r}function S(n){return"function"==typeof n?n:(n=$o.ns.qualify(n)).local?function(){return this.ownerDocument.createElementNS(n.space,n.local)}:function(){return this.ownerDocument.createElementNS(this.namespaceURI,n)}}function k(n){return{__data__:n}}function E(n){return function(){return da(this,n)}}function A(n){return 
t,e,r,u,i,o=[],a=-1,c=this.length;++a<c;){r=(u=this[a]).update,o.push(t=[]),t.parentNode=u.parentNode;for(var s=-1,l=u.length;++s<l;)(i=u[s])?(t.push(r[s]=e=n.call(u.parentNode,i.__data__,s,a)),e.__data__=i.__data__):t.push(null)}return p(o)},xa.insert=function(n,t){return arguments.length<2&&(t=L(this)),ma.insert.call(this,n,t)},ma.transition=function(){for(var n,t,e=Ss||++Ns,r=[],u=ks||{time:Date.now(),ease:Hr,delay:0,duration:250},i=-1,o=this.length;++i<o;){r.push(n=[]);for(var a=this[i],c=-1,s=a.length;++c<s;)(t=a[c])&&uo(t,c,e,u),n.push(t)}return to(r,e)},ma.interrupt=function(){return this.each(T)},$o.select=function(n){var t=["string"==typeof n?ga(n,Jo):n];return t.parentNode=Go,p([t])},$o.selectAll=function(n){var t=Wo("string"==typeof n?pa(n,Jo):n);return t.parentNode=Go,p([t])};var Ma=$o.select(Go);ma.on=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(q(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(q(n,t,e))};var _a=$o.map({mouseenter:"mouseover",mouseleave:"mouseout"});_a.forEach(function(n){"on"+n in Jo&&_a.remove(n)});var ba="onselectstart"in Jo?null:a(Go.style,"userSelect"),wa=0;$o.mouse=function(n){return P(n,h())};var Sa=/WebKit/.test(Ko.navigator.userAgent)?-1:0;$o.touches=function(n,t){return arguments.length<2&&(t=h().touches),t?Wo(t).map(function(t){var e=P(n,t);return e.identifier=t.identifier,e}):[]},$o.behavior.drag=function(){function n(){this.on("mousedown.drag",o).on("touchstart.drag",a)}function t(){return $o.event.changedTouches[0].identifier}function e(n,t){return $o.touches(n).filter(function(n){return n.identifier===t})[0]}function r(n,t,e,r){return function(){function o(){var n=t(l,g),e=n[0]-v[0],r=n[1]-v[1];d|=e|r,v=n,f({type:"drag",x:n[0]+c[0],y:n[1]+c[1],dx:e,dy:r})}function a(){m.on(e+"."+p,null).on(r+"."+p,null),y(d&&$o.event.target===h),f({type:"dragend"})}var c,s=this,l=s.parentNode,f=u.of(s,arguments),h=$o.event.target,g=n(),p=null==g?"drag":"drag-"+g,v=t(l,g),d=0,m=$o.select(Ko).on(e+"."+p,o).on(r+"."+p,a),y=D();i?(c=i.apply(s,arguments),c=[c.x-v[0],c.y-v[1]]):c=[0,0],f({type:"dragstart"})}}var u=g(n,"drag","dragstart","dragend"),i=null,o=r(c,$o.mouse,"mousemove","mouseup"),a=r(t,e,"touchmove","touchend");return n.origin=function(t){return arguments.length?(i=t,n):i},$o.rebind(n,u,"on")};var ka=Math.PI,Ea=2*ka,Aa=ka/2,Ca=1e-6,Na=Ca*Ca,La=ka/180,Ta=180/ka,qa=Math.SQRT2,za=2,Ra=4;$o.interpolateZoom=function(n,t){function e(n){var t=n*y;if(m){var e=O(v),o=i/(za*h)*(e*Y(qa*t+v)-F(v));return[r+o*s,u+o*l,i*e/O(qa*t+v)]}return[r+n*s,u+n*l,i*Math.exp(qa*t)]}var r=n[0],u=n[1],i=n[2],o=t[0],a=t[1],c=t[2],s=o-r,l=a-u,f=s*s+l*l,h=Math.sqrt(f),g=(c*c-i*i+Ra*f)/(2*i*za*h),p=(c*c-i*i-Ra*f)/(2*c*za*h),v=Math.log(Math.sqrt(g*g+1)-g),d=Math.log(Math.sqrt(p*p+1)-p),m=d-v,y=(m||Math.log(c/i))/qa;return e.duration=1e3*y,e},$o.behavior.zoom=function(){function n(n){n.on(A,s).on(Ua+".zoom",h).on(C,p).on("dblclick.zoom",v).on(L,l)}function t(n){return[(n[0]-S.x)/S.k,(n[1]-S.y)/S.k]}function e(n){return[n[0]*S.k+S.x,n[1]*S.k+S.y]}function r(n){S.k=Math.max(E[0],Math.min(E[1],n))}function u(n,t){t=e(t),S.x+=n[0]-t[0],S.y+=n[1]-t[1]}function i(){_&&_.domain(M.range().map(function(n){return(n-S.x)/S.k}).map(M.invert)),w&&w.domain(b.range().map(function(n){return(n-S.y)/S.k}).map(b.invert))}function o(n){n({type:"zoomstart"})}function a(n){i(),n({type:"zoom",scale:S.k,translate:[S.x,S.y]})}function c(n){n({type:"zoomend"})}function s(){function 
n(){l=1,u($o.mouse(r),h),a(i)}function e(){f.on(C,Ko===r?p:null).on(N,null),g(l&&$o.event.target===s),c(i)}var r=this,i=q.of(r,arguments),s=$o.event.target,l=0,f=$o.select(Ko).on(C,n).on(N,e),h=t($o.mouse(r)),g=D();T.call(r),o(i)}function l(){function n(){var n=$o.touches(p);return g=S.k,n.forEach(function(n){n.identifier in d&&(d[n.identifier]=t(n))}),n}function e(){for(var t=$o.event.changedTouches,e=0,i=t.length;i>e;++e)d[t[e].identifier]=null;var o=n(),c=Date.now();if(1===o.length){if(500>c-x){var s=o[0],l=d[s.identifier];r(2*S.k),u(s,l),f(),a(v)}x=c}else if(o.length>1){var s=o[0],h=o[1],g=s[0]-h[0],p=s[1]-h[1];m=g*g+p*p}}function i(){for(var n,t,e,i,o=$o.touches(p),c=0,s=o.length;s>c;++c,i=null)if(e=o[c],i=d[e.identifier]){if(t)break;n=e,t=i}if(i){var l=(l=e[0]-n[0])*l+(l=e[1]-n[1])*l,f=m&&Math.sqrt(l/m);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+i[0])/2,(t[1]+i[1])/2],r(f*g)}x=null,u(n,t),a(v)}function h(){if($o.event.touches.length){for(var t=$o.event.changedTouches,e=0,r=t.length;r>e;++e)delete d[t[e].identifier];for(var u in d)return void n()}b.on(M,null).on(_,null),w.on(A,s).on(L,l),k(),c(v)}var g,p=this,v=q.of(p,arguments),d={},m=0,y=$o.event.changedTouches[0].identifier,M="touchmove.zoom-"+y,_="touchend.zoom-"+y,b=$o.select(Ko).on(M,i).on(_,h),w=$o.select(p).on(A,null).on(L,e),k=D();T.call(p),e(),o(v)}function h(){var n=q.of(this,arguments);y?clearTimeout(y):(T.call(this),o(n)),y=setTimeout(function(){y=null,c(n)},50),f();var e=m||$o.mouse(this);d||(d=t(e)),r(Math.pow(2,.002*Da())*S.k),u(e,d),a(n)}function p(){d=null}function v(){var n=q.of(this,arguments),e=$o.mouse(this),i=t(e),s=Math.log(S.k)/Math.LN2;o(n),r(Math.pow(2,$o.event.shiftKey?Math.ceil(s)-1:Math.floor(s)+1)),u(e,i),a(n),c(n)}var d,m,y,x,M,_,b,w,S={x:0,y:0,k:1},k=[960,500],E=Pa,A="mousedown.zoom",C="mousemove.zoom",N="mouseup.zoom",L="touchstart.zoom",q=g(n,"zoomstart","zoom","zoomend");return n.event=function(n){n.each(function(){var n=q.of(this,arguments),t=S;Ss?$o.select(this).transition().each("start.zoom",function(){S=this.__chart__||{x:0,y:0,k:1},o(n)}).tween("zoom:zoom",function(){var e=k[0],r=k[1],u=e/2,i=r/2,o=$o.interpolateZoom([(u-S.x)/S.k,(i-S.y)/S.k,e/S.k],[(u-t.x)/t.k,(i-t.y)/t.k,e/t.k]);return function(t){var r=o(t),c=e/r[2];this.__chart__=S={x:u-r[0]*c,y:i-r[1]*c,k:c},a(n)}}).each("end.zoom",function(){c(n)}):(this.__chart__=S,o(n),a(n),c(n))})},n.translate=function(t){return arguments.length?(S={x:+t[0],y:+t[1],k:S.k},i(),n):[S.x,S.y]},n.scale=function(t){return arguments.length?(S={x:S.x,y:S.y,k:+t},i(),n):S.k},n.scaleExtent=function(t){return arguments.length?(E=null==t?Pa:[+t[0],+t[1]],n):E},n.center=function(t){return arguments.length?(m=t&&[+t[0],+t[1]],n):m},n.size=function(t){return arguments.length?(k=t&&[+t[0],+t[1]],n):k},n.x=function(t){return arguments.length?(_=t,M=t.copy(),S={x:0,y:0,k:1},n):_},n.y=function(t){return arguments.length?(w=t,b=t.copy(),S={x:0,y:0,k:1},n):w},$o.rebind(n,q,"on")};var Da,Pa=[0,1/0],Ua="onwheel"in Jo?(Da=function(){return-$o.event.deltaY*($o.event.deltaMode?120:1)},"wheel"):"onmousewheel"in Jo?(Da=function(){return $o.event.wheelDelta},"mousewheel"):(Da=function(){return-$o.event.detail},"MozMousePixelScroll");Z.prototype.toString=function(){return this.rgb()+""},$o.hsl=function(n,t,e){return 1===arguments.length?n instanceof X?V(n.h,n.s,n.l):st(""+n,lt,V):V(+n,+t,+e)};var ja=X.prototype=new Z;ja.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),V(this.h,this.s,this.l/n)},ja.darker=function(n){return 
n=Math.pow(.7,arguments.length?n:1),V(this.h,this.s,n*this.l)},ja.rgb=function(){return $(this.h,this.s,this.l)},$o.hcl=function(n,t,e){return 1===arguments.length?n instanceof W?B(n.h,n.c,n.l):n instanceof K?nt(n.l,n.a,n.b):nt((n=ft((n=$o.rgb(n)).r,n.g,n.b)).l,n.a,n.b):B(+n,+t,+e)};var Ha=W.prototype=new Z;Ha.brighter=function(n){return B(this.h,this.c,Math.min(100,this.l+Fa*(arguments.length?n:1)))},Ha.darker=function(n){return B(this.h,this.c,Math.max(0,this.l-Fa*(arguments.length?n:1)))},Ha.rgb=function(){return J(this.h,this.c,this.l).rgb()},$o.lab=function(n,t,e){return 1===arguments.length?n instanceof K?G(n.l,n.a,n.b):n instanceof W?J(n.l,n.c,n.h):ft((n=$o.rgb(n)).r,n.g,n.b):G(+n,+t,+e)};var Fa=18,Oa=.95047,Ya=1,Ia=1.08883,Za=K.prototype=new Z;Za.brighter=function(n){return G(Math.min(100,this.l+Fa*(arguments.length?n:1)),this.a,this.b)},Za.darker=function(n){return G(Math.max(0,this.l-Fa*(arguments.length?n:1)),this.a,this.b)},Za.rgb=function(){return Q(this.l,this.a,this.b)},$o.rgb=function(n,t,e){return 1===arguments.length?n instanceof at?ot(n.r,n.g,n.b):st(""+n,ot,$):ot(~~n,~~t,~~e)};var Va=at.prototype=new Z;Va.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,u=30;return t||e||r?(t&&u>t&&(t=u),e&&u>e&&(e=u),r&&u>r&&(r=u),ot(Math.min(255,~~(t/n)),Math.min(255,~~(e/n)),Math.min(255,~~(r/n)))):ot(u,u,u)},Va.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),ot(~~(n*this.r),~~(n*this.g),~~(n*this.b))},Va.hsl=function(){return lt(this.r,this.g,this.b)},Va.toString=function(){return"#"+ct(this.r)+ct(this.g)+ct(this.b)};var Xa=$o.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedra
b:7048739,orange:16753920,orangered:16729344,orchid:14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});Xa.forEach(function(n,t){Xa.set(n,ut(t))}),$o.functor=pt,$o.xhr=dt(vt),$o.dsv=function(n,t){function e(n,e,i){arguments.length<3&&(i=e,e=null);var o=$o.xhr(n,t,i);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:u(n)):e},o.row(e)}function r(n){return e.parse(n.responseText)}function u(n){return function(t){return e.parse(t.responseText,n)}}function o(t){return t.map(a).join(n)}function a(n){return c.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var c=new RegExp('["'+n+"\n]"),s=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var u=new Function("d","return {"+n.map(function(n,t){return JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(u(n),e)}:u})},e.parseRows=function(n,t){function e(){if(l>=c)return o;if(u)return u=!1,i;var t=l;if(34===n.charCodeAt(t)){for(var e=t;e++<c;)if(34===n.charCodeAt(e)){if(34!==n.charCodeAt(e+1))break;++e}l=e+2;var r=n.charCodeAt(e+1);return 13===r?(u=!0,10===n.charCodeAt(e+2)&&++l):10===r&&(u=!0),n.substring(t+1,e).replace(/""/g,'"')}for(;c>l;){var r=n.charCodeAt(l++),a=1;if(10===r)u=!0;else if(13===r)u=!0,10===n.charCodeAt(l)&&(++l,++a);else if(r!==s)continue;return n.substring(t,l-a)}return n.substring(t)}for(var r,u,i={},o={},a=[],c=n.length,l=0,f=0;(r=e())!==o;){for(var h=[];r!==i&&r!==o;)h.push(r),r=e();(!t||(h=t(h,f++)))&&a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new i,u=[];return t.forEach(function(n){for(var t in n)r.has(t)||u.push(r.add(t))}),[u.map(a).join(n)].concat(t.map(function(t){return u.map(function(n){return a(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(o).join("\n")},e},$o.csv=$o.dsv(",","text/csv"),$o.tsv=$o.dsv(" ","text/tab-separated-values");var $a,Ba,Wa,Ja,Ga,Ka=Ko[a(Ko,"requestAnimationFrame")]||function(n){setTimeout(n,17)};$o.timer=function(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var u=e+t,i={c:n,t:u,f:!1,n:null};Ba?Ba.n=i:$a=i,Ba=i,Wa||(Ja=clearTimeout(Ja),Wa=1,Ka(xt))},$o.timer.flush=function(){Mt(),_t()};var Qa=".",nc=",",tc=[3,3],ec="$",rc=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(bt);$o.formatPrefix=function(n,t){var e=0;return n&&(0>n&&(n*=-1),t&&(n=$o.round(n,wt(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((0>=e?e+1:e-1)/3)))),rc[8+e/3]},$o.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)},$o.format=function(n){var t=uc.exec(n),e=t[1]||" 
",r=t[2]||">",u=t[3]||"",i=t[4]||"",o=t[5],a=+t[6],c=t[7],s=t[8],l=t[9],f=1,h="",g=!1;switch(s&&(s=+s.substring(1)),(o||"0"===e&&"="===r)&&(o=e="0",r="=",c&&(a-=Math.floor((a-1)/4))),l){case"n":c=!0,l="g";break;case"%":f=100,h="%",l="f";break;case"p":f=100,h="%",l="r";break;case"b":case"o":case"x":case"X":"#"===i&&(i="0"+l.toLowerCase());case"c":case"d":g=!0,s=0;break;case"s":f=-1,l="r"}"#"===i?i="":"$"===i&&(i=ec),"r"!=l||s||(l="g"),null!=s&&("g"==l?s=Math.max(1,Math.min(21,s)):("e"==l||"f"==l)&&(s=Math.max(0,Math.min(20,s)))),l=ic.get(l)||St;var p=o&&c;return function(n){if(g&&n%1)return"";var t=0>n||0===n&&0>1/n?(n=-n,"-"):u;if(0>f){var v=$o.formatPrefix(n,s);n=v.scale(n),h=v.symbol}else n*=f;n=l(n,s);var d=n.lastIndexOf("."),m=0>d?n:n.substring(0,d),y=0>d?"":Qa+n.substring(d+1);!o&&c&&(m=oc(m));var x=i.length+m.length+y.length+(p?0:t.length),M=a>x?new Array(x=a-x+1).join(e):"";return p&&(m=oc(M+m)),t+=i,n=m+y,("<"===r?t+n+M:">"===r?M+t+n:"^"===r?M.substring(0,x>>=1)+t+n+M.substring(x):t+(p?n:M+n))+h}};var uc=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,ic=$o.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return n.toFixed(t)},r:function(n,t){return(n=$o.round(n,wt(n,t))).toFixed(Math.max(0,Math.min(20,wt(n*(1+1e-15),t))))}}),oc=vt;if(tc){var ac=tc.length;oc=function(n){for(var t=n.length,e=[],r=0,u=tc[0];t>0&&u>0;)e.push(n.substring(t-=u,t+u)),u=tc[r=(r+1)%ac];return e.reverse().join(nc)}}$o.geo={},kt.prototype={s:0,t:0,add:function(n){Et(n,this.t,cc),Et(cc.s,this.s,this),this.s?this.t+=cc.t:this.s=cc.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var cc=new kt;$o.geo.stream=function(n,t){n&&sc.hasOwnProperty(n.type)?sc[n.type](n,t):At(n,t)};var sc={Feature:function(n,t){At(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,u=e.length;++r<u;)At(e[r].geometry,t)}},lc={Sphere:function(n,t){t.sphere()},Point:function(n,t){n=n.coordinates,t.point(n[0],n[1],n[2])},MultiPoint:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)n=e[r],t.point(n[0],n[1],n[2])},LineString:function(n,t){Ct(n.coordinates,t,0)},MultiLineString:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)Ct(e[r],t,0)},Polygon:function(n,t){Nt(n.coordinates,t)},MultiPolygon:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)Nt(e[r],t)},GeometryCollection:function(n,t){for(var e=n.geometries,r=-1,u=e.length;++r<u;)At(e[r],t)}};$o.geo.area=function(n){return fc=0,$o.geo.stream(n,gc),fc};var fc,hc=new kt,gc={sphere:function(){fc+=4*ka},point:c,lineStart:c,lineEnd:c,polygonStart:function(){hc.reset(),gc.lineStart=Lt},polygonEnd:function(){var n=2*hc;fc+=0>n?4*ka+n:n,gc.lineStart=gc.lineEnd=gc.point=c}};$o.geo.bounds=function(){function n(n,t){x.push(M=[l=n,h=n]),f>t&&(f=t),t>g&&(g=t)}function t(t,e){var r=Tt([t*La,e*La]);if(m){var u=zt(m,r),i=[u[1],-u[0],0],o=zt(i,u);Pt(o),o=Ut(o);var c=t-p,s=c>0?1:-1,v=o[0]*Ta*s,d=aa(c)>180;if(d^(v>s*p&&s*t>v)){var y=o[1]*Ta;y>g&&(g=y)}else if(v=(v+360)%360-180,d^(v>s*p&&s*t>v)){var y=-o[1]*Ta;f>y&&(f=y)}else f>e&&(f=e),e>g&&(g=e);d?p>t?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t):h>=l?(l>t&&(l=t),t>h&&(h=t)):t>p?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t)}else n(t,e);m=r,p=t}function e(){_.point=t}function 
r(){M[0]=l,M[1]=h,_.point=n,m=null}function u(n,e){if(m){var r=n-p;y+=aa(r)>180?r+(r>0?360:-360):r}else v=n,d=e;gc.point(n,e),t(n,e)}function i(){gc.lineStart()}function o(){u(v,d),gc.lineEnd(),aa(y)>Ca&&(l=-(h=180)),M[0]=l,M[1]=h,m=null}function a(n,t){return(t-=n)<0?t+360:t}function c(n,t){return n[0]-t[0]}function s(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:n<t[0]||t[1]<n}var l,f,h,g,p,v,d,m,y,x,M,_={point:n,lineStart:e,lineEnd:r,polygonStart:function(){_.point=u,_.lineStart=i,_.lineEnd=o,y=0,gc.polygonStart()},polygonEnd:function(){gc.polygonEnd(),_.point=n,_.lineStart=e,_.lineEnd=r,0>hc?(l=-(h=180),f=-(g=90)):y>Ca?g=90:-Ca>y&&(f=-90),M[0]=l,M[1]=h}};return function(n){g=h=-(l=f=1/0),x=[],$o.geo.stream(n,_);var t=x.length;if(t){x.sort(c);for(var e,r=1,u=x[0],i=[u];t>r;++r)e=x[r],s(e[0],u)||s(e[1],u)?(a(u[0],e[1])>a(u[0],u[1])&&(u[1]=e[1]),a(e[0],u[1])>a(u[0],u[1])&&(u[0]=e[0])):i.push(u=e);for(var o,e,p=-1/0,t=i.length-1,r=0,u=i[t];t>=r;u=e,++r)e=i[r],(o=a(u[1],e[0]))>p&&(p=o,l=e[0],h=u[1])}return x=M=null,1/0===l||1/0===f?[[0/0,0/0],[0/0,0/0]]:[[l,f],[h,g]]}}(),$o.geo.centroid=function(n){pc=vc=dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,$o.geo.stream(n,kc);var t=bc,e=wc,r=Sc,u=t*t+e*e+r*r;return Na>u&&(t=xc,e=Mc,r=_c,Ca>vc&&(t=dc,e=mc,r=yc),u=t*t+e*e+r*r,Na>u)?[0/0,0/0]:[Math.atan2(e,t)*Ta,H(r/Math.sqrt(u))*Ta]};var pc,vc,dc,mc,yc,xc,Mc,_c,bc,wc,Sc,kc={sphere:c,point:Ht,lineStart:Ot,lineEnd:Yt,polygonStart:function(){kc.lineStart=It},polygonEnd:function(){kc.lineStart=Ot}},Ec=Bt(Zt,Qt,te,[-ka,-ka/2]),Ac=1e9;$o.geo.clipExtent=function(){var n,t,e,r,u,i,o={stream:function(n){return u&&(u.valid=!1),u=i(n),u.valid=!0,u},extent:function(a){return arguments.length?(i=ue(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),u&&(u.valid=!1,u=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},($o.geo.conicEqualArea=function(){return oe(ae)}).raw=ae,$o.geo.albers=function(){return $o.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},$o.geo.albersUsa=function(){function n(n){var i=n[0],o=n[1];return t=null,e(i,o),t||(r(i,o),t)||u(i,o),t}var t,e,r,u,i=$o.geo.albers(),o=$o.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=$o.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),c={point:function(n,e){t=[n,e]}};return n.invert=function(n){var t=i.scale(),e=i.translate(),r=(n[0]-e[0])/t,u=(n[1]-e[1])/t;return(u>=.12&&.234>u&&r>=-.425&&-.214>r?o:u>=.166&&.234>u&&r>=-.214&&-.115>r?a:i).invert(n)},n.stream=function(n){var t=i.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,u){t.point(n,u),e.point(n,u),r.point(n,u)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(i.precision(t),o.precision(t),a.precision(t),n):i.precision()},n.scale=function(t){return arguments.length?(i.scale(t),o.scale(.35*t),a.scale(t),n.translate(i.translate())):i.scale()},n.translate=function(t){if(!arguments.length)return i.translate();var s=i.scale(),l=+t[0],f=+t[1];return 
e=i.translate(t).clipExtent([[l-.455*s,f-.238*s],[l+.455*s,f+.238*s]]).stream(c).point,r=o.translate([l-.307*s,f+.201*s]).clipExtent([[l-.425*s+Ca,f+.12*s+Ca],[l-.214*s-Ca,f+.234*s-Ca]]).stream(c).point,u=a.translate([l-.205*s,f+.212*s]).clipExtent([[l-.214*s+Ca,f+.166*s+Ca],[l-.115*s-Ca,f+.234*s-Ca]]).stream(c).point,n},n.scale(1070)};var Cc,Nc,Lc,Tc,qc,zc,Rc={point:c,lineStart:c,lineEnd:c,polygonStart:function(){Nc=0,Rc.lineStart=ce},polygonEnd:function(){Rc.lineStart=Rc.lineEnd=Rc.point=c,Cc+=aa(Nc/2)}},Dc={point:se,lineStart:c,lineEnd:c,polygonStart:c,polygonEnd:c},Pc={point:he,lineStart:ge,lineEnd:pe,polygonStart:function(){Pc.lineStart=ve},polygonEnd:function(){Pc.point=he,Pc.lineStart=ge,Pc.lineEnd=pe}};$o.geo.path=function(){function n(n){return n&&("function"==typeof a&&i.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=u(i)),$o.geo.stream(n,o)),i.result()}function t(){return o=null,n}var e,r,u,i,o,a=4.5;return n.area=function(n){return Cc=0,$o.geo.stream(n,u(Rc)),Cc},n.centroid=function(n){return dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,$o.geo.stream(n,u(Pc)),Sc?[bc/Sc,wc/Sc]:_c?[xc/_c,Mc/_c]:yc?[dc/yc,mc/yc]:[0/0,0/0]},n.bounds=function(n){return qc=zc=-(Lc=Tc=1/0),$o.geo.stream(n,u(Dc)),[[Lc,Tc],[qc,zc]]},n.projection=function(n){return arguments.length?(u=(e=n)?n.stream||ye(n):vt,t()):e},n.context=function(n){return arguments.length?(i=null==(r=n)?new le:new de(n),"function"!=typeof a&&i.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof t?t:(i.pointRadius(+t),+t),n):a},n.projection($o.geo.albersUsa()).context(null)},$o.geo.transform=function(n){return{stream:function(t){var e=new xe(t);for(var r in n)e[r]=n[r];return e}}},xe.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},$o.geo.projection=_e,$o.geo.projectionMutator=be,($o.geo.equirectangular=function(){return _e(Se)}).raw=Se.invert=Se,$o.geo.rotation=function(n){function t(t){return t=n(t[0]*La,t[1]*La),t[0]*=Ta,t[1]*=Ta,t}return n=Ee(n[0]%360*La,n[1]*La,n.length>2?n[2]*La:0),t.invert=function(t){return t=n.invert(t[0]*La,t[1]*La),t[0]*=Ta,t[1]*=Ta,t},t},ke.invert=Se,$o.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=Ee(-n[0]*La,-n[1]*La,0).invert,u=[];return e(null,null,1,{point:function(n,e){u.push(n=t(n,e)),n[0]*=Ta,n[1]*=Ta}}),{type:"Polygon",coordinates:[u]}}var t,e,r=[0,0],u=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=Le((t=+r)*La,u*La),n):t},n.precision=function(r){return arguments.length?(e=Le(t*La,(u=+r)*La),n):u},n.angle(90)},$o.geo.distance=function(n,t){var e,r=(t[0]-n[0])*La,u=n[1]*La,i=t[1]*La,o=Math.sin(r),a=Math.cos(r),c=Math.sin(u),s=Math.cos(u),l=Math.sin(i),f=Math.cos(i);return Math.atan2(Math.sqrt((e=f*o)*e+(e=s*l-c*f*a)*e),c*l+s*f*a)},$o.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return $o.range(Math.ceil(i/d)*d,u,d).map(h).concat($o.range(Math.ceil(s/m)*m,c,m).map(g)).concat($o.range(Math.ceil(r/p)*p,e,p).filter(function(n){return aa(n%d)>Ca}).map(l)).concat($o.range(Math.ceil(a/v)*v,o,v).filter(function(n){return aa(n%m)>Ca}).map(f))}var e,r,u,i,o,a,c,s,l,f,h,g,p=10,v=p,d=90,m=360,y=2.5;return n.lines=function(){return
t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(i).concat(g(c).slice(1),h(u).reverse().slice(1),g(s).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return arguments.length?(i=+t[0][0],u=+t[1][0],s=+t[0][1],c=+t[1][1],i>u&&(t=i,i=u,u=t),s>c&&(t=s,s=c,c=t),n.precision(y)):[[i,s],[u,c]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(y)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return arguments.length?(d=+t[0],m=+t[1],n):[d,m]},n.minorStep=function(t){return arguments.length?(p=+t[0],v=+t[1],n):[p,v]},n.precision=function(t){return arguments.length?(y=+t,l=qe(a,o,90),f=ze(r,e,y),h=qe(s,c,90),g=ze(i,u,y),n):y},n.majorExtent([[-180,-90+Ca],[180,90-Ca]]).minorExtent([[-180,-80-Ca],[180,80+Ca]])},$o.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||u.apply(this,arguments)]}}var t,e,r=Re,u=De;return n.distance=function(){return $o.geo.distance(t||r.apply(this,arguments),e||u.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(u=t,e="function"==typeof t?null:t,n):u},n.precision=function(){return arguments.length?n:0},n},$o.geo.interpolate=function(n,t){return Pe(n[0]*La,n[1]*La,t[0]*La,t[1]*La)},$o.geo.length=function(n){return Uc=0,$o.geo.stream(n,jc),Uc};var Uc,jc={sphere:c,point:c,lineStart:Ue,lineEnd:c,polygonStart:c,polygonEnd:c},Hc=je(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});($o.geo.azimuthalEqualArea=function(){return _e(Hc)}).raw=Hc;var Fc=je(function(n){var t=Math.acos(n);return t&&t/Math.sin(t)},vt);($o.geo.azimuthalEquidistant=function(){return _e(Fc)}).raw=Fc,($o.geo.conicConformal=function(){return oe(He)}).raw=He,($o.geo.conicEquidistant=function(){return oe(Fe)}).raw=Fe;var Oc=je(function(n){return 1/n},Math.atan);($o.geo.gnomonic=function(){return _e(Oc)}).raw=Oc,Oe.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Aa]},($o.geo.mercator=function(){return Ye(Oe)}).raw=Oe;var Yc=je(function(){return 1},Math.asin);($o.geo.orthographic=function(){return _e(Yc)}).raw=Yc;var Ic=je(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});($o.geo.stereographic=function(){return _e(Ic)}).raw=Ic,Ie.invert=function(n,t){return[Math.atan2(F(n),Math.cos(t)),H(Math.sin(t)/O(n))]},($o.geo.transverseMercator=function(){return Ye(Ie)}).raw=Ie,$o.geom={},$o.geom.hull=function(n){function t(n){if(n.length<3)return[];var t,u,i,o,a,c,s,l,f,h,g,p,v=pt(e),d=pt(r),m=n.length,y=m-1,x=[],M=[],_=0;if(v===Ze&&r===Ve)t=n;else for(i=0,t=[];m>i;++i)t.push([+v.call(this,u=n[i],i),+d.call(this,u,i)]);for(i=1;m>i;++i)(t[i][1]<t[_][1]||t[i][1]==t[_][1]&&t[i][0]<t[_][0])&&(_=i);for(i=0;m>i;++i)i!==_&&(c=t[i][1]-t[_][1],a=t[i][0]-t[_][0],x.push({angle:Math.atan2(c,a),index:i}));for(x.sort(function(n,t){return 
n.angle-t.angle}),g=x[0].angle,h=x[0].index,f=0,i=1;y>i;++i){if(o=x[i].index,g==x[i].angle){if(a=t[h][0]-t[_][0],c=t[h][1]-t[_][1],s=t[o][0]-t[_][0],l=t[o][1]-t[_][1],a*a+c*c>=s*s+l*l){x[i].index=-1;continue}x[f].index=-1}g=x[i].angle,f=i,h=o}for(M.push(_),i=0,o=0;2>i;++o)x[o].index>-1&&(M.push(x[o].index),i++);for(p=M.length;y>o;++o)if(!(x[o].index<0)){for(;!Xe(M[p-2],M[p-1],x[o].index,t);)--p;M[p++]=x[o].index}var b=[];for(i=p-1;i>=0;--i)b.push(n[M[i]]);return b}var e=Ze,r=Ve;return arguments.length?t(n):(t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t)},$o.geom.polygon=function(n){return ha(n,Zc),n};var Zc=$o.geom.polygon.prototype=[];Zc.area=function(){for(var n,t=-1,e=this.length,r=this[e-1],u=0;++t<e;)n=r,r=this[t],u+=n[1]*r[0]-n[0]*r[1];return.5*u},Zc.centroid=function(n){var t,e,r=-1,u=this.length,i=0,o=0,a=this[u-1];for(arguments.length||(n=-1/(6*this.area()));++r<u;)t=a,a=this[r],e=t[0]*a[1]-a[0]*t[1],i+=(t[0]+a[0])*e,o+=(t[1]+a[1])*e;return[i*n,o*n]},Zc.clip=function(n){for(var t,e,r,u,i,o,a=We(n),c=-1,s=this.length-We(this),l=this[s-1];++c<s;){for(t=n.slice(),n.length=0,u=this[c],i=t[(r=t.length-a)-1],e=-1;++e<r;)o=t[e],$e(o,l,u)?($e(i,l,u)||n.push(Be(i,o,l,u)),n.push(o)):$e(i,l,u)&&n.push(Be(i,o,l,u)),i=o;a&&n.push(n[0]),l=u}return n};var Vc,Xc,$c,Bc,Wc,Jc=[],Gc=[];rr.prototype.prepare=function(){for(var n,t=this.edges,e=t.length;e--;)n=t[e].edge,n.b&&n.a||t.splice(e,1);return t.sort(ir),t.length},vr.prototype={start:function(){return this.edge.l===this.site?this.edge.a:this.edge.b},end:function(){return this.edge.l===this.site?this.edge.b:this.edge.a}},dr.prototype={insert:function(n,t){var e,r,u;if(n){if(t.P=n,t.N=n.N,n.N&&(n.N.P=t),n.N=t,n.R){for(n=n.R;n.L;)n=n.L;n.L=t}else n.R=t;e=n}else this._?(n=Mr(this._),t.P=null,t.N=n,n.P=n.L=t,e=n):(t.P=t.N=null,this._=t,e=null);for(t.L=t.R=null,t.U=e,t.C=!0,n=t;e&&e.C;)r=e.U,e===r.L?(u=r.R,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.R&&(yr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,xr(this,r))):(u=r.L,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.L&&(xr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,yr(this,r))),e=n.U;this._.C=!1},remove:function(n){n.N&&(n.N.P=n.P),n.P&&(n.P.N=n.N),n.N=n.P=null;var t,e,r,u=n.U,i=n.L,o=n.R;if(e=i?o?Mr(o):i:o,u?u.L===n?u.L=e:u.R=e:this._=e,i&&o?(r=e.C,e.C=n.C,e.L=i,i.U=e,e!==o?(u=e.U,e.U=n.U,n=e.R,u.L=n,e.R=o,o.U=e):(e.U=u,u=e,n=e.R)):(r=n.C,n=e),n&&(n.U=u),!r){if(n&&n.C)return n.C=!1,void 0;do{if(n===this._)break;if(n===u.L){if(t=u.R,t.C&&(t.C=!1,u.C=!0,yr(this,u),t=u.R),t.L&&t.L.C||t.R&&t.R.C){t.R&&t.R.C||(t.L.C=!1,t.C=!0,xr(this,t),t=u.R),t.C=u.C,u.C=t.R.C=!1,yr(this,u),n=this._;break}}else if(t=u.L,t.C&&(t.C=!1,u.C=!0,xr(this,u),t=u.L),t.L&&t.L.C||t.R&&t.R.C){t.L&&t.L.C||(t.R.C=!1,t.C=!0,yr(this,t),t=u.L),t.C=u.C,u.C=t.L.C=!1,xr(this,u),n=this._;break}t.C=!0,n=u,u=u.U}while(!n.C);n&&(n.C=!1)}}},$o.geom.voronoi=function(n){function t(n){var t=new Array(n.length),r=a[0][0],u=a[0][1],i=a[1][0],o=a[1][1];return _r(e(n),a).cells.forEach(function(e,a){var c=e.edges,s=e.site,l=t[a]=c.length?c.map(function(n){var t=n.start();return[t.x,t.y]}):s.x>=r&&s.x<=i&&s.y>=u&&s.y<=o?[[r,o],[i,o],[i,u],[r,u]]:[];l.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(i(n,t)/Ca)*Ca,y:Math.round(o(n,t)/Ca)*Ca,i:t}})}var r=Ze,u=Ve,i=r,o=u,a=Kc;return n?t(n):(t.links=function(n){return _r(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return _r(e(n)).cells.forEach(function(e,r){for(var 
u,i,o=e.site,a=e.edges.sort(ir),c=-1,s=a.length,l=a[s-1].edge,f=l.l===o?l.r:l.l;++c<s;)u=l,i=f,l=a[c].edge,f=l.l===o?l.r:l.l,r<i.i&&r<f.i&&wr(o,i,f)<0&&t.push([n[r],n[i.i],n[f.i]])}),t},t.x=function(n){return arguments.length?(i=pt(r=n),t):r},t.y=function(n){return arguments.length?(o=pt(u=n),t):u},t.clipExtent=function(n){return arguments.length?(a=null==n?Kc:n,t):a===Kc?null:a},t.size=function(n){return arguments.length?t.clipExtent(n&&[[0,0],n]):a===Kc?null:a&&a[1]},t)};var Kc=[[-1e6,-1e6],[1e6,1e6]];$o.geom.delaunay=function(n){return $o.geom.voronoi().triangles(n)},$o.geom.quadtree=function(n,t,e,r,u){function i(n){function i(n,t,e,r,u,i,o,a){if(!isNaN(e)&&!isNaN(r))if(n.leaf){var c=n.x,l=n.y;if(null!=c)if(aa(c-e)+aa(l-r)<.01)s(n,t,e,r,u,i,o,a);else{var f=n.point;n.x=n.y=n.point=null,s(n,f,c,l,u,i,o,a),s(n,t,e,r,u,i,o,a)}else n.x=e,n.y=r,n.point=t}else s(n,t,e,r,u,i,o,a)}function s(n,t,e,r,u,o,a,c){var s=.5*(u+a),l=.5*(o+c),f=e>=s,h=r>=l,g=(h<<1)+f;n.leaf=!1,n=n.nodes[g]||(n.nodes[g]=Er()),f?u=s:a=s,h?o=l:c=l,i(n,t,e,r,u,o,a,c)}var l,f,h,g,p,v,d,m,y,x=pt(a),M=pt(c);if(null!=t)v=t,d=e,m=r,y=u;else if(m=y=-(v=d=1/0),f=[],h=[],p=n.length,o)for(g=0;p>g;++g)l=n[g],l.x<v&&(v=l.x),l.y<d&&(d=l.y),l.x>m&&(m=l.x),l.y>y&&(y=l.y),f.push(l.x),h.push(l.y);else for(g=0;p>g;++g){var _=+x(l=n[g],g),b=+M(l,g);v>_&&(v=_),d>b&&(d=b),_>m&&(m=_),b>y&&(y=b),f.push(_),h.push(b)}var w=m-v,S=y-d;w>S?y=d+w:m=v+S;var k=Er();if(k.add=function(n){i(k,n,+x(n,++g),+M(n,g),v,d,m,y)},k.visit=function(n){Ar(n,k,v,d,m,y)},g=-1,null==t){for(;++g<p;)i(k,n[g],f[g],h[g],v,d,m,y);--g}else n.forEach(k.add);return f=h=n=l=null,k}var o,a=Ze,c=Ve;return(o=arguments.length)?(a=Sr,c=kr,3===o&&(u=e,r=t,e=t=0),i(n)):(i.x=function(n){return arguments.length?(a=n,i):a},i.y=function(n){return arguments.length?(c=n,i):c},i.extent=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=+n[0][0],e=+n[0][1],r=+n[1][0],u=+n[1][1]),i):null==t?null:[[t,e],[r,u]]},i.size=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=e=0,r=+n[0],u=+n[1]),i):null==t?null:[r-t,u-e]},i)},$o.interpolateRgb=Cr,$o.interpolateObject=Nr,$o.interpolateNumber=Lr,$o.interpolateString=Tr;var Qc=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g;$o.interpolate=qr,$o.interpolators=[function(n,t){var e=typeof t;return("string"===e?Xa.has(t)||/^(#|rgb\(|hsl\()/.test(t)?Cr:Tr:t instanceof Z?Cr:"object"===e?Array.isArray(t)?zr:Nr:Lr)(n,t)}],$o.interpolateArray=zr;var ns=function(){return vt},ts=$o.map({linear:ns,poly:Fr,quad:function(){return Ur},cubic:function(){return jr},sin:function(){return Or},exp:function(){return Yr},circle:function(){return Ir},elastic:Zr,back:Vr,bounce:function(){return Xr}}),es=$o.map({"in":vt,out:Dr,"in-out":Pr,"out-in":function(n){return Pr(Dr(n))}});$o.ease=function(n){var t=n.indexOf("-"),e=t>=0?n.substring(0,t):n,r=t>=0?n.substring(t+1):"in";return e=ts.get(e)||ns,r=es.get(r)||vt,Rr(r(e.apply(null,Bo.call(arguments,1))))},$o.interpolateHcl=$r,$o.interpolateHsl=Br,$o.interpolateLab=Wr,$o.interpolateRound=Jr,$o.transform=function(n){var t=Jo.createElementNS($o.ns.prefix.svg,"g");return($o.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new Gr(e?e.matrix:rs)})(n)},Gr.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var rs={a:1,b:0,c:0,d:1,e:0,f:0};$o.interpolateTransform=tu,$o.layout={},$o.layout.bundle=function(){return function(n){for(var 
t=[],e=-1,r=n.length;++e<r;)t.push(uu(n[e]));return t}},$o.layout.chord=function(){function n(){var n,s,f,h,g,p={},v=[],d=$o.range(i),m=[];for(e=[],r=[],n=0,h=-1;++h<i;){for(s=0,g=-1;++g<i;)s+=u[h][g];v.push(s),m.push($o.range(i)),n+=s}for(o&&d.sort(function(n,t){return o(v[n],v[t])}),a&&m.forEach(function(n,t){n.sort(function(n,e){return a(u[t][n],u[t][e])})}),n=(Ea-l*i)/n,s=0,h=-1;++h<i;){for(f=s,g=-1;++g<i;){var y=d[h],x=m[y][g],M=u[y][x],_=s,b=s+=M*n;p[y+"-"+x]={index:y,subindex:x,startAngle:_,endAngle:b,value:M}}r[y]={index:y,startAngle:f,endAngle:s,value:(s-f)/n},s+=l}for(h=-1;++h<i;)for(g=h-1;++g<i;){var w=p[h+"-"+g],S=p[g+"-"+h];(w.value||S.value)&&e.push(w.value<S.value?{source:S,target:w}:{source:w,target:S})}c&&t()}function t(){e.sort(function(n,t){return c((n.source.value+n.target.value)/2,(t.source.value+t.target.value)/2)})}var e,r,u,i,o,a,c,s={},l=0;return s.matrix=function(n){return arguments.length?(i=(u=n)&&u.length,e=r=null,s):u},s.padding=function(n){return arguments.length?(l=n,e=r=null,s):l},s.sortGroups=function(n){return arguments.length?(o=n,e=r=null,s):o},s.sortSubgroups=function(n){return arguments.length?(a=n,e=null,s):a},s.sortChords=function(n){return arguments.length?(c=n,e&&t(),s):c},s.chords=function(){return e||n(),e},s.groups=function(){return r||n(),r},s},$o.layout.force=function(){function n(n){return function(t,e,r,u){if(t.point!==n){var i=t.cx-n.x,o=t.cy-n.y,a=1/Math.sqrt(i*i+o*o);if(v>(u-e)*a){var c=t.charge*a*a;return n.px-=i*c,n.py-=o*c,!0}if(t.point&&isFinite(a)){var c=t.pointCharge*a*a;n.px-=i*c,n.py-=o*c}}return!t.charge}}function t(n){n.px=$o.event.x,n.py=$o.event.y,a.resume()}var e,r,u,i,o,a={},c=$o.dispatch("start","tick","end"),s=[1,1],l=.9,f=us,h=is,g=-30,p=.1,v=.8,d=[],m=[];return a.tick=function(){if((r*=.99)<.005)return c.end({type:"end",alpha:r=0}),!0;var t,e,a,f,h,v,y,x,M,_=d.length,b=m.length;for(e=0;b>e;++e)a=m[e],f=a.source,h=a.target,x=h.x-f.x,M=h.y-f.y,(v=x*x+M*M)&&(v=r*i[e]*((v=Math.sqrt(v))-u[e])/v,x*=v,M*=v,h.x-=x*(y=f.weight/(h.weight+f.weight)),h.y-=M*y,f.x+=x*(y=1-y),f.y+=M*y);if((y=r*p)&&(x=s[0]/2,M=s[1]/2,e=-1,y))for(;++e<_;)a=d[e],a.x+=(x-a.x)*y,a.y+=(M-a.y)*y;if(g)for(fu(t=$o.geom.quadtree(d),r,o),e=-1;++e<_;)(a=d[e]).fixed||t.visit(n(a));for(e=-1;++e<_;)a=d[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*l,a.y-=(a.py-(a.py=a.y))*l);c.tick({type:"tick",alpha:r})},a.nodes=function(n){return arguments.length?(d=n,a):d},a.links=function(n){return arguments.length?(m=n,a):m},a.size=function(n){return arguments.length?(s=n,a):s},a.linkDistance=function(n){return arguments.length?(f="function"==typeof n?n:+n,a):f},a.distance=a.linkDistance,a.linkStrength=function(n){return arguments.length?(h="function"==typeof n?n:+n,a):h},a.friction=function(n){return arguments.length?(l=+n,a):l},a.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,a):g},a.gravity=function(n){return arguments.length?(p=+n,a):p},a.theta=function(n){return arguments.length?(v=+n,a):v},a.alpha=function(n){return arguments.length?(n=+n,r?r=n>0?n:0:n>0&&(c.start({type:"start",alpha:r=n}),$o.timer(a.tick)),a):r},a.start=function(){function n(n,r){if(!e){for(e=new Array(c),a=0;c>a;++a)e[a]=[];for(a=0;s>a;++a){var u=m[a];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var i,o=e[t],a=-1,s=o.length;++a<s;)if(!isNaN(i=o[a][n]))return i;return Math.random()*r}var t,e,r,c=d.length,l=m.length,p=s[0],v=s[1];for(t=0;c>t;++t)(r=d[t]).index=t,r.weight=0;for(t=0;l>t;++t)r=m[t],"number"==typeof 
r.source&&(r.source=d[r.source]),"number"==typeof r.target&&(r.target=d[r.target]),++r.source.weight,++r.target.weight;for(t=0;c>t;++t)r=d[t],isNaN(r.x)&&(r.x=n("x",p)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof f)for(t=0;l>t;++t)u[t]=+f.call(this,m[t],t);else for(t=0;l>t;++t)u[t]=f;if(i=[],"function"==typeof h)for(t=0;l>t;++t)i[t]=+h.call(this,m[t],t);else for(t=0;l>t;++t)i[t]=h;if(o=[],"function"==typeof g)for(t=0;c>t;++t)o[t]=+g.call(this,d[t],t);else for(t=0;c>t;++t)o[t]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return e||(e=$o.behavior.drag().origin(vt).on("dragstart.force",au).on("drag.force",t).on("dragend.force",cu)),arguments.length?(this.on("mouseover.force",su).on("mouseout.force",lu).call(e),void 0):e},$o.rebind(a,c,"on")};var us=20,is=1;$o.layout.hierarchy=function(){function n(t,o,a){var c=u.call(e,t,o);if(t.depth=o,a.push(t),c&&(s=c.length)){for(var s,l,f=-1,h=t.children=new Array(s),g=0,p=o+1;++f<s;)l=h[f]=n(c[f],p,a),l.parent=t,g+=l.value;r&&h.sort(r),i&&(t.value=g)}else delete t.children,i&&(t.value=+i.call(e,t,o)||0);return t}function t(n,r){var u=n.children,o=0;if(u&&(a=u.length))for(var a,c=-1,s=r+1;++c<a;)o+=t(u[c],s);else i&&(o=+i.call(e,n,r)||0);return i&&(n.value=o),o}function e(t){var e=[];return n(t,0,e),e}var r=vu,u=gu,i=pu;return e.sort=function(n){return arguments.length?(r=n,e):r},e.children=function(n){return arguments.length?(u=n,e):u},e.value=function(n){return arguments.length?(i=n,e):i},e.revalue=function(n){return t(n,0),n},e},$o.layout.partition=function(){function n(t,e,r,u){var i=t.children;if(t.x=e,t.y=t.depth*u,t.dx=r,t.dy=u,i&&(o=i.length)){var o,a,c,s=-1;for(r=t.value?r/t.value:0;++s<o;)n(a=i[s],e,c=a.value*r,u),e+=c}}function t(n){var e=n.children,r=0;if(e&&(u=e.length))for(var u,i=-1;++i<u;)r=Math.max(r,t(e[i]));return 1+r}function e(e,i){var o=r.call(this,e,i);return n(o[0],0,u[0],u[1]/t(o[0])),o}var r=$o.layout.hierarchy(),u=[1,1];return e.size=function(n){return arguments.length?(u=n,e):u},hu(e,r)},$o.layout.pie=function(){function n(i){var o=i.map(function(e,r){return+t.call(n,e,r)}),a=+("function"==typeof r?r.apply(this,arguments):r),c=(("function"==typeof u?u.apply(this,arguments):u)-a)/$o.sum(o),s=$o.range(i.length);null!=e&&s.sort(e===os?function(n,t){return o[t]-o[n]}:function(n,t){return e(i[n],i[t])});var l=[];return s.forEach(function(n){var t;l[n]={data:i[n],value:t=o[n],startAngle:a,endAngle:a+=t*c}}),l}var t=Number,e=os,r=0,u=Ea;return n.value=function(e){return arguments.length?(t=e,n):t},n.sort=function(t){return arguments.length?(e=t,n):e},n.startAngle=function(t){return arguments.length?(r=t,n):r},n.endAngle=function(t){return arguments.length?(u=t,n):u},n};var os={};$o.layout.stack=function(){function n(a,c){var s=a.map(function(e,r){return t.call(n,e,r)}),l=s.map(function(t){return t.map(function(t,e){return[i.call(n,t,e),o.call(n,t,e)]})}),f=e.call(n,l,c);s=$o.permute(s,f),l=$o.permute(l,f);var h,g,p,v=r.call(n,l,c),d=s.length,m=s[0].length;for(g=0;m>g;++g)for(u.call(n,s[0][g],p=v[g],l[0][g][1]),h=1;d>h;++h)u.call(n,s[h][g],p+=l[h-1][g][1],l[h][g][1]);return a}var t=vt,e=Mu,r=_u,u=xu,i=mu,o=yu;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:as.get(t)||Mu,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:cs.get(t)||_u,n):r},n.x=function(t){return 
arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var as=$o.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(bu),i=n.map(wu),o=$o.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,s=[],l=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],s.push(e)):(c+=i[e],l.push(e));return l.reverse().concat(s)},reverse:function(n){return $o.range(n.length).reverse()},"default":Mu}),cs=$o.map({silhouette:function(n){var t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var t,e,r,u,i,o,a,c,s,l=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=s=0,e=1;h>e;++e){for(t=0,u=0;l>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];l>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,s>c&&(s=c)}for(e=0;h>e;++e)g[e]-=s;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:_u});$o.layout.histogram=function(){function n(n,i){for(var o,a,c=[],s=n.map(e,this),l=r.call(this,s,i),f=u.call(this,l,s,i),i=-1,h=s.length,g=f.length-1,p=t?1:1/h;++i<g;)o=c[i]=[],o.dx=f[i+1]-(o.x=f[i]),o.y=0;if(g>0)for(i=-1;++i<h;)a=s[i],a>=l[0]&&a<=l[1]&&(o=c[$o.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=Au,u=ku;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=pt(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return Eu(n,t)}:pt(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},$o.layout.tree=function(){function n(n,i){function o(n,t){var r=n.children,u=n._tree;if(r&&(i=r.length)){for(var i,a,s,l=r[0],f=l,h=-1;++h<i;)s=r[h],o(s,a),f=c(s,a,f),a=s;Pu(n);var g=.5*(l._tree.prelim+s._tree.prelim);t?(u.prelim=t._tree.prelim+e(n,t),u.mod=u.prelim-g):u.prelim=g}else t&&(u.prelim=t._tree.prelim+e(n,t))}function a(n,t){n.x=n._tree.prelim+t;var e=n.children;if(e&&(r=e.length)){var r,u=-1;for(t+=n._tree.mod;++u<r;)a(e[u],t)}}function c(n,t,r){if(t){for(var u,i=n,o=n,a=t,c=n.parent.children[0],s=i._tree.mod,l=o._tree.mod,f=a._tree.mod,h=c._tree.mod;a=Lu(a),i=Nu(i),a&&i;)c=Nu(c),o=Lu(o),o._tree.ancestor=n,u=a._tree.prelim+f-i._tree.prelim-s+e(a,i),u>0&&(Uu(ju(a,n,r),n,u),s+=u,l+=u),f+=a._tree.mod,s+=i._tree.mod,h+=c._tree.mod,l+=o._tree.mod;a&&!Lu(o)&&(o._tree.thread=a,o._tree.mod+=f-l),i&&!Nu(c)&&(c._tree.thread=i,c._tree.mod+=s-h,r=n)}return r}var s=t.call(this,n,i),l=s[0];Du(l,function(n,t){n._tree={ancestor:n,prelim:0,mod:0,change:0,shift:0,number:t?t._tree.number+1:0}}),o(l),a(l,-l._tree.prelim);var f=Tu(l,zu),h=Tu(l,qu),g=Tu(l,Ru),p=f.x-e(f,h)/2,v=h.x+e(h,f)/2,d=g.depth||1;return Du(l,u?function(n){n.x*=r[0],n.y=n.depth*r[1],delete n._tree}:function(n){n.x=(n.x-p)/(v-p)*r[0],n.y=n.depth/d*r[1],delete n._tree}),s}var t=$o.layout.hierarchy().sort(null).value(null),e=Cu,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},hu(n,t)},$o.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],s=u[1],l=null==t?Math.sqrt:"function"==typeof t?t:function(){return 
t};if(a.x=a.y=0,Du(a,function(n){n.r=+l(n.value)}),Du(a,Iu),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/s))/2;Du(a,function(n){n.r+=f}),Du(a,Iu),Du(a,function(n){n.r-=f})}return Xu(a,c/2,s/2,t?1:1/Math.max(2*a.r/c,2*a.r/s)),o}var t,e=$o.layout.hierarchy().sort(Hu),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},hu(n,e)},$o.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],s=0;Du(c,function(n){var t=n.children;t&&t.length?(n.x=Wu(t),n.y=Bu(t)):(n.x=o?s+=e(n,o):0,n.y=0,o=n)});var l=Ju(c),f=Gu(c),h=l.x-e(l,f)/2,g=f.x+e(f,l)/2;return Du(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=$o.layout.hierarchy().sort(null).value(null),e=Cu,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},hu(n,t)},$o.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++u<i;)r=(e=n[u]).value*(0>t?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var o,a,c,s=f(e),l=[],h=i.slice(),p=1/0,v="slice"===g?s.dx:"dice"===g?s.dy:"slice-dice"===g?1&e.depth?s.dy:s.dx:Math.min(s.dx,s.dy);for(n(h,s.dx*s.dy/e.value),l.area=0;(c=h.length)>0;)l.push(o=h[c-1]),l.area+=o.area,"squarify"!==g||(a=r(l,v))<=p?(h.pop(),p=a):(l.area-=l.pop().area,u(l,v,s,!1),v=Math.min(s.dx,s.dy),l.length=l.area=0,p=1/0);l.length&&(u(l,v,s,!0),l.length=l.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++o<a;)(e=n[o].area)&&(i>e&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,s=e.y,l=t?c(n.area/t):0;if(t==e.dx){for((r||l>e.dy)&&(l=e.dy);++i<o;)u=n[i],u.x=a,u.y=s,u.dy=l,a+=u.dx=Math.min(e.x+e.dx-a,l?c(u.area/l):0);u.z=!0,u.dx+=e.x+e.dx-a,e.y+=l,e.dy-=l}else{for((r||l>e.dx)&&(l=e.dx);++i<o;)u=n[i],u.x=a,u.y=s,u.dx=l,s+=u.dy=Math.min(e.y+e.dy-s,l?c(u.area/l):0);u.z=!1,u.dy+=e.y+e.dy-s,e.x+=l,e.dx-=l}}function i(r){var u=o||a(r),i=u[0];return i.x=0,i.y=0,i.dx=s[0],i.dy=s[1],o&&a.revalue(i),n([i],i.dx*i.dy/i.value),(o?e:t)(i),h&&(o=u),u}var o,a=$o.layout.hierarchy(),c=Math.round,s=[1,1],l=null,f=Ku,h=!1,g="squarify",p=.5*(1+Math.sqrt(5));return i.size=function(n){return arguments.length?(s=n,i):s},i.padding=function(n){function t(t){var e=n.call(i,t,t.depth);return null==e?Ku(t):Qu(t,"number"==typeof e?[e,e,e,e]:e)}function e(t){return Qu(t,n)}if(!arguments.length)return l;var r;return f=null==(l=n)?Ku:"function"==(r=typeof n)?t:"number"===r?(n=[n,n,n,n],e):e,i},i.round=function(n){return arguments.length?(c=n?Math.round:Number,i):c!=Number},i.sticky=function(n){return arguments.length?(h=n,o=null,i):h},i.ratio=function(n){return arguments.length?(p=n,i):p},i.mode=function(n){return arguments.length?(g=n+"",i):g},hu(i,a)},$o.random={normal:function(n,t){var e=arguments.length;return 2>e&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return 
n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=$o.random.normal.apply($o,arguments);return function(){return Math.exp(n())}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t/n}}},$o.scale={};var ss={floor:vt,ceil:vt};$o.scale.linear=function(){return oi([0,1],[0,1],qr,!1)};var ls={s:1,g:1,p:1,r:1,e:1};$o.scale.log=function(){return pi($o.scale.linear().domain([0,1]),10,!0,[1,10])};var fs=$o.format(".0e"),hs={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};$o.scale.pow=function(){return vi($o.scale.linear(),1,[0,1])},$o.scale.sqrt=function(){return $o.scale.pow().exponent(.5)},$o.scale.ordinal=function(){return mi([],{t:"range",a:[[]]})},$o.scale.category10=function(){return $o.scale.ordinal().range(gs)},$o.scale.category20=function(){return $o.scale.ordinal().range(ps)},$o.scale.category20b=function(){return $o.scale.ordinal().range(vs)},$o.scale.category20c=function(){return $o.scale.ordinal().range(ds)};var gs=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(it),ps=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(it),vs=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(it),ds=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(it);$o.scale.quantile=function(){return yi([],[])},$o.scale.quantize=function(){return xi(0,1,[0,1])},$o.scale.threshold=function(){return Mi([.5],[0,1])},$o.scale.identity=function(){return _i([0,1])},$o.svg={},$o.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+ms,a=u.apply(this,arguments)+ms,c=(o>a&&(c=o,o=a,a=c),a-o),s=ka>c?"0":"1",l=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=ys?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+s+",0 "+n*l+","+n*f+"Z":"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=bi,e=wi,r=Si,u=ki;return n.innerRadius=function(e){return arguments.length?(t=pt(e),n):t},n.outerRadius=function(t){return arguments.length?(e=pt(t),n):e},n.startAngle=function(t){return arguments.length?(r=pt(t),n):r},n.endAngle=function(t){return arguments.length?(u=pt(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+ms;return[Math.cos(i)*n,Math.sin(i)*n]},n};var ms=-Aa,ys=Ea-Ca;$o.svg.line=function(){return Ei(vt)};var xs=$o.map({linear:Ai,"linear-closed":Ci,step:Ni,"step-before":Li,"step-after":Ti,basis:Ui,"basis-open":ji,"basis-closed":Hi,bundle:Fi,cardinal:Ri,"cardinal-open":qi,"cardinal-closed":zi,monotone:Xi});xs.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var Ms=[0,2/3,1/3,0],_s=[0,1/3,2/3,0],bs=[0,1/6,2/3,1/6];$o.svg.line.radial=function(){var n=Ei($i);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},Li.reverse=Ti,Ti.reverse=Li,$o.svg.area=function(){return Bi(vt)},$o.svg.area.radial=function(){var 
n=Bi($i);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},$o.svg.chord=function(){function n(n,a){var c=t(this,i,n,a),s=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,s)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,s.r,s.p0)+r(s.r,s.p1,s.a1-s.a0)+u(s.r,s.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+ms,l=s.call(n,u,r)+ms;return{r:i,a0:o,a1:l,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(l),i*Math.sin(l)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>ka)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=Re,o=De,a=Wi,c=Si,s=ki;return n.radius=function(t){return arguments.length?(a=pt(t),n):a},n.source=function(t){return arguments.length?(i=pt(t),n):i},n.target=function(t){return arguments.length?(o=pt(t),n):o},n.startAngle=function(t){return arguments.length?(c=pt(t),n):c},n.endAngle=function(t){return arguments.length?(s=pt(t),n):s},n},$o.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=Re,e=De,r=Ji;return n.source=function(e){return arguments.length?(t=pt(e),n):t},n.target=function(t){return arguments.length?(e=pt(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},$o.svg.diagonal.radial=function(){var n=$o.svg.diagonal(),t=Ji,e=n.projection;return n.projection=function(n){return arguments.length?e(Gi(t=n)):t},n},$o.svg.symbol=function(){function n(n,r){return(ws.get(t.call(this,n,r))||no)(e.call(this,n,r))}var t=Qi,e=Ki;return n.type=function(e){return arguments.length?(t=pt(e),n):t},n.size=function(t){return arguments.length?(e=pt(t),n):e},n};var ws=$o.map({circle:no,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*As)),e=t*As;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/Es),e=t*Es/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/Es),e=t*Es/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});$o.svg.symbolTypes=ws.keys();var Ss,ks,Es=Math.sqrt(3),As=Math.tan(30*La),Cs=[],Ns=0; +Cs.call=ma.call,Cs.empty=ma.empty,Cs.node=ma.node,Cs.size=ma.size,$o.transition=function(n){return arguments.length?Ss?n.transition():n:Ma.transition()},$o.transition.prototype=Cs,Cs.select=function(n){var t,e,r,u=this.id,i=[];n=v(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]);for(var c=this[o],s=-1,l=c.length;++s<l;)(r=c[s])&&(e=n.call(r,r.__data__,s,o))?("__data__"in r&&(e.__data__=r.__data__),uo(e,s,u,r.__transition__[u]),t.push(e)):t.push(null)}return to(i,u)},Cs.selectAll=function(n){var t,e,r,u,i,o=this.id,a=[];n=d(n);for(var c=-1,s=this.length;++c<s;)for(var l=this[c],f=-1,h=l.length;++f<h;)if(r=l[f]){i=r.__transition__[o],e=n.call(r,r.__data__,f,c),a.push(t=[]);for(var g=-1,p=e.length;++g<p;)(u=e[g])&&uo(u,g,o,i),t.push(u)}return to(a,o)},Cs.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=E(n));for(var i=0,o=this.length;o>i;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a)&&t.push(r)}return to(u,this.id)},Cs.tween=function(n,t){var e=this.id;return 
arguments.length<2?this.node().__transition__[e].tween.get(n):C(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Cs.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var o="transform"==n?tu:qr,a=$o.ns.qualify(n);return eo(this,"attr."+n,t,a.local?i:u)},Cs.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=$o.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Cs.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=Ko.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=qr(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return eo(this,"style."+n,t,u)},Cs.styleTween=function(n,t,e){function r(r,u){var i=t.call(this,r,u,Ko.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Cs.text=function(n){return eo(this,"text",n,ro)},Cs.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Cs.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=$o.ease.apply($o,arguments)),C(this,function(e){e.__transition__[t].ease=n}))},Cs.delay=function(n){var t=this.id;return C(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Cs.duration=function(n){var t=this.id;return C(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Cs.each=function(n,t){var e=this.id;if(arguments.length<2){var r=ks,u=Ss;Ss=e,C(this,function(t,r,u){ks=t.__transition__[e],n.call(t,t.__data__,r,u)}),ks=r,Ss=u}else C(this,function(r){var u=r.__transition__[e];(u.event||(u.event=$o.dispatch("start","end"))).on(n,t)});return this},Cs.transition=function(){for(var n,t,e,r,u=this.id,i=++Ns,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var t=this[a],s=0,l=t.length;l>s;s++)(e=t[s])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,uo(e,s,i,r)),n.push(e)}return to(o,i)},$o.svg.axis=function(){function n(n){n.each(function(){var 
n,s=$o.select(this),l=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):vt:t,p=s.selectAll(".tick").data(h,f),v=p.enter().insert("g",".domain").attr("class","tick").style("opacity",Ca),d=$o.transition(p.exit()).style("opacity",Ca).remove(),m=$o.transition(p).style("opacity",1),y=ti(f),x=s.selectAll(".domain").data([0]),M=(x.enter().append("path").attr("class","domain"),$o.transition(x));v.append("line"),v.append("text");var _=v.select("line"),b=m.select("line"),w=p.select("text").text(g),S=v.select("text"),k=m.select("text");switch(r){case"bottom":n=io,_.attr("y2",u),S.attr("y",Math.max(u,0)+o),b.attr("x2",0).attr("y2",u),k.attr("x",0).attr("y",Math.max(u,0)+o),w.attr("dy",".71em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+i+"V0H"+y[1]+"V"+i);break;case"top":n=io,_.attr("y2",-u),S.attr("y",-(Math.max(u,0)+o)),b.attr("x2",0).attr("y2",-u),k.attr("x",0).attr("y",-(Math.max(u,0)+o)),w.attr("dy","0em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+-i+"V0H"+y[1]+"V"+-i);break;case"left":n=oo,_.attr("x2",-u),S.attr("x",-(Math.max(u,0)+o)),b.attr("x2",-u).attr("y2",0),k.attr("x",-(Math.max(u,0)+o)).attr("y",0),w.attr("dy",".32em").style("text-anchor","end"),M.attr("d","M"+-i+","+y[0]+"H0V"+y[1]+"H"+-i);break;case"right":n=oo,_.attr("x2",u),S.attr("x",Math.max(u,0)+o),b.attr("x2",u).attr("y2",0),k.attr("x",Math.max(u,0)+o).attr("y",0),w.attr("dy",".32em").style("text-anchor","start"),M.attr("d","M"+i+","+y[0]+"H0V"+y[1]+"H"+i)}if(f.rangeBand){var E=f.rangeBand()/2,A=function(n){return f(n)+E};v.call(n,A),m.call(n,A)}else v.call(n,l),m.call(n,f),d.call(n,f)})}var t,e=$o.scale.linear(),r=Ls,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in Ts?t+"":Ls,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Ls="bottom",Ts={top:1,right:1,bottom:1,left:1};$o.svg.brush=function(){function n(i){i.each(function(){var i=$o.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(d,vt);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return qs[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var l,f=$o.transition(i),h=$o.transition(o);c&&(l=ti(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),e(f)),s&&(l=ti(s),h.attr("y",l[0]).attr("height",l[1]-l[0]),r(f)),t(f)})}function 
t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+l[+/e$/.test(n)]+","+h[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",l[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",l[1]-l[0])}function r(n){n.select(".extent").attr("y",h[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",h[1]-h[0])}function u(){function u(){32==$o.event.keyCode&&(C||(x=null,L[0]-=l[1],L[1]-=h[1],C=2),f())}function g(){32==$o.event.keyCode&&2==C&&(L[0]+=l[1],L[1]+=h[1],C=0,f())}function d(){var n=$o.mouse(_),u=!1;M&&(n[0]+=M[0],n[1]+=M[1]),C||($o.event.altKey?(x||(x=[(l[0]+l[1])/2,(h[0]+h[1])/2]),L[0]=l[+(n[0]<x[0])],L[1]=h[+(n[1]<x[1])]):x=null),E&&m(n,c,0)&&(e(S),u=!0),A&&m(n,s,1)&&(r(S),u=!0),u&&(t(S),w({type:"brush",mode:C?"move":"resize"}))}function m(n,t,e){var r,u,a=ti(t),c=a[0],s=a[1],f=L[e],g=e?h:l,d=g[1]-g[0];return C&&(c-=f,s-=d+f),r=(e?v:p)?Math.max(c,Math.min(s,n[e])):n[e],C?u=(r+=f)+d:(x&&(f=Math.max(c,Math.min(s,2*x[e]-r))),r>f?(u=r,r=f):u=f),g[0]!=r||g[1]!=u?(e?o=null:i=null,g[0]=r,g[1]=u,!0):void 0}function y(){d(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),$o.select("body").style("cursor",null),T.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),N(),w({type:"brushend"})}var x,M,_=this,b=$o.select($o.event.target),w=a.of(_,arguments),S=$o.select(_),k=b.datum(),E=!/^(n|s)$/.test(k)&&c,A=!/^(e|w)$/.test(k)&&s,C=b.classed("extent"),N=D(),L=$o.mouse(_),T=$o.select(Ko).on("keydown.brush",u).on("keyup.brush",g);if($o.event.changedTouches?T.on("touchmove.brush",d).on("touchend.brush",y):T.on("mousemove.brush",d).on("mouseup.brush",y),S.interrupt().selectAll("*").interrupt(),C)L[0]=l[0]-L[0],L[1]=h[0]-L[1];else if(k){var q=+/w$/.test(k),z=+/^n/.test(k);M=[l[1-q]-L[0],h[1-z]-L[1]],L[0]=l[q],L[1]=h[z]}else $o.event.altKey&&(x=L.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),$o.select("body").style("cursor",b.style("cursor")),w({type:"brushstart"}),d()}var i,o,a=g(n,"brushstart","brush","brushend"),c=null,s=null,l=[0,0],h=[0,0],p=!0,v=!0,d=zs[0];return n.event=function(n){n.each(function(){var n=a.of(this,arguments),t={x:l,y:h,i:i,j:o},e=this.__chart__||t;this.__chart__=t,Ss?$o.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,l=e.x,h=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=zr(l,t.x),r=zr(h,t.y);return i=o=null,function(u){l=t.x=e(u),h=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,d=zs[!c<<1|!s],n):c},n.y=function(t){return arguments.length?(s=t,d=zs[!c<<1|!s],n):s},n.clamp=function(t){return arguments.length?(c&&s?(p=!!t[0],v=!!t[1]):c?p=!!t:s&&(v=!!t),n):c&&s?[p,v]:c?p:s?v:null},n.extent=function(t){var e,r,u,a,f;return 
arguments.length?(c&&(e=t[0],r=t[1],s&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(f=e,e=r,r=f),(e!=l[0]||r!=l[1])&&(l=[e,r])),s&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],s.invert&&(u=s(u),a=s(a)),u>a&&(f=u,u=a,a=f),(u!=h[0]||a!=h[1])&&(h=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=l[0],r=l[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(f=e,e=r,r=f))),s&&(o?(u=o[0],a=o[1]):(u=h[0],a=h[1],s.invert&&(u=s.invert(u),a=s.invert(a)),u>a&&(f=u,u=a,a=f))),c&&s?[[e,u],[r,a]]:c?[e,r]:s&&[u,a])},n.clear=function(){return n.empty()||(l=[0,0],h=[0,0],i=o=null),n},n.empty=function(){return!!c&&l[0]==l[1]||!!s&&h[0]==h[1]},$o.rebind(n,a,"on")};var qs={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},zs=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Rs=$o.time={},Ds=Date,Ps=["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"];ao.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){Us.setUTCDate.apply(this._,arguments)},setDay:function(){Us.setUTCDay.apply(this._,arguments)},setFullYear:function(){Us.setUTCFullYear.apply(this._,arguments)},setHours:function(){Us.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){Us.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){Us.setUTCMinutes.apply(this._,arguments)},setMonth:function(){Us.setUTCMonth.apply(this._,arguments)},setSeconds:function(){Us.setUTCSeconds.apply(this._,arguments)},setTime:function(){Us.setTime.apply(this._,arguments)}};var Us=Date.prototype,js="%a %b %e %X %Y",Hs="%m/%d/%Y",Fs="%H:%M:%S",Os=["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],Ys=["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],Is=["January","February","March","April","May","June","July","August","September","October","November","December"],Zs=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"];Rs.year=co(function(n){return n=Rs.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),Rs.years=Rs.year.range,Rs.years.utc=Rs.year.utc.range,Rs.day=co(function(n){var t=new Ds(2e3,0);return t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return n.getDate()-1}),Rs.days=Rs.day.range,Rs.days.utc=Rs.day.utc.range,Rs.dayOfYear=function(n){var t=Rs.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},Ps.forEach(function(n,t){n=n.toLowerCase(),t=7-t;var e=Rs[n]=co(function(n){return(n=Rs.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=Rs.year(n).getDay();return Math.floor((Rs.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});Rs[n+"s"]=e.range,Rs[n+"s"].utc=e.utc.range,Rs[n+"OfYear"]=function(n){var e=Rs.year(n).getDay();return Math.floor((Rs.dayOfYear(n)+(e+t)%7)/7)}}),Rs.week=Rs.sunday,Rs.weeks=Rs.sunday.range,Rs.weeks.utc=Rs.sunday.utc.range,Rs.weekOfYear=Rs.sundayOfYear,Rs.format=lo;var 
Vs=ho(Os),Xs=go(Os),$s=ho(Ys),Bs=go(Ys),Ws=ho(Is),Js=go(Is),Gs=ho(Zs),Ks=go(Zs),Qs=/^%/,nl={"-":"",_:" ",0:"0"},tl={a:function(n){return Ys[n.getDay()]},A:function(n){return Os[n.getDay()]},b:function(n){return Zs[n.getMonth()]},B:function(n){return Is[n.getMonth()]},c:lo(js),d:function(n,t){return po(n.getDate(),t,2)},e:function(n,t){return po(n.getDate(),t,2)},H:function(n,t){return po(n.getHours(),t,2)},I:function(n,t){return po(n.getHours()%12||12,t,2)},j:function(n,t){return po(1+Rs.dayOfYear(n),t,3)},L:function(n,t){return po(n.getMilliseconds(),t,3)},m:function(n,t){return po(n.getMonth()+1,t,2)},M:function(n,t){return po(n.getMinutes(),t,2)},p:function(n){return n.getHours()>=12?"PM":"AM"},S:function(n,t){return po(n.getSeconds(),t,2)},U:function(n,t){return po(Rs.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return po(Rs.mondayOfYear(n),t,2)},x:lo(Hs),X:lo(Fs),y:function(n,t){return po(n.getFullYear()%100,t,2)},Y:function(n,t){return po(n.getFullYear()%1e4,t,4)},Z:jo,"%":function(){return"%"}},el={a:vo,A:mo,b:_o,B:bo,c:wo,d:To,e:To,H:zo,I:zo,j:qo,L:Po,m:Lo,M:Ro,p:Uo,S:Do,U:xo,w:yo,W:Mo,x:So,X:ko,y:Ao,Y:Eo,Z:Co,"%":Ho},rl=/^\s*\d+/,ul=$o.map({am:0,pm:1});lo.utc=Fo;var il=Fo("%Y-%m-%dT%H:%M:%S.%LZ");lo.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?Oo:il,Oo.parse=function(n){var t=new Date(n);return isNaN(t)?null:t},Oo.toString=il.toString,Rs.second=co(function(n){return new Ds(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),Rs.seconds=Rs.second.range,Rs.seconds.utc=Rs.second.utc.range,Rs.minute=co(function(n){return new Ds(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),Rs.minutes=Rs.minute.range,Rs.minutes.utc=Rs.minute.utc.range,Rs.hour=co(function(n){var t=n.getTimezoneOffset()/60;return new Ds(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),Rs.hours=Rs.hour.range,Rs.hours.utc=Rs.hour.utc.range,Rs.month=co(function(n){return n=Rs.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),Rs.months=Rs.month.range,Rs.months.utc=Rs.month.utc.range;var ol=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],al=[[Rs.second,1],[Rs.second,5],[Rs.second,15],[Rs.second,30],[Rs.minute,1],[Rs.minute,5],[Rs.minute,15],[Rs.minute,30],[Rs.hour,1],[Rs.hour,3],[Rs.hour,6],[Rs.hour,12],[Rs.day,1],[Rs.day,2],[Rs.week,1],[Rs.month,1],[Rs.month,3],[Rs.year,1]],cl=[[lo("%Y"),Zt],[lo("%B"),function(n){return n.getMonth()}],[lo("%b %d"),function(n){return 1!=n.getDate()}],[lo("%a %d"),function(n){return n.getDay()&&1!=n.getDate()}],[lo("%I %p"),function(n){return n.getHours()}],[lo("%I:%M"),function(n){return n.getMinutes()}],[lo(":%S"),function(n){return n.getSeconds()}],[lo(".%L"),function(n){return n.getMilliseconds()}]],sl=Zo(cl);al.year=Rs.year,Rs.scale=function(){return Yo($o.scale.linear(),al,sl)};var ll={range:function(n,t,e){return $o.range(+n,+t,e).map(Io)}},fl=al.map(function(n){return[n[0].utc,n[1]]}),hl=[[Fo("%Y"),Zt],[Fo("%B"),function(n){return n.getUTCMonth()}],[Fo("%b %d"),function(n){return 1!=n.getUTCDate()}],[Fo("%a %d"),function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],[Fo("%I %p"),function(n){return n.getUTCHours()}],[Fo("%I:%M"),function(n){return n.getUTCMinutes()}],[Fo(":%S"),function(n){return 
n.getUTCSeconds()}],[Fo(".%L"),function(n){return n.getUTCMilliseconds()}]],gl=Zo(hl);return fl.year=Rs.year.utc,Rs.scale.utc=function(){return Yo($o.scale.linear(),fl,gl)},$o.text=dt(function(n){return n.responseText}),$o.json=function(n,t){return mt(n,"application/json",Vo,t)},$o.html=function(n,t){return mt(n,"text/html",Xo,t)},$o.xml=dt(function(n){return n.responseXML}),$o}();</script> +<script type="text/javascript">var RadarChart = { + draw: function(id, d, options){ + var cfg = { + radius: 5, + w: 600, + h: 600, + factor: .95, + factorLegend: 1, + levels: 3, + maxValue: 0, + radians: 2 * Math.PI, + opacityArea: 0.5, + color: d3.scale.category10(), + fontSize: 10 + }; + if('undefined' !== typeof options){ + for(var i in options){ + if('undefined' !== typeof options[i]){ + cfg[i] = options[i]; + } + } + } + cfg.maxValue = Math.max(cfg.maxValue, d3.max(d, function(i){return d3.max(i.map(function(o){return o.value;}))})); + var allAxis = (d[0].map(function(i, j){return i.axis})); + var total = allAxis.length; + var radius = cfg.factor*Math.min(cfg.w/2, cfg.h/2); + d3.select(id).select("svg").remove(); + var g = d3.select(id).append("svg").attr("width", cfg.w).attr("height", 0.75 * cfg.h).append("g"); + + var tooltip; + function getPosition(i, range, factor, func){ + factor = typeof factor !== 'undefined' ? factor : 1; + return range * (1 - factor * func(i * cfg.radians / total)); + } + function getHorizontalPosition(i, range, factor){ + return getPosition(i, range, factor, Math.sin); + } + function getVerticalPosition(i, range, factor){ + return getPosition(i, range, factor, Math.cos); + } + + for(var j=0; j<cfg.levels; j++){ + var levelFactor = radius*((j+1)/cfg.levels); + g.selectAll(".levels").data(allAxis).enter().append("svg:line") + .attr("x1", function(d, i){return getHorizontalPosition(i, levelFactor);}) + .attr("y1", function(d, i){return getVerticalPosition(i, levelFactor);}) + .attr("x2", function(d, i){return getHorizontalPosition(i+1, levelFactor);}) + .attr("y2", function(d, i){return getVerticalPosition(i+1, levelFactor);}) + .attr("class", "line").style("stroke", "grey").style("stroke-width", "0.5px").attr("transform", "translate(" + (cfg.w/2-levelFactor) + ", " + (cfg.h/2-levelFactor) + ")"); + + } + + series = 0; + + var axis = g.selectAll(".axis").data(allAxis).enter().append("g").attr("class", "axis"); + + axis.append("line") + .attr("x1", cfg.w/2) + .attr("y1", cfg.h/2) + .attr("x2", function(j, i){return getHorizontalPosition(i, cfg.w/2, cfg.factor);}) + .attr("y2", function(j, i){return getVerticalPosition(i, cfg.h/2, cfg.factor);}) + .attr("class", "line").style("stroke", "grey").style("stroke-width", "1px"); + + axis.append("text").attr("class", function(d){ return "legend_" + d.replace("/", "") }) + .text(function(d){return d}) + .style("font-size", cfg.fontSize + "px") + .style("text-anchor", function(d, i){ + var p = getHorizontalPosition(i, 0.5); + return (p < 0.4) ? "start" : ((p > 0.6) ? "end" : "middle"); + }) + .attr("transform", function(d, i){ + var p = getVerticalPosition(i, cfg.h / 2); + return p < cfg.fontSize ? 
"translate(0, " + (cfg.fontSize - p) + ")" : ""; + }) + .attr("x", function(d, i){return getHorizontalPosition(i, cfg.w / 2, cfg.factorLegend);}) + .attr("y", function(d, i){return getVerticalPosition(i, cfg.h / 2, cfg.factorLegend);}); + + d.forEach(function(y, x){ + dataValues = []; + g.selectAll(".nodes") + .data(y, function(j, i){ + dataValues.push([ + getHorizontalPosition(i, cfg.w/2, (parseFloat(Math.max(j.value, 0))/cfg.maxValue)*cfg.factor), + getVerticalPosition(i, cfg.h/2, (parseFloat(Math.max(j.value, 0))/cfg.maxValue)*cfg.factor) + ]); + }); + dataValues.push(dataValues[0]); + g.selectAll(".area") + .data([dataValues]) + .enter() + .append("polygon") + .attr("class", "radar-chart-serie"+series) + .style("stroke-width", "2px") + .style("stroke", cfg.color(series)) + .attr("points",function(d) { + var str=""; + for(var pti=0;pti<d.length;pti++){ + str=str+d[pti][0]+","+d[pti][1]+" "; + } + return str; + }) + .style("fill", function(j, i){return cfg.color(series)}) + .style("fill-opacity", cfg.opacityArea) + .on('mouseover', function (d){ + z = "polygon."+d3.select(this).attr("class"); + g.selectAll("polygon").transition(200).style("fill-opacity", 0.1); + g.selectAll(z).transition(200).style("fill-opacity", .7); + }) + .on('mouseout', function(){ + g.selectAll("polygon").transition(200).style("fill-opacity", cfg.opacityArea); + }); + series++; + }); + series=0; + + + d.forEach(function(y, x){ + g.selectAll(".nodes") + .data(y).enter() + .append("svg:circle").attr("class", "radar-chart-serie"+series) + .attr('r', cfg.radius) + .attr("alt", function(j){return Math.max(j.value, 0)}) + .attr("cx", function(j, i){ + dataValues.push([ + getHorizontalPosition(i, cfg.w/2, (parseFloat(Math.max(j.value, 0))/cfg.maxValue)*cfg.factor), + getVerticalPosition(i, cfg.h/2, (parseFloat(Math.max(j.value, 0))/cfg.maxValue)*cfg.factor) + ]); + return getHorizontalPosition(i, cfg.w/2, (Math.max(j.value, 0)/cfg.maxValue)*cfg.factor); + }) + .attr("cy", function(j, i){ + return getVerticalPosition(i, cfg.h/2, (Math.max(j.value, 0)/cfg.maxValue)*cfg.factor); + }) + .attr("data-id", function(j){return j.axis}) + .style("fill", cfg.color(series)).style("fill-opacity", .9) + .on('mouseover', function (d){ + newX = parseFloat(d3.select(this).attr('cx')) - 10; + newY = parseFloat(d3.select(this).attr('cy')) - 5; + tooltip.attr('x', newX).attr('y', newY).text(d.value).transition(200).style('opacity', 1); + z = "polygon."+d3.select(this).attr("class"); + g.selectAll("polygon").transition(200).style("fill-opacity", 0.1); + g.selectAll(z).transition(200).style("fill-opacity", .7); + }) + .on('mouseout', function(){ + tooltip.transition(200).style('opacity', 0); + g.selectAll("polygon").transition(200).style("fill-opacity", cfg.opacityArea); + }) + .append("svg:title") + .text(function(j){return Math.max(j.value, 0)}); + + series++; + }); + //Tooltip + tooltip = g.append('text').style('opacity', 0).style('font-family', 'sans-serif').style('font-size', '13px'); + } +} + +</script> +<script type="text/javascript">window.onload = function() { + var cpu = 88.6; + var mpi = 11.4; + var io = 0.0; + + // Draw radar chart, choose its color based on application classification + var radar_data = [[ { axis: "CPU", value: cpu }, + { axis: "MPI", value: mpi }, + { axis: "I/O", value: io } ]]; + var radar_options = { w: 200, h: 200, factor: 0.7, fontSize: 16, radius: 0, + opacityArea: 0.64, maxValue: 100, + color: function() { + if ("cpu" == "io") return "#ed8140"; + if ("cpu" == "cpu") return "#4fd32e"; + if ("cpu" == "mpi") 
return "#409ded"; + else return "#bb58d6"; + } + }; + RadarChart.draw("#time_radar", radar_data, radar_options); + + // Wrap the exe path on slashes if necessary + var exe_path = document.getElementById("exe_path"); + exe_path.innerHTML = exe_path.innerHTML.replace(/\//g, '/​'); // insert a zero width space after slashes as a word wrap hint + + // Format our byte value numbers to show at most 2 decimal places, but only if required to attain 3 digits of precision + // 1234.56 -> "1234" + // 12.3456 -> "12.3" + // 1.23456 -> "1.23" + // 0.00000 -> "0.00" + var formatNumber = function(num) { + if (num > 100) return new Number(num).toFixed(0); // 1234.56 -> 1234 + if (num > 10) return new Number(num).toFixed(1); // 12.3456 -> 12.3 + else return new Number(num).toFixed(2); // 1.23456 -> 1.23 and 0 -> 0.00 + } + + // Parse a number, but return 0 for invalid numbers, not NaN + var toNumber = function(num_str) { + var num = Number(num_str) + return isNaN(num) ? 0 : num; + } + + // Scale byte values to bytes, kB, MB or GB as appropriate + var fillBytes = function(base_name, bytes_str, per_second) { + var units = "bytes"; + var scale = 1.0; + var bytes = Number(bytes_str) + if (isNaN(bytes) ) { units = "" ; } + else if (bytes > 1000000000) { units = "GB"; scale = 1*1000*1000*1000; } + else if (bytes > 1000000 ) { units = "MB"; scale = 1*1000*1000 } + else if (bytes > 1000 ) { units = "kB"; scale = 1*1000 } + var num = isNaN(bytes) ? bytes_str : formatNumber(bytes / scale); + var elementNum = document.getElementById(base_name + "_num"); + elementNum.innerHTML = num; + var elementUnits = document.getElementById(base_name + "_units"); + elementUnits.innerHTML = units + (per_second && !isNaN(bytes) ? "/s" : ""); + }; + + // Fill all the byte numbers and their units with the scaled values + fillBytes("mpi_colrate", "1.65e+02", true); + fillBytes("mpi_p2prate", "0.00e+00", true); + fillBytes("io_readrate", "0.00e+00", true); + fillBytes("io_writerate", "0.00e+00", true); + fillBytes("ram_mean", "2.33e+07", false); + fillBytes("ram_peak", "2.35e+07", false); + + // Set widths for all graphs + var bar = function(name, width) { + var rounded = Math.round(width); + if (!isFinite(rounded) || rounded < 1) + document.getElementById(name).style.width = "1px" + else + document.getElementById(name).style.width = rounded + "px"; + }; + bar("cpu_bar", cpu * 2); + bar("mpi_bar", mpi * 2); + bar("io_bar", io * 2); + + bar("cpu_num_bar", toNumber("50.0") / 2); + bar("cpu_vec_bar", toNumber("50.0") / 2); + bar("cpu_mem_bar", toNumber("0.0") / 2); + bar("cpu_other_bar", toNumber("0.0") / 2); + + bar("mpi_col_bar", toNumber("100.0") / 2); + bar("mpi_p2p_bar", toNumber("0.0") / 2); + var mpi_scale = Math.max(toNumber("1.65e+02"), toNumber("0.00e+00")); + bar("mpi_colrate_bar", 50 * toNumber("1.65e+02") / mpi_scale); + bar("mpi_p2prate_bar", 50 * toNumber("0.00e+00") / mpi_scale); + + bar("io_read_bar", toNumber("0.0") / 2); + bar("io_write_bar", toNumber("0.0") / 2); + var io_scale = Math.max(toNumber("0.00e+00"), toNumber("0.00e+00")); + bar("io_readrate_bar", 50 * toNumber("0.00e+00") / io_scale); + bar("io_writerate_bar", 50 * toNumber("0.00e+00") / io_scale); + + bar("ram_mean_bar", 50 * toNumber("2.33e+07") / toNumber("2.35e+07")); + bar("ram_peak_bar", 50); + bar("ram_node_bar", toNumber("2.8") / 2); + + // Formatting replacement helper function + var replaceIn = function(elementId, re, new_text) { + element = document.getElementById(elementId); + element.innerHTML = element.innerHTML.replace(re, new_text); + }; + + 
// Add formatting for the overview advice section + replaceIn('overview_advice', /CPU/g, '<span class="cpu_span">CPU</span>'); + replaceIn('overview_advice', /MPI/g, '<span class="mpi_span">MPI</span>'); + replaceIn('overview_advice', /I\/O/g, '<span class="io_span">I\/O</span>'); + + // Add formatting for the CPU advice section + if ( 88.6 < 0.05 ) + replaceIn('cpu_explanation', /application code/g, '<span class="cpu_span">application code</span>'); + replaceIn('cpu_explanation', /vectorized instructions/g, '<span class="cpu_vec_span">vectorized instructions</span>'); + replaceIn('cpu_explanation', /memory accesses/g, '<span class="cpu_mem_span">memory accesses</span>'); + replaceIn('cpu_explanation', /memory-bound/g, '<span class="cpu_mem_span">memory-bound</span>'); + replaceIn('cpu_explanation', /arithmetic-bound/g, '<span class="cpu_num_span">arithmetic-bound</span>'); + replaceIn('cpu_explanation', /numerical computation/g, '<span class="cpu_num_span">numerical computation</span>'); + + // Add formatting for the MPI advice section + if ( 11.4 < 0.05 ) + replaceIn('mpi_explanation', /MPI/g, '<span class="mpi_span">MPI</span>'); + replaceIn('mpi_explanation', /collective calls/g, '<span class="mpi_col_span">collective calls</span>'); + replaceIn('mpi_explanation', /point-to-point calls/g, '<span class="mpi_p2p_span">point-to-point calls</span>'); + replaceIn('mpi_explanation', / very low/g, ' <span class="mpi_colrate_span">very low</span>'); + replaceIn('mpi_explanation', / low/g, ' <span class="mpi_colrate_span">low</span>'); + replaceIn('mpi_explanation', / average/g, ' <span class="mpi_colrate_span">average</span>'); + replaceIn('mpi_explanation', / very high/g, ' <span class="mpi_colrate_span">very high</span>'); + replaceIn('mpi_explanation', / high/g, ' <span class="mpi_colrate_span">high</span>'); + + // Add formatting for the I/O advice section + if ( 0.0 < 0.05 ) + replaceIn('io_explanation', /I\/O/g, '<span class="io_span">I/O</span>'); + replaceIn('io_explanation', /read operations/g, '<span class="io_read_span">read operations</span>'); + replaceIn('io_explanation', /write operations/g, '<span class="io_write_span">write operations</span>'); + replaceIn('io_explanation', / very low/g, ' <span class="io_readrate_span">very low</span>'); + replaceIn('io_explanation', / low/g, ' <span class="io_readrate_span">low</span>'); + replaceIn('io_explanation', / average/g, ' <span class="io_readrate_span">average</span>'); + replaceIn('io_explanation', / very high/g, ' <span class="io_readrate_span">very high</span>'); + replaceIn('io_explanation', / high/g, ' <span class="io_readrate_span">high</span>'); + + // Add formatting for the RAM advice section + replaceIn('ram_explanation', /high/g, ' <span class="ram_node_span">high</span>'); + replaceIn('ram_explanation', /well-balanced/g, '<span class="ram_peak_span">well-balanced</span>'); + replaceIn('ram_explanation', /significant variation/g, '<span class="ram_peak_span">significant variation</span>'); + replaceIn('ram_explanation', /peak node memory usage/g, '<span class="ram_node_span">peak node memory usage</span>'); + + // Hide the error warning if the script got this far + document.getElementById('error').style.display = 'none'; +}; +</script> +</head> + +<body> +<div id="content"> + +<div class="header"> + <div class="logo"><img src="http://content.allinea.com/downloads/performance-report-logo.png" alt="Allinea Performance Reports" /></div> + <div class="header_left"> + <div class="application_details"> + <table> + <tr><td 
class="details_key">Executable:</td><td id="exe_name">mympiprog.x</td></tr> + <tr><td class="details_key">Resources:</td><td id="num_procs">32 processes, 2 nodes</td></tr> + <tr><td class="details_key">Machine:</td><td id="machine_name">cn182</td></tr> + <tr><td class="details_key">Start time:</td><td id="start_date">Wed Oct 15 16:56:23 2014</td></tr> + <tr><td class="details_key">Total time:</td><td id="time_string">7 seconds (0 minutes)</td></tr> + <tr><td class="details_key">Full path:</td><td id="exe_path">/home/user</td></tr> + <tr><td class="details_key">Notes:</td><td id="notes"></td></tr> + </table> + </div> + </div> + <div id="time_radar"></div> + <div class="clear"></div> +</div> +<hr /> +<div id="error"> +<p><strong>Error: javascript is not running</strong></p> +<p>The graphs in this Performance Report require <strong>javascript</strong>, which is disabled or not working.</p> +<p>Check whether your javascript support is enabled or try another browser.<p> +<p>Remember, you can always contact <a href="mailto:support@allinea.com">support@allinea.com</a>, we're very nice!</p> +</div> +<div class="summary"> + <div class="heading">Summary: mympiprog.x is <span class="cpu_span">CPU-bound</span> in this configuration</div> + <div>The total wallclock time was spent as follows:</div> + <table class="summary_table"> + <tr><td class="heading_cpu">CPU</td><td class="percent">88.6%</td><td class="bar_graph"><div id="cpu_bar" /></td> + <td class="details"><p>Time spent running application code. High values are usually good.</p><p>This is <span id="summary_cpu_class">high</span>; check the CPU performance section for optimization advice.</p></td></tr> + <tr><td class="heading_mpi">MPI</td><td class="percent">11.4%</td><td class="bar_graph"><div id="mpi_bar" /></td> + <td class="details"><p>Time spent in MPI calls. High values are usually bad.</p><p>This is <span id="summary_mpi_class">very low</span>; this code may benefit from increasing the process count.</p></td></tr> + <tr><td class="heading_io">I/O</td><td class="percent">0.0%</td><td class="bar_graph"><div id="io_bar" /></td> + <td class="details"><p>Time spent in filesystem I/O. High values are usually bad.</p><p>This is <span id="summary_io_class">negligible</span>; there's no need to investigate I/O performance.</p></td></tr> + </table> + <div class="overview_general_advice"><p>This application run was <span class="cpu_span">CPU-bound</span>. 
A breakdown of this time and advice for investigating further is in the <span class="cpu_span">CPU</span> section below.</p><p id="overview_advice">As very little time is spent in MPI calls, this code may also benefit from running at larger scales.</p></div> +</div> +<hr /> +<div class="subsections"> + <div class="ltcol"> + <div class="heading_cpu">CPU</div> + <div>A breakdown of how the <span class="cpu_span">88.6</span>% total CPU time was spent:</div> + <table id="cpu_chart"> + <tr><td>Scalar numeric ops</td><td class="right_cell"><span class="cpu_num_span">50.0</span>%</td><td class="mini_bar_graph"><div id="cpu_num_bar" /></td></tr> + <tr><td>Vector numeric ops</td><td class="right_cell"><span class="cpu_vec_span">50.0</span>%</td><td class="mini_bar_graph"><div id="cpu_vec_bar" /></td></tr> + <tr><td>Memory accesses</td><td class="right_cell"><span class="cpu_mem_span">0.0</span>%</td><td class="mini_bar_graph"><div id="cpu_mem_bar" /></td></tr> + <tr><td>Other</td><td class="right_cell"><span class="cpu_other_span">0.0</span>%</td><td class="mini_bar_graph"><div id="cpu_other_bar" /></td></tr> + </table> + <div id="cpu_explanation"> + <div class="explanation">The per-core performance is arithmetic-bound. Try to increase the amount of time spent in vectorized instructions by analyzing the compiler's vectorization reports.</div> + <div class="explanation"></div> + </div> + </div> + <div class="rtcol"> + <div class="heading_mpi">MPI</div> + <div>Of the <span class="mpi_span">11.4</span>% total time spent in MPI calls:</div> + <table id="mpi_chart"> + <tr><td>Time in collective calls</td><td class="right_cell"><span class="mpi_col_span">100.0</span>%</td><td class="mini_bar_graph"><div id="mpi_col_bar" /></td></tr> + <tr><td>Time in point-to-point calls</td><td class="right_cell"><span class="mpi_p2p_span">0.0</span>%</td><td class="mini_bar_graph"><div id="mpi_p2p_bar" /></td></tr> + <tr><td>Effective process collective rate</td><td class="right_cell"><span class="mpi_colrate_span"><span id="mpi_colrate_num">1.65e+02</span></span> <span id="mpi_colrate_units"></span></td><td class="mini_bar_graph"><div id="mpi_colrate_bar" /></td></tr> + <tr><td>Effective process point-to-point rate</td><td class="right_cell"><span class="mpi_p2prate_span"><span id="mpi_p2prate_num">0.00e+00</span></span> <span id="mpi_p2prate_units"></span></td><td class="mini_bar_graph"><div id="mpi_p2prate_bar" /></td></tr> + </table> + <div id="mpi_explanation"> + <div class="explanation">Most of the time is spent in collective calls with a very low transfer rate. 
This suggests load imbalance is causing synchronization overhead; use an MPI profiler to investigate further.</div> + <div class="explanation"></div> + </div> + </div> + <div class="clear"></div> +</div> +<div class="subsections"> + <div class="ltcol"> + <div class="heading_io">I/O</div> + <div>A breakdown of how the <span class="io_span">0.0</span>% total I/O time was spent:</div> + <table id="io_chart"> + <tr><td>Time in reads</td><td class="right_cell"><span class="io_read_span">0.0</span>%</td><td class="mini_bar_graph"><div id="io_read_bar" /></td></tr> + <tr><td>Time in writes</td><td class="right_cell"><span class="io_write_span">0.0</span>%</td><td class="mini_bar_graph"><div id="io_write_bar" /></td></tr> + <tr><td>Effective process read rate</td><td class="right_cell"><span class="io_readrate_span"><span id="io_readrate_num">0.00e+00</span></span> <span id="io_readrate_units"></span></td><td class="mini_bar_graph"><div id="io_readrate_bar" /></td></tr> + <tr><td>Effective process write rate</td><td class="right_cell"><span class="io_writerate_span"><span id="io_writerate_num">0.00e+00</span></span> <span id="io_writerate_units"></span></td><td class="mini_bar_graph"><div id="io_writerate_bar" /></td></tr> + </table> + <div id="io_explanation"> + <div class="explanation">No time is spent in I/O operations. There's nothing to optimize here!</div> + <div class="explanation"></div> + </div> + </div> + <div class="rtcol"> + <div class="heading_ram">Memory</div> + <div>Per-process memory usage may also affect scaling:</div> + <table id="ram_chart"> + <tr><td>Mean process memory usage</td><td class="right_cell"><span class="ram_mean_span"><span id="ram_mean_num">2.33e+07</span></span> <span id="ram_mean_units"></span></td><td class="mini_bar_graph"><div id="ram_mean_bar" /></td></tr> + <tr><td>Peak process memory usage</td><td class="right_cell"><span class="ram_peak_span"><span id="ram_peak_num">2.35e+07</span></span> <span id="ram_peak_units"></span></td><td class="mini_bar_graph"><div id="ram_peak_bar" /></td></tr> + <tr><td>Peak node memory usage</td><td class="right_cell"><span class="ram_node_span">2.8</span>%</td><td class="mini_bar_graph"><div id="ram_node_bar" /></td></tr> + </table> + <div id="ram_explanation"> + <div class="explanation">The peak node memory usage is very low. You may be able to reduce the amount of allocation time used by running with fewer MPI processes and more data on each process.</div> + <div class="explanation"></div> + </div> + </div> + <div class="clear"></div> +</div> +</div> <!-- content --> +</body> +</html> diff --git a/docs.it4i/src/mympiprog_32p_2014-10-15_16-56.txt b/docs.it4i/src/mympiprog_32p_2014-10-15_16-56.txt new file mode 100644 index 0000000000000000000000000000000000000000..de8449179640fd943a9f007f9eda084b11f2a455 --- /dev/null +++ b/docs.it4i/src/mympiprog_32p_2014-10-15_16-56.txt @@ -0,0 +1,50 @@ +Executable: mympiprog.x +Resources: 32 processes, 2 nodes +Machine: cn182 +Started on: Wed Oct 15 16:56:23 2014 +Total time: 7 seconds (0 minutes) +Full path: /home/user +Notes: + +Summary: mympiprog.x is CPU-bound in this configuration +CPU: 88.6% |========| +MPI: 11.4% || +I/O: 0.0% | +This application run was CPU-bound. A breakdown of this time and advice for investigating further is found in the CPU section below. +As very little time is spent in MPI calls, this code may also benefit from running at larger scales. 
+ +CPU: +A breakdown of how the 88.6% total CPU time was spent: +Scalar numeric ops: 50.0% |====| +Vector numeric ops: 50.0% |====| +Memory accesses: 0.0% | +Other: 0.0% | +The per-core performance is arithmetic-bound. Try to increase the amount of time spent in vectorized instructions by analyzing the compiler's vectorization reports. + + +MPI: +A breakdown of how the 11.4% total MPI time was spent: +Time in collective calls: 100.0% |=========| +Time in point-to-point calls: 0.0% | +Effective collective rate: 1.65e+02 bytes/s +Effective point-to-point rate: 0.00e+00 bytes/s +Most of the time is spent in collective calls with a very low transfer rate. This suggests load imbalance is causing synchronization overhead; use an MPI profiler to investigate further. + + +I/O: +A breakdown of how the 0.0% total I/O time was spent: +Time in reads: 0.0% | +Time in writes: 0.0% | +Effective read rate: 0.00e+00 bytes/s +Effective write rate: 0.00e+00 bytes/s +No time is spent in I/O operations. There's nothing to optimize here! + + +Memory: +Per-process memory usage may also affect scaling: +Mean process memory usage: 2.33e+07 bytes +Peak process memory usage: 2.35e+07 bytes +Peak node memory usage: 2.8% | +The peak node memory usage is very low. You may be able to reduce the amount of allocation time used by running with fewer MPI processes and more data on each process. + + diff --git a/mkdocs.yml b/mkdocs.yml index 5a9bf6a77dc7b0ae2bb5d6d73299736677d7941d..0c557765ed5ea27ccfd005a03f8c943a3947469e 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -17,118 +17,53 @@ pages: - Home: index.md # - History of Downtimes: downtimes_history.md - General: - - Applying for Resources: get-started-with-it4innovations/applying-for-resources.md - - Obtaining Login Credentials: get-started-with-it4innovations/obtaining-login-credentials/obtaining-login-credentials.md - - Certificates FAQ: get-started-with-it4innovations/obtaining-login-credentials/certificates-faq.md - - Accessing the Clusters: get-started-with-it4innovations/accessing-the-clusters/introduction.md -# - VPN-Connection-Fail-in-Win-8.1: get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md - - SSH Keys: get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md - - PuTTY: get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/putty.md - - PuTTY Pageant SSH Agent: get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/pageant.md - - PuTTY Key Generator: get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/puttygen.md - - X Window System: get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system.md - - X Window System Using Cygwin: get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/cygwin-and-x11-forwarding.md - - VNC: get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/vnc.md - - VPN Access: get-started-with-it4innovations/vpn-access.md - - Anselm Cluster: - - Introduction: anselm-cluster-documentation/introduction.md - - Hardware Overview: anselm-cluster-documentation/hardware-overview.md - - Shell and Data Access: anselm-cluster-documentation/shell-and-data-access.md - - Environment and Modules: anselm-cluster-documentation/environment-and-modules.md - - Compute Nodes: anselm-cluster-documentation/compute-nodes.md - - Remote Visualization: 
anselm-cluster-documentation/remote-visualization.md - - Network: anselm-cluster-documentation/network.md - - PRACE User Support: anselm-cluster-documentation/prace.md - - Resource Allocation Policy: anselm-cluster-documentation/resources-allocation-policy.md - - Job Priority: anselm-cluster-documentation/job-priority.md - - Job Submission and Execution: anselm-cluster-documentation/job-submission-and-execution.md - - Capacity Computing: anselm-cluster-documentation/capacity-computing.md - - Storage: anselm-cluster-documentation/storage.md - - Software: - - Available Modules: modules-anselm.md - - 'ANSYS': - - Introduction: anselm-cluster-documentation/software/ansys/ansys.md - - ANSYS CFX: anselm-cluster-documentation/software/ansys/ansys-cfx.md - - ANSYS Fluent: anselm-cluster-documentation/software/ansys/ansys-fluent.md - - ANSYS LS-DYNA: anselm-cluster-documentation/software/ansys/ansys-ls-dyna.md - - ANSYS MAPDL: anselm-cluster-documentation/software/ansys/ansys-mechanical-apdl.md - - LS-DYNA: anselm-cluster-documentation/software/ansys/ls-dyna.md - - 'Bioinformatics': - - Bioinformatics Applications: software/bio-gentoo.md - - 'Debuggers': - - Allinea Forge (DDT,MAP): anselm-cluster-documentation/software/debuggers/allinea-ddt.md - - Allinea Performance Reports: anselm-cluster-documentation/software/debuggers/allinea-performance-reports.md - - CUBE: anselm-cluster-documentation/software/debuggers/cube.md - - Intel Performance Counter Monitor: anselm-cluster-documentation/software/debuggers/intel-performance-counter-monitor.md - - Intel VTune Amplifier: anselm-cluster-documentation/software/debuggers/intel-vtune-amplifier.md - - PAPI: anselm-cluster-documentation/software/debuggers/papi.md - - Scalasca: anselm-cluster-documentation/software/debuggers/scalasca.md - - Score-P: anselm-cluster-documentation/software/debuggers/score-p.md - - Total View: anselm-cluster-documentation/software/debuggers/total-view.md - - VNC: anselm-cluster-documentation/software/debuggers/debuggers.md - - Valgrind: anselm-cluster-documentation/software/debuggers/valgrind.md - - Vampir: anselm-cluster-documentation/software/debuggers/vampir.md - - 'Chemistry': - - Molpro: anselm-cluster-documentation/software/chemistry/molpro.md - - NWChem: anselm-cluster-documentation/software/chemistry/nwchem.md - - ORCA: software/orca.md - - COMSOL: anselm-cluster-documentation/software/comsol-multiphysics.md - - Compilers: anselm-cluster-documentation/software/compilers.md - - GPI-2: anselm-cluster-documentation/software/gpi2.md - - 'Intel Suite': - - Introduction: anselm-cluster-documentation/software/intel-suite/introduction.md - - Intel Compilers: anselm-cluster-documentation/software/intel-suite/intel-compilers.md - - Intel Debugger: anselm-cluster-documentation/software/intel-suite/intel-debugger.md - - Intel IPP: anselm-cluster-documentation/software/intel-suite/intel-integrated-performance-primitives.md - - Intel MKL: anselm-cluster-documentation/software/intel-suite/intel-mkl.md - - Intel TBB: anselm-cluster-documentation/software/intel-suite/intel-tbb.md - - Intel Xeon Phi: anselm-cluster-documentation/software/intel-xeon-phi.md - - ISV Licenses: anselm-cluster-documentation/software/isv_licenses.md - - Java: anselm-cluster-documentation/software/java.md - - 'MPI': - - Introduction: anselm-cluster-documentation/software/mpi/mpi.md - - MPI4Py (MPI for Python): anselm-cluster-documentation/software/mpi/mpi4py-mpi-for-python.md - - Running Open MPI: anselm-cluster-documentation/software/mpi/Running_OpenMPI.md - - 
Running MPICH2: anselm-cluster-documentation/software/mpi/running-mpich2.md - - 'Numerical Languages': - - Introduction: anselm-cluster-documentation/software/numerical-languages/introduction.md - - Matlab 2013-2014: anselm-cluster-documentation/software/numerical-languages/matlab_1314.md - - Matlab: anselm-cluster-documentation/software/numerical-languages/matlab.md - - Octave: anselm-cluster-documentation/software/numerical-languages/octave.md - - R: anselm-cluster-documentation/software/numerical-languages/r.md - - 'Numerical Libraries': - - FFTW: anselm-cluster-documentation/software/numerical-libraries/fftw.md - - GSL: anselm-cluster-documentation/software/numerical-libraries/gsl.md - - HDF5: anselm-cluster-documentation/software/numerical-libraries/hdf5.md - - Intel Numerical Libraries: anselm-cluster-documentation/software/numerical-libraries/intel-numerical-libraries.md - - MAGMA for Intel Xeon Phi: anselm-cluster-documentation/software/numerical-libraries/magma-for-intel-xeon-phi.md - - PETSc: anselm-cluster-documentation/software/numerical-libraries/petsc.md - - Trilinos: anselm-cluster-documentation/software/numerical-libraries/trilinos.md - - NVIDIA CUDA: anselm-cluster-documentation/software/nvidia-cuda.md - - 'Omics Master': - - Diagnostic Component (TEAM): anselm-cluster-documentation/software/omics-master/diagnostic-component-team.md - - Priorization Component (BiERApp): anselm-cluster-documentation/software/omics-master/priorization-component-bierapp.md - - Overview: anselm-cluster-documentation/software/omics-master/overview.md - - OpenFOAM: anselm-cluster-documentation/software/openfoam.md - - Operating System: anselm-cluster-documentation/software/operating-system.md - - ParaView: anselm-cluster-documentation/software/paraview.md - - Virtualization: anselm-cluster-documentation/software/kvirtualization.md + - Applying for Resources: general/applying-for-resources.md + - Obtaining Login Credentials: general/obtaining-login-credentials/obtaining-login-credentials.md + - Certificates FAQ: general/obtaining-login-credentials/certificates-faq.md + - Accessing the Clusters: + #- Introduction: general/accessing-the-clusters/introduction.md + - OpenSSH Keys (UN*X): general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md + - PuTTY (Windows): general/accessing-the-clusters/shell-access-and-data-transfer/putty.md + - X Window System: general/accessing-the-clusters/graphical-user-interface/x-window-system.md + - VNC: general/accessing-the-clusters/graphical-user-interface/vnc.md + - VPN Access: general/accessing-the-clusters/vpn-access.md +# - VPN-Connection-Fail-in-Win-8.1: general/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md - Salomon Cluster: - Introduction: salomon/introduction.md - - PRACE User Support: salomon/prace.md - - Environment and Modules: salomon/environment-and-modules.md - Hardware Overview: salomon/hardware-overview.md - - Shell and Data Access: salomon/shell-and-data-access.md + - Accessing the Cluster: salomon/shell-and-data-access.md + - Environment and Modules: salomon/environment-and-modules.md + - Resource Allocation and Job Execution: + - Resources Allocation Policy: salomon/resources-allocation-policy.md + - Job Scheduling: salomon/job-priority.md + - Job Submission and Execution: salomon/job-submission-and-execution.md + - Capacity Computing: salomon/capacity-computing.md - Compute Nodes: salomon/compute-nodes.md - - Salomon Cluster Network: salomon/network.md - - IB Single-plane Topology: 
salomon/ib-single-plane-topology.md - - 7D Enhanced Hypercube: salomon/7d-enhanced-hypercube.md - - Resources Allocation Policy: salomon/resources-allocation-policy.md - - Job Scheduling: salomon/job-priority.md - - Job Submission and Execution: salomon/job-submission-and-execution.md - - Capacity Computing: salomon/capacity-computing.md + - Network: + - InfiniBand Network: salomon/network.md + - IB Single-plane Topology: salomon/ib-single-plane-topology.md + - 7D Enhanced Hypercube: salomon/7d-enhanced-hypercube.md - Storage: salomon/storage.md - - Software: + - PRACE User Support: salomon/prace.md + - Anselm Cluster: + - Introduction: anselm/introduction.md + - Hardware Overview: anselm/hardware-overview.md + - Accessing the Cluster: anselm/shell-and-data-access.md + - Environment and Modules: anselm/environment-and-modules.md + - Resource Allocation and Job Execution: + - Resource Allocation Policy: anselm/resources-allocation-policy.md + - Job Priority: anselm/job-priority.md + - Job Submission and Execution: anselm/job-submission-and-execution.md + - Capacity Computing: anselm/capacity-computing.md + - Compute Nodes: anselm/compute-nodes.md + - Storage: anselm/storage.md + - Network: anselm/network.md + - Remote Visualization: anselm/remote-visualization.md + - PRACE User Support: anselm/prace.md + - 'Software': + - Lmod Environment: software/lmod.md + - Modules Matrix: modules-matrix.md + - Salomon Software: - Available Modules: modules-salomon.md - Available Modules on UV: modules-salomon-uv.md - 'ANSYS': @@ -141,7 +76,7 @@ pages: - Setting License Preferences: salomon/software/ansys/licensing.md - Licensing and Available Versions: salomon/software/ansys/setting-license-preferences.md - 'Bioinformatics': - - Bioinformatics Applications: software/bio-gentoo.md + - Bioinformatics Applications: software/bioinformatics.md - 'Chemistry': - Molpro: salomon/software/chemistry/molpro.md - NWChem: salomon/software/chemistry/nwchem.md @@ -182,9 +117,76 @@ pages: - Octave: salomon/software/numerical-languages/octave.md - R: salomon/software/numerical-languages/r.md - Operating System: salomon/software/operating-system.md -# - 'Software': -# - Modules Matrix: modules-matrix.md - - Modules Matrix: modules-matrix.md + - Anselm Software: + - Available Modules: modules-anselm.md + - 'ANSYS': + - Introduction: anselm/software/ansys/ansys.md + - ANSYS CFX: anselm/software/ansys/ansys-cfx.md + - ANSYS Fluent: anselm/software/ansys/ansys-fluent.md + - ANSYS LS-DYNA: anselm/software/ansys/ansys-ls-dyna.md + - ANSYS MAPDL: anselm/software/ansys/ansys-mechanical-apdl.md + - LS-DYNA: anselm/software/ansys/ls-dyna.md + - 'Bioinformatics': + - Bioinformatics Applications: software/bioinformatics.md + - 'Debuggers': + - Allinea Forge (DDT,MAP): anselm/software/debuggers/allinea-ddt.md + - Allinea Performance Reports: anselm/software/debuggers/allinea-performance-reports.md + - CUBE: anselm/software/debuggers/cube.md + - Intel Performance Counter Monitor: anselm/software/debuggers/intel-performance-counter-monitor.md + - Intel VTune Amplifier: anselm/software/debuggers/intel-vtune-amplifier.md + - PAPI: anselm/software/debuggers/papi.md + - Scalasca: anselm/software/debuggers/scalasca.md + - Score-P: anselm/software/debuggers/score-p.md + - Total View: anselm/software/debuggers/total-view.md + - VNC: anselm/software/debuggers/debuggers.md + - Valgrind: anselm/software/debuggers/valgrind.md + - Vampir: anselm/software/debuggers/vampir.md + - 'Chemistry': + - Molpro: anselm/software/chemistry/molpro.md + - 
NWChem: anselm/software/chemistry/nwchem.md + - ORCA: software/orca.md + - COMSOL: anselm/software/comsol-multiphysics.md + - Compilers: anselm/software/compilers.md + - GPI-2: anselm/software/gpi2.md + - 'Intel Suite': + - Introduction: anselm/software/intel-suite/introduction.md + - Intel Compilers: anselm/software/intel-suite/intel-compilers.md + - Intel Debugger: anselm/software/intel-suite/intel-debugger.md + - Intel IPP: anselm/software/intel-suite/intel-integrated-performance-primitives.md + - Intel MKL: anselm/software/intel-suite/intel-mkl.md + - Intel TBB: anselm/software/intel-suite/intel-tbb.md + - Intel Xeon Phi: anselm/software/intel-xeon-phi.md + - ISV Licenses: anselm/software/isv_licenses.md + - Java: anselm/software/java.md + - 'MPI': + - Introduction: anselm/software/mpi/mpi.md + - MPI4Py (MPI for Python): anselm/software/mpi/mpi4py-mpi-for-python.md + - Running Open MPI: anselm/software/mpi/Running_OpenMPI.md + - Running MPICH2: anselm/software/mpi/running-mpich2.md + - 'Numerical Languages': + - Introduction: anselm/software/numerical-languages/introduction.md + - Matlab 2013-2014: anselm/software/numerical-languages/matlab_1314.md + - Matlab: anselm/software/numerical-languages/matlab.md + - Octave: anselm/software/numerical-languages/octave.md + - R: anselm/software/numerical-languages/r.md + - 'Numerical Libraries': + - FFTW: anselm/software/numerical-libraries/fftw.md + - GSL: anselm/software/numerical-libraries/gsl.md + - HDF5: anselm/software/numerical-libraries/hdf5.md + - Intel Numerical Libraries: anselm/software/numerical-libraries/intel-numerical-libraries.md + - MAGMA for Intel Xeon Phi: anselm/software/numerical-libraries/magma-for-intel-xeon-phi.md + - PETSc: anselm/software/numerical-libraries/petsc.md + - Trilinos: anselm/software/numerical-libraries/trilinos.md + - NVIDIA CUDA: anselm/software/nvidia-cuda.md + - 'Omics Master': + - Diagnostic Component (TEAM): anselm/software/omics-master/diagnostic-component-team.md + - Priorization Component (BiERApp): anselm/software/omics-master/priorization-component-bierapp.md + - Overview: anselm/software/omics-master/overview.md + - OpenFOAM: anselm/software/openfoam.md + - Operating System: anselm/software/operating-system.md + - ParaView: anselm/software/paraview.md + - Virtualization: anselm/software/virtualization.md +# - Modules Matrix: modules-matrix.md - PBS Pro Documentation: pbspro.md # - Testing: # - Colors: colors.md diff --git a/package.json b/package.json new file mode 100644 index 0000000000000000000000000000000000000000..92922d19c6cf3b1835f9e97959c74f8138ea4e32 --- /dev/null +++ b/package.json @@ -0,0 +1,9 @@ +{ + "scripts": { + "lint-md": "remark ." 
+ }, + + "remarkConfig": { + "presets": ["lint-recommended"] + } +} diff --git a/scripts/get_modules.sh b/scripts/get_modules.sh index a92b45bc34b5ba9f65380f5e3027267473eb37e6..b356329cdd2d1cd45497e965c8646456e1f249bf 100755 --- a/scripts/get_modules.sh +++ b/scripts/get_modules.sh @@ -2,3 +2,6 @@ curl -s https://code.it4i.cz/hrb33/modules-anselm/raw/master/anselm.md -o docs.it4i/modules-anselm.md curl -s https://code.it4i.cz/hrb33/modules-salomon/raw/master/salomon.md -o docs.it4i/modules-salomon.md curl -s https://code.it4i.cz/hrb33/modules-salomon/raw/master/salomon-uv.md -o docs.it4i/modules-salomon-uv.md +curl -s https://code.it4i.cz/hrb33/modules-anselm/raw/master/anselm.csv -o scripts/modules-anselm.csv +curl -s https://code.it4i.cz/hrb33/modules-salomon/raw/master/salomon.csv -o scripts/modules-salomon.csv +curl -s https://code.it4i.cz/hrb33/modules-salomon/raw/master/salomon-uv.csv -o scripts/modules-salomon-uv.csv diff --git a/scripts/cluster_modules.py b/scripts/modules-matrix.py similarity index 83% rename from scripts/cluster_modules.py rename to scripts/modules-matrix.py index 057361b27097030eba0cb8851ea8e97df0fbd913..736f6a9b15f48e82b290e1dc2ff2034a72325a07 100755 --- a/scripts/cluster_modules.py +++ b/scripts/modules-matrix.py @@ -12,9 +12,9 @@ def get_data(filename): return list(reader) # only return the reader when you have finished. your_list = [] -your_list += get_data('modules-anselm.csv') -your_list += get_data('modules-salomon.csv') -your_list += get_data('modules-salomon-uv.csv') +your_list += get_data('./scripts/modules-anselm.csv') +your_list += get_data('./scripts/modules-salomon.csv') +your_list += get_data('./scripts/modules-salomon-uv.csv') #print your_list #a=[["python/2.8.1",1],["python/2.9.1",2],["python/2.8.1",4],["python/3.0.1",4]] @@ -37,6 +37,11 @@ c=[ "USA", ] +print '!!! Hint "Cluster Acronyms"' +print ' \* A - Anselm' +print ' \* S - Salomon' +print ' \* U - uv1 at Salomon' + print "| Module | Versions | Clusters |" print "| ------ | -------- | -------- |" diff --git a/scripts/preklopeni_dokumentace/html_md.sh b/scripts/preklopeni_dokumentace/html_md.sh index ddb7380e8d2588cfbaeaf46b993543927f75e98a..cdb10e39e654c62fbe6cde3da96448bc4b294199 100755 --- a/scripts/preklopeni_dokumentace/html_md.sh +++ b/scripts/preklopeni_dokumentace/html_md.sh @@ -5,73 +5,12 @@ # version: 1.00 ### -if [ "$1" = "-d" ]; then - # remove pdf, md and epub files - STARTTIME=$(date +%s) - if [ "$2" = "pdf" ]; then - echo "$(tput setaf 9)*.pdf deleted$(tput setaf 15)" - if [ -d ./pdf ]; then - rm -rf ./pdf - fi - elif [ "$2" = "epub" ]; then - echo "$(tput setaf 9)*.epub deleted$(tput setaf 15)" - if [ -d ./epub ]; then - rm -rf ./epub - fi - elif [ "$2" = "md" ]; then - echo "$(tput setaf 9)*.md deleted$(tput setaf 15)" - if [ -d ./converted ]; then - rm -rf ./converted - fi - elif [ "$2" = "all" ]; then - echo "$(tput setaf 9)all files deleted$(tput setaf 15)" - if [ -d ./docs.it4i ]; then - rm -rf ./converted - fi - if [ -d ./epub ]; then - rm -rf ./epub - fi - if [ -d ./pdf ]; then - rm -rf ./pdf - fi - if [ -d ./info ]; then - rm -rf ./info - fi - if [ -d ./docs.it4i.cz ]; then - rm -rf ./docs.it4i.cz - fi - fi - ENDTIME=$(date +%s) - echo "It takes $(($ENDTIME - $STARTTIME)) seconds to complete this task..." 
-fi if [ "$1" = "-w" ]; then # download html pages - STARTTIME=$(date +%s) - rm -rf docs.it4i.cz - wget -X pbspro-documentation,changelog,whats-new,portal_css,portal_javascripts,++resource++jquery-ui-themes,anselm-cluster-documentation/icon.jpg -R favicon.ico,pdf.png,logo.png,background.png,application.png,search_icon.png,png.png,sh.png,touch_icon.png,anselm-cluster-documentation/icon.jpg,*js,robots.txt,*xml,RSS,download_icon.png,pdf,*zip,*rar,@@*,anselm-cluster-documentation/icon.jpg.1 --mirror --convert-links --adjust-extension --page-requisites --no-parent https://docs.it4i.cz; - - # download images - wget --directory-prefix=./docs.it4i.cz/ http://verif.cs.vsb.cz/aislinn/doc/report.png - wget --directory-prefix=./docs.it4i.cz/ https://docs.it4i.cz/anselm-cluster-documentation/software/virtualization/virtualization-job-workflow - wget --directory-prefix=./docs.it4i.cz/ https://docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig1.png - wget --directory-prefix=./docs.it4i.cz/ https://docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig2.png - wget --directory-prefix=./docs.it4i.cz/ https://docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig3.png - wget --directory-prefix=./docs.it4i.cz/ https://docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig4.png - wget --directory-prefix=./docs.it4i.cz/ https://docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig5.png - wget --directory-prefix=./docs.it4i.cz/ https://docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig6.png - wget --directory-prefix=./docs.it4i.cz/ https://docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig7.png - wget --directory-prefix=./docs.it4i.cz/ https://docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig7x.png - wget --directory-prefix=./docs.it4i.cz/ https://docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig8.png - wget --directory-prefix=./docs.it4i.cz/ https://docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig9.png - ENDTIME=$(date +%s) - echo "It takes $(($ENDTIME - $STARTTIME)) seconds to complete this task..." - + rm -rf docs-old.it4i.cz + wget -X portal_css,portal_javascripts,++resource++jquery-ui-themes,anselm-cluster-documentation/icon.jpg -R favicon.ico,pdf.png,logo.png,background.png,application.png,search_icon.png,png.png,sh.png,touch_icon.png,anselm-cluster-documentation/icon.jpg,*js,robots.txt,*xml,RSS,download_icon.png,@@*,anselm-cluster-documentation/icon.jpg.1 --mirror --convert-links --adjust-extension --page-requisites --no-parent https://docs-old.it4i.cz; fi if [ "$1" = "-c" ]; then - ### convert html to md - STARTTIME=$(date +%s) - if [ -d ./docs.it4i.cz ]; then - # erasing the previous transfer if [ -d ./docs.it4i ]; then rm -rf ./docs.it4i @@ -80,16 +19,6 @@ if [ "$1" = "-c" ]; then rm -rf ./info; fi - # erasing duplicate files and unwanted files - (while read i; - do - if [ -f "$i" ]; - then - echo "$(tput setaf 9)$i deleted"; - rm "$i"; - fi - done) < ./source/list_rm - # counter for html and md files counter=1 count=$(find . 
-name "*.html" -type f | wc -l) @@ -100,7 +29,7 @@ if [ "$1" = "-c" ]; then # filtering html files echo "$(tput setaf 12)($counter/$count)$(tput setaf 11)$i"; counter=$((counter+1)) - printf "$(tput setaf 15)\t\tFiltering html files...\n"; + printf "\t\tFiltering html files...\n"; HEAD=$(grep -n -m1 '<h1' "$i" |cut -f1 -d: | tr --delete '\n') END=$(grep -n -m1 '<!-- <div tal:content=' "$i" |cut -f1 -d: | tr --delete '\n') @@ -110,47 +39,10 @@ if [ "$1" = "-c" ]; then sed '1,'"$((HEAD-1))"'d' "$i" | sed -n -e :a -e '1,'"$DOWN"'!{P;N;D;};N;ba' > "${i%.*}TMP.html" # converted .html to .md - printf "\t\t.html => $(tput setaf 13).md\n$(tput setaf 15)" + printf "\t\t.html => .md\n" pandoc -f html -t markdown+pipe_tables-grid_tables "${i%.*}TMP.html" -o "${i%.*}.md"; - rm "${i%.*}TMP.html"; - - # filtering html and css elements... - printf "\t\tFiltering html and css elements in md files...\n" - sed -e 's/``` /```/' "${i%.*}.md" | sed -e 's/<\/div>//g' | sed '/^<div/d' | sed -e 's/<\/span>//' | sed -e 's/^\*\*//' | sed -e 's/\\//g' | sed -e 's/^: //g' | sed -e 's/^Obsah//g' > "${i%.*}TMP.md"; - while read x ; do - arg1=`echo "$x" | cut -d"&" -f1 | sed 's:[]\[\^\$\.\*\/\"]:\\\\&:g'`; - arg2=`echo "$x" | cut -d"&" -f2 | sed 's:[]\[\^\$\.\*\/\"]:\\\\&:g'`; - - sed -e 's/'"$arg1"'/'"$arg2"'/' "${i%.*}TMP.md" > "${i%.*}TMP.TEST.md"; - cat -s "${i%.*}TMP.TEST.md" > "${i%.*}TMP.md"; - done < ./source/replace - - # repair formatting... - printf "\t\tFix formatting text...\n" - while read x ; do - arg1=`echo "$x" | cut -d"&" -f1 | sed 's:[]\[\^\$\.\*\/\"]:\\\\&:g'`; - arg2=`echo "$x" | cut -d"&" -f2 | sed 's:[]\[\^\$\.\*\/\"]:\\\\&:g'`; - - sed -e 's/'"$arg1"'/'"$arg2"'/' "${i%.*}TMP.md" | sed -e 's/^``//g' > "${i%.*}TMP.TEST.md"; - cat -s "${i%.*}TMP.TEST.md" > "${i%.*}TMP.md"; - done < ./source/formatting - - # last repair formatting... 
- printf "\t\tLatest fix formatting text...\n" - while read x ; do - arg1=`echo "$x" | cut -d"&" -f1 | sed 's:[]\[\^\$\.\*\/\"]:\\\\&:g'`; - arg2=`echo "$x" | cut -d"&" -f2 | sed 's:[]\[\^\$\.\*\/\"]:\\\\&:g'`; - - sed -e 's/'"$arg1"'/'"$arg2"'/' "${i%.*}TMP.md" > "${i%.*}TMP.TEST.md"; - cat -s "${i%.*}TMP.TEST.md" > "${i%.*}TMP.md"; - done < ./source/lastFilter - - cat "${i%.*}TMP.md" > "${i%.*}.md"; - - # delete temporary files - rm "${i%.*}TMP.md"; - rm "${i%.*}TMP.TEST.md"; + rm "${i%.*}TMP.html" done # delete empty files @@ -160,64 +52,4 @@ if [ "$1" = "-c" ]; then rm "$i"; echo "$(tput setaf 9)$i deleted"; done - - ### create new folder and move converted files - # create folder info and list all files and folders - mkdir info; - echo "$(tput setaf 11)Create folder info and lists od files..."; - find ./docs.it4i.cz -name "*.png" -type f > ./info/list_image; - find ./docs.it4i.cz -name "*.jpg" -type f >> ./info/list_image; - find ./docs.it4i.cz -name "*.jpeg" -type f >> ./info/list_image; - find ./docs.it4i.cz -name "*.md" -type f> ./info/list_md; - find ./docs.it4i.cz -type d | sort > ./info/list_folder; - - count=$(find ./docs.it4i.cz -name "*.md" -type f | wc -l) - - echo "$count" - - if [ $count -eq 150 ]; then - mkdir docs.it4i; - (while read i; - do - mkdir "./docs.it4i/$i"; - done) < ./source/list_folder - - # move md files to folder converted - echo "$(tput setaf 11)Moved md files..."; - while read a b ; do - mv "$a" "./docs.it4i/$b"; - done < <(paste ./info/list_md ./source/list_md_mv) - - # copy jpg, jpeg and png to folder converted - echo "$(tput setaf 11)Copy image files..."; - while read a b ; do - cp "$a" "./docs.it4i/$b"; - done < <(paste ./info/list_image ./source/list_image_mv) - cp ./docs.it4i.cz/salomon/salomon ./docs.it4i/salomon/salomon - cp ./docs.it4i.cz/salomon/salomon-2 ./docs.it4i/salomon/salomon-2 - cp ./docs.it4i/salomon/resource-allocation-and-job-execution/fairshare_formula.png ./docs.it4i/anselm-cluster-documentation/resource-allocation-and-job-execution/fairshare_formula.png - cp ./docs.it4i/salomon/resource-allocation-and-job-execution/job_sort_formula.png ./docs.it4i/anselm-cluster-documentation/resource-allocation-and-job-execution/job_sort_formula.png - cp ./docs.it4i/salomon/software/debuggers/vtune-amplifier.png ./docs.it4i/anselm-cluster-documentation/software/debuggers/vtune-amplifier.png - cp ./docs.it4i/salomon/software/debuggers/Snmekobrazovky20160708v12.33.35.png ./docs.it4i/anselm-cluster-documentation/software/debuggers/Snmekobrazovky20160708v12.33.35.png - cp ./docs.it4i.cz/virtualization-job-workflow ./docs.it4i/anselm-cluster-documentation/software/ - cp ./docs.it4i.cz/anselm-cluster-documentation/anyconnecticon.jpg ./docs.it4i/salomon/accessing-the-cluster/anyconnecticon.jpg - cp ./docs.it4i.cz/anselm-cluster-documentation/anyconnectcontextmenu.jpg ./docs.it4i/salomon/accessing-the-cluster/anyconnectcontextmenu.jpg - cp ./docs.it4i.cz/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/vnc/TightVNC_login.png ./docs.it4i/salomon/software/debuggers/TightVNC_login.png - - # list all files and folder converted - find ./docs.it4i -name "*.png" -type f > ./info/list_image_converted; - find ./docs.it4i -name "*.jpg" -type f >> ./info/list_image_converted; - find ./docs.it4i -name "*.jpeg" -type f >> ./info/list_image_converted; - find ./docs.it4i -name "*.md" -type f> ./info/list_md_converted; - find ./docs.it4i -type d | sort > ./info/list_folder_converted; - - echo "$(tput setaf 13)COMPLETED...$(tput setaf 15)"; 
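For reference, after this cleanup the `-c` branch reduces to a slice-and-pandoc loop. Below is a minimal, self-contained sketch of that surviving core; the mirror path is illustrative, and the `sed` slicing is a simplified equivalent of the range trick kept above:

```bash
#!/bin/bash
# Sketch only: mirrors the retained -c logic under assumed paths.
find ./docs-old.it4i.cz -name '*.html' -type f | while read -r page; do
    # locate the content markers; skip pages missing either one
    start=$(grep -n -m1 '<h1' "$page" | cut -f1 -d:)
    stop=$(grep -n -m1 '<!-- <div tal:content=' "$page" | cut -f1 -d:)
    if [ -z "$start" ] || [ -z "$stop" ]; then
        continue
    fi
    # keep only the fragment between the first <h1> and the template comment
    sed -n "${start},$((stop - 1))p" "$page" > "${page%.*}TMP.html"
    # pipe tables convert more cleanly than pandoc's default grid tables
    pandoc -f html -t markdown+pipe_tables-grid_tables "${page%.*}TMP.html" -o "${page%.*}.md"
    rm "${page%.*}TMP.html"
done
```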
- else - printf "\n\n$(tput setaf 9)Can not create a folder docs.it4i, because the number of MD files disagrees. The converted files remain in the folder docs.it4i.cz !!!!...$(tput setaf 15)\n\n"; - fi - else - printf "\n\n$(tput setaf 9)Folder docs.it4i.cz not exists!!!!...$(tput setaf 15)\n\nRun html_md.sh -w\n\n"; - fi - ENDTIME=$(date +%s) - echo "It takes $(($ENDTIME - $STARTTIME)) seconds to complete this task..." fi diff --git a/scripts/preklopeni_dokumentace/source/formatting b/scripts/preklopeni_dokumentace/source/formatting deleted file mode 100644 index 5ef039f1873c98e6f1e406b544bad0de5887ce03..0000000000000000000000000000000000000000 --- a/scripts/preklopeni_dokumentace/source/formatting +++ /dev/null @@ -1,54 +0,0 @@ - []()& - **[]()&** -- - & -.[]()&. -<!-- -->& -### []()[]()&### -### []()&### -### **&### -### class="n">&### -### Gnome on Windows**&### Gnome on Windows -### Notes **&### Notes -**Summary&**Summary** -Tape Library T950B**&**Tape Library T950B** - ****The R version 3.0.1 is available on Anselm, along with GUI interface&**The R version 3.0.1 is available on Anselm, along with GUI interface -^& -Input:** FASTQ file.&Input: FASTQ file. -Output:** FASTQ file plus an HTML file containing statistics on the&Output: FASTQ file plus an HTML file containing statistics on the -*Figure 2.****** FASTQ file.***&*Figure 2.**FASTQ file.** -Component:** Hpg-aligner.****&Component:** Hpg-aligner.** -Input:** VCF&Input:** VCF** -the corresponding QC and functional annotations.&the corresponding QC and functional annotations.** -Core features:**&**Core features:** -Regulatory:**&**Regulatory:** -Functional annotation**&**Functional annotation** -Variation**&**Variation** -Systems biology**&**Systems biology** -[VNC](../../../salomon/accessing-the-cluster/graphical-user-interface/vnc.html)**.&**[VNC](../../../salomon/accessing-the-cluster/graphical-user-interface/vnc.html)**. -Workaround:**&**Workaround:** --g** : Generates extra debugging information usable by GDB. -g3&-g** : Generates extra debugging information usable by GDB. -g3** --O0** : Suppress all optimizations.&-O0** : Suppress all optimizations.** -nodes.****&nodes. -###Compute Nodes Without Accelerator**&###Compute Nodes Without Accelerator -###Compute Nodes With MIC Accelerator**&###Compute Nodes With MIC Accelerator -###Compute Nodes With GPU Accelerator**&###Compute Nodes With GPU Accelerator -###Fat Compute Nodes**&###Fat Compute Nodes -**Figure Anselm bullx B510 servers****&**Figure Anselm bullx B510 servers** -### Compute Nodes Summary********&### Compute Nodes Summary -<p><a href="x-window-system/cygwin-and-x11-forwarding.html" If no able to forward X11 using PuTTY to CygwinX</a>&[If no able to forward X11 using PuTTY to CygwinX](x-window-system/cygwin-and-x11-forwarding.html) -<p><a href="http://x.cygwin.com/" Install Cygwin</a>&[Install Cygwin](http://x.cygwin.com/) -Component:**> Hpg-Fastq & FastQC&Component:**> Hpg-Fastq & FastQC** -Input:** BAM file.&Input:** BAM file.** -Output:** BAM file plus an HTML file containing statistics. &Output:** BAM file plus an HTML file containing statistics.** -Component:** GATK.&Component:** GATK.** -Input:** BAM&Input:** BAM** -Output:** VCF&Output:** VCF** -Variant Call Format (VCF)**&**Variant Call Format (VCF)** -</span></span>& -/ansys_inc/shared_les/licensing/lic_admin/anslic_admin&/ansys_inc/shared_les/licensing/lic_admin/anslic_admin -<td align="left">& | -However, users need only manage User and CA certificates. 
Note that your&**However, users need only manage User and CA certificates. Note that your -X.509 PKI certificates for communication with us.&X.509 PKI certificates for communication with us.** -PuTTYgen**&**PuTTYgen** -key](../ssh-keys.html) file if Pageant ****SSH&key](../ssh-keys.html) file if Pageant **SSH -authentication agent is not used.&authentication agent is not used.** diff --git a/scripts/preklopeni_dokumentace/source/lastFilter b/scripts/preklopeni_dokumentace/source/lastFilter deleted file mode 100644 index 00641541ae06f6de1a53cafb4d66b091ce6d5c1e..0000000000000000000000000000000000000000 --- a/scripts/preklopeni_dokumentace/source/lastFilter +++ /dev/null @@ -1,70 +0,0 @@ ->key.&key. -</span>& -`.ssh`&`.ssh` directory: 700 (drwx------) -Authorized_keys,&Authorized_keys, known_hosts and public key (`.pub` file): `644 (-rw-r--r--)` -Private key&Private key (`id_rsa/id_rsa.ppk` ): `600 (-rw-------)` - (gnome-session:23691): WARNING **: Cannot open display:& (gnome-session:23691): WARNING **: Cannot open display:** - & - & -Search for the localhost and port number (in this case&**Search for the localhost and port number (in this case -Preferences](gdmscreensaver.png/@@images/8e80a92f-f691-4d92-8e62-344128dcc00b.png "Screensaver Preferences")](../../../../salomon/gnome_screen.jpg.1)& -maximum performance**&maximum performance -Better performance** is obtained by logging on the allocated compute&**Better performance** is obtained by logging on the allocated compute - class="discreet">& - id="result_box"& - class="hps alt-edited">stated &stated in the previous example. -Water-cooled Compute Nodes With MIC Accelerator**&**Water-cooled Compute Nodes With MIC Accelerator** -Access from PRACE network:**&**Access from PRACE network:** -Access from public Internet:**&**Access from public Internet:** -Access from PRACE network:**&**Access from PRACE network:** -Access from public Internet:**&**Access from public Internet:** ->VPN client installation&VPN client installation -Install](https://docs.it4i.cz/salomon/vpn_web_install_2.png/@@images/c2baba93-824b-418d-b548-a73af8030320.png "VPN Install")](../vpn_web_install_2.png)& - [](Salomon_IB_topology.png)& -###IB single-plane topology - ICEX Mcell**&###IB single-plane topology - ICEX Mcell -As shown in a diagram [IB&As shown in a diagram & -and [ & -SCRATCH](storage.html#shared-filesystems).& -There are two main shared file systems on Salomon cluster, the [&There are two main shared file systems on Salomon cluster, the [HOME](storage.html#home)and [SCRATCH](storage.html#shared-filesystems). ->Disk usage and quota commands&Disk usage and quota commands ->OpenCL&### OpenCL -Execution on host**&**Execution on host** -Execution on host - MPI processes distributed over multiple&**Execution on host - MPI processes distributed over multiple -Host only node-file:**&**Host only node-file:** -MIC only node-file**:&MIC only node-file: -Host and MIC node-file**:&Host and MIC node-file: -interface Rstudio&interface Rstudio** -### >&### -3. >>&3. > -- >>&- > ->Usage with MPI&Usage with MPI ->>&> - id="result_box"> & -<span& ->Introduction&Introduction ->Options&Options -***[ANSYS&**[ANSYS -Channel partner](http://www.ansys.com/)***&Channel partner](http://www.ansys.com/)** -Multiphysics)-**Commercial.**&Multiphysics)-**Commercial. ->1. Common way to run Fluent over pbs file&1. Common way to run Fluent over pbs file ->2. Fast way to run Fluent from command line&2. 
Fast way to run Fluent from command line -**Academic**&**Academic -***&** -id="result_box"& -</span>& -[](Fluent_Licence_2.jpg)& -Failed to initialize connection subsystem Win 8.1 - 02-10-15 MS&**Failed to initialize connection subsystem Win 8.1 - 02-10-15 MS diff --git a/scripts/preklopeni_dokumentace/source/list_folder b/scripts/preklopeni_dokumentace/source/list_folder deleted file mode 100644 index 8b4067625d434fc96dcfc90fa6d43fa549d6f30f..0000000000000000000000000000000000000000 --- a/scripts/preklopeni_dokumentace/source/list_folder +++ /dev/null @@ -1,34 +0,0 @@ -anselm-cluster-documentation -anselm-cluster-documentation/accessing-the-cluster -anselm-cluster-documentation/accessing-the-cluster/shell-and-data-access -anselm-cluster-documentation/resource-allocation-and-job-execution -anselm-cluster-documentation/software -anselm-cluster-documentation/software/ansys -anselm-cluster-documentation/software/chemistry -anselm-cluster-documentation/software/comsol -anselm-cluster-documentation/software/debuggers -anselm-cluster-documentation/software/intel-suite -anselm-cluster-documentation/software/mpi-1 -anselm-cluster-documentation/software/numerical-languages -anselm-cluster-documentation/software/numerical-libraries -anselm-cluster-documentation/software/omics-master-1 -anselm-cluster-documentation/storage-1 -get-started-with-it4innovations -get-started-with-it4innovations/accessing-the-clusters -get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer -get-started-with-it4innovations/obtaining-login-credentials -salomon -salomon/accessing-the-cluster -salomon/hardware-overview-1 -salomon/network-1 -salomon/resource-allocation-and-job-execution -salomon/software -salomon/software/ansys -salomon/software/chemistry -salomon/software/comsol -salomon/software/debuggers -salomon/software/intel-suite -salomon/software/mpi-1 -salomon/software/numerical-languages -salomon/storage diff --git a/scripts/preklopeni_dokumentace/source/list_image_mv b/scripts/preklopeni_dokumentace/source/list_image_mv deleted file mode 100644 index d543838d34c7422ee24e0a927e33c73f0f969416..0000000000000000000000000000000000000000 --- a/scripts/preklopeni_dokumentace/source/list_image_mv +++ /dev/null @@ -1,102 +0,0 @@ -anselm-cluster-documentation/software/omics-master-1/fig2.png -anselm-cluster-documentation/software/omics-master-1/fig5.png -anselm-cluster-documentation/software/omics-master-1/fig6.png -anselm-cluster-documentation/software/omics-master-1/fig3.png -get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/TightVNC_login.png -get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/putty-tunnel.png -get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/gnome-terminal.png -get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/gdmscreensaver.png -get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/gnome-compute-nodes-over-vnc.png -get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/gdmdisablescreensaver.png -get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/cygwinX11forwarding.png -get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/XWinlistentcp.png -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/PuttyKeygenerator_004V.png 
-get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/PuttyKeygenerator_003V.png -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/PageantV.png -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/PuttyKeygenerator_001V.png -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/PuTTY_host_Salomon.png -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/PuTTY_keyV.png -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/PuttyKeygenerator_005V.png -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/PuTTY_save_Salomon.png -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/PuttyKeygeneratorV.png -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/PuTTY_open_Salomon.png -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/PuttyKeygenerator_002V.png -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/PuttyKeygenerator_006V.png -salomon/accessing-the-cluster/copy_of_vpn_web_install_3.png -salomon/accessing-the-cluster/vpn_contacting.png -salomon/resource-allocation-and-job-execution/rswebsalomon.png -salomon/accessing-the-cluster/vpn_successfull_connection.png -salomon/accessing-the-cluster/vpn_web_install_2.png -salomon/accessing-the-cluster/vpn_web_login_2.png -get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/gnome_screen.png -salomon/network-1/IBsingleplanetopologyAcceleratednodessmall.png -salomon/network-1/IBsingleplanetopologyICEXMcellsmall.png -salomon/network-1/Salomon_IB_topology.png -salomon/network-1/7D_Enhanced_hypercube.png -salomon/accessing-the-cluster/vpn_web_login.png -salomon/accessing-the-cluster/vpn_login.png -salomon/software/debuggers/totalview2.png -salomon/software/debuggers/Snmekobrazovky20160211v14.27.45.png -salomon/software/debuggers/ddt1.png -salomon/software/debuggers/totalview1.png -salomon/software/debuggers/Snmekobrazovky20160708v12.33.35.png -salomon/software/intel-suite/Snmekobrazovky20151204v15.35.12.png -salomon/software/ansys/AMsetPar1.png -salomon/accessing-the-cluster/vpn_contacting_https_cluster.png -salomon/accessing-the-cluster/vpn_web_download.png -salomon/accessing-the-cluster/vpn_web_download_2.png -salomon/accessing-the-cluster/vpn_contacting_https.png -salomon/accessing-the-cluster/vpn_web_install_4.png -anselm-cluster-documentation/software/omics-master-1/fig7.png -get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/vncviewer.png -salomon/resource-allocation-and-job-execution/job_sort_formula.png -salomon/resource-allocation-and-job-execution/fairshare_formula.png -anselm-cluster-documentation/resource-allocation-and-job-execution/rsweb.png -anselm-cluster-documentation/quality2.png -anselm-cluster-documentation/turbovncclientsetting.png -get-started-with-it4innovations/obtaining-login-credentials/Authorization_chain.png -anselm-cluster-documentation/scheme.png -anselm-cluster-documentation/quality3.png -anselm-cluster-documentation/legend.png -anselm-cluster-documentation/bullxB510.png -salomon/software/debuggers/vtune-amplifier.png -anselm-cluster-documentation/software/debuggers/totalview2.png -anselm-cluster-documentation/software/debuggers/Snmekobrazovky20141204v12.56.36.png 
-anselm-cluster-documentation/software/debuggers/ddt1.png -anselm-cluster-documentation/software/debuggers/totalview1.png -anselm-cluster-documentation/software/numerical-languages/Matlab.png -anselm-cluster-documentation/quality1.png -anselm-cluster-documentation/software/omics-master-1/fig1.png -anselm-cluster-documentation/software/omics-master-1/fig8.png -salomon/software/debuggers/report.png -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/vpnuiV.png -anselm-cluster-documentation/software/omics-master-1/fig4.png -anselm-cluster-documentation/software/omics-master-1/fig7x.png -anselm-cluster-documentation/software/omics-master-1/fig9.png -salomon/software/ansys/Fluent_Licence_2.jpg -salomon/software/ansys/Fluent_Licence_4.jpg -salomon/software/ansys/Fluent_Licence_1.jpg -salomon/software/ansys/Fluent_Licence_3.jpg -anselm-cluster-documentation/accessing-the-cluster/Anselmprofile.jpg -anselm-cluster-documentation/accessing-the-cluster/anyconnecticon.jpg -anselm-cluster-documentation/accessing-the-cluster/anyconnectcontextmenu.jpg -anselm-cluster-documentation/accessing-the-cluster/logingui.jpg -anselm-cluster-documentation/software/ansys/Fluent_Licence_2.jpg -anselm-cluster-documentation/software/ansys/Fluent_Licence_4.jpg -anselm-cluster-documentation/software/ansys/Fluent_Licence_1.jpg -anselm-cluster-documentation/software/ansys/Fluent_Licence_3.jpg -anselm-cluster-documentation/accessing-the-cluster/firstrun.jpg -anselm-cluster-documentation/accessing-the-cluster/successfullconnection.jpg -salomon/sgi-c1104-gp1.jpeg -salomon/salomon-1.jpeg -salomon/hardware-overview-1/uv-2000.jpeg -salomon/salomon-3.jpeg -salomon/salomon-4.jpeg -anselm-cluster-documentation/accessing-the-cluster/loginwithprofile.jpeg -anselm-cluster-documentation/accessing-the-cluster/instalationfile.jpeg -anselm-cluster-documentation/accessing-the-cluster/successfullinstalation.jpeg -anselm-cluster-documentation/accessing-the-cluster/java_detection.jpeg -anselm-cluster-documentation/accessing-the-cluster/executionaccess.jpeg -anselm-cluster-documentation/accessing-the-cluster/downloadfilesuccessfull.jpeg -anselm-cluster-documentation/accessing-the-cluster/executionaccess2.jpeg -anselm-cluster-documentation/accessing-the-cluster/login.jpeg diff --git a/scripts/preklopeni_dokumentace/source/list_md_mv b/scripts/preklopeni_dokumentace/source/list_md_mv deleted file mode 100644 index f4a48544c0fdb926e6a3f96f54de9ba2701edaee..0000000000000000000000000000000000000000 --- a/scripts/preklopeni_dokumentace/source/list_md_mv +++ /dev/null @@ -1,150 +0,0 @@ -get-started-with-it4innovations/accessing-the-clusters/introduction.md -get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/vnc.md -get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/cygwin-and-x11-forwarding.md -get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system.md -get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/graphical-user-interface.md -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/introduction.md -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/putty.md -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/puttygen.md 
-get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/pageant.md -get-started-with-it4innovations/obtaining-login-credentials/obtaining-login-credentials.md -get-started-with-it4innovations/obtaining-login-credentials/certificates-faq.md -get-started-with-it4innovations/applying-for-resources.md -salomon/introduction.md -salomon/resource-allocation-and-job-execution/introduction.md -salomon/resource-allocation-and-job-execution/resources-allocation-policy.md -salomon/resource-allocation-and-job-execution/job-submission-and-execution.md -salomon/resource-allocation-and-job-execution/capacity-computing.md -salomon/resource-allocation-and-job-execution/job-priority.md -salomon/prace.md -salomon/environment-and-modules.md -salomon/network-1/7d-enhanced-hypercube.md -salomon/network-1/ib-single-plane-topology.md -salomon/network-1/network.md -salomon/accessing-the-cluster/outgoing-connections.md -salomon/accessing-the-cluster/vpn-access.md -salomon/software/debuggers/intel-vtune-amplifier.md -salomon/software/debuggers/summary.md -salomon/software/debuggers/allinea-performance-reports.md -salomon/software/debuggers/valgrind.md -salomon/software/debuggers/allinea-ddt.md -salomon/software/debuggers/aislinn.md -salomon/software/debuggers/vampir.md -salomon/software/debuggers/total-view.md -salomon/software/numerical-languages/introduction.md -salomon/software/numerical-languages/octave.md -salomon/software/numerical-languages/matlab.md -salomon/software/numerical-languages/r.md -salomon/software/operating-system.md -salomon/software/mpi-1/mpi.md -salomon/software/mpi-1/mpi4py-mpi-for-python.md -salomon/software/mpi-1/Running_OpenMPI.md -salomon/software/intel-xeon-phi.md -salomon/software/chemistry/phono3py.md -salomon/software/chemistry/molpro.md -salomon/software/chemistry/nwchem.md -salomon/software/ansys/ansys.md -salomon/software/compilers.md -salomon/software/intel-suite/intel-compilers.md -salomon/software/intel-suite/intel-inspector.md -salomon/software/intel-suite/intel-integrated-performance-primitives.md -salomon/software/intel-suite/intel-advisor.md -salomon/software/intel-suite/intel-trace-analyzer-and-collector.md -salomon/software/intel-suite/intel-mkl.md -salomon/software/intel-suite/intel-parallel-studio-introduction.md -salomon/software/intel-suite/intel-tbb.md -salomon/software/intel-suite/intel-debugger.md -salomon/software/debuggers.md -salomon/software/java.md -salomon/software/comsol/comsol-multiphysics.md -salomon/software/comsol/licensing-and-available-versions.md -salomon/software/ansys/ansys-mechanical-apdl.md -salomon/software/ansys/setting-license-preferences.md -salomon/software/ansys/workbench.md -salomon/software/ansys/licensing.md -salomon/software/ansys/ansys-fluent.md -salomon/software/ansys/ansys-cfx.md -salomon/software/ansys/ansys-products-mechanical-fluent-cfx-mapdl.md -salomon/software/ansys/ansys-ls-dyna.md -salomon/storage/cesnet-data-storage.md -salomon/storage/storage.md -salomon/accessing-the-cluster.md -salomon/hardware-overview-1/hardware-overview.md -anselm-cluster-documentation/introduction.md -anselm-cluster-documentation/hardware-overview.md -anselm-cluster-documentation/resource-allocation-and-job-execution/introduction.md -anselm-cluster-documentation/resource-allocation-and-job-execution/resources-allocation-policy.md -anselm-cluster-documentation/resource-allocation-and-job-execution/job-submission-and-execution.md -anselm-cluster-documentation/resource-allocation-and-job-execution/capacity-computing.md 
-anselm-cluster-documentation/resource-allocation-and-job-execution/job-priority.md -anselm-cluster-documentation/prace.md -anselm-cluster-documentation/storage-1/cesnet-data-storage.md -anselm-cluster-documentation/storage-1/storage.md -anselm-cluster-documentation/environment-and-modules.md -anselm-cluster-documentation/accessing-the-cluster/outgoing-connections.md -anselm-cluster-documentation/accessing-the-cluster/shell-and-data-access/shell-and-data-access.md -anselm-cluster-documentation/accessing-the-cluster/vpn-access.md -anselm-cluster-documentation/software/nvidia-cuda.md -anselm-cluster-documentation/software/debuggers/papi.md -anselm-cluster-documentation/software/debuggers/scalasca.md -anselm-cluster-documentation/software/debuggers/intel-vtune-amplifier.md -anselm-cluster-documentation/software/debuggers/summary.md -anselm-cluster-documentation/software/debuggers/cube.md -anselm-cluster-documentation/software/debuggers/allinea-performance-reports.md -anselm-cluster-documentation/software/debuggers/valgrind.md -anselm-cluster-documentation/software/debuggers/allinea-ddt.md -anselm-cluster-documentation/software/debuggers/score-p.md -anselm-cluster-documentation/software/debuggers/vampir.md -anselm-cluster-documentation/software/debuggers/total-view.md -anselm-cluster-documentation/software/debuggers/intel-performance-counter-monitor.md -anselm-cluster-documentation/software/kvirtualization.md -anselm-cluster-documentation/software/gpi2.md -anselm-cluster-documentation/software/paraview.md -anselm-cluster-documentation/software/numerical-languages/introduction.md -anselm-cluster-documentation/software/numerical-languages/octave.md -anselm-cluster-documentation/software/numerical-languages/copy_of_matlab.md -anselm-cluster-documentation/software/numerical-languages/matlab.md -anselm-cluster-documentation/software/numerical-languages/r.md -anselm-cluster-documentation/software/operating-system.md -anselm-cluster-documentation/software/mpi-1/mpi.md -anselm-cluster-documentation/software/mpi-1/mpi4py-mpi-for-python.md -anselm-cluster-documentation/software/mpi-1/running-mpich2.md -anselm-cluster-documentation/software/mpi-1/Running_OpenMPI.md -anselm-cluster-documentation/software/intel-xeon-phi.md -anselm-cluster-documentation/software/chemistry/molpro.md -anselm-cluster-documentation/software/chemistry/nwchem.md -anselm-cluster-documentation/software/ansys.md -anselm-cluster-documentation/software/isv_licenses.md -anselm-cluster-documentation/software/compilers.md -anselm-cluster-documentation/software/intel-suite/intel-compilers.md -anselm-cluster-documentation/software/intel-suite/intel-integrated-performance-primitives.md -anselm-cluster-documentation/software/intel-suite/intel-mkl.md -anselm-cluster-documentation/software/intel-suite/intel-parallel-studio-introduction.md -anselm-cluster-documentation/software/intel-suite/intel-tbb.md -anselm-cluster-documentation/software/intel-suite/intel-debugger.md -anselm-cluster-documentation/software/debuggers.md -anselm-cluster-documentation/software/omics-master-1/priorization-component-bierapp.md -anselm-cluster-documentation/software/omics-master-1/diagnostic-component-team.md -anselm-cluster-documentation/software/omics-master-1/overview.md -anselm-cluster-documentation/software/openfoam.md -anselm-cluster-documentation/software/java.md -anselm-cluster-documentation/software/comsol/comsol-multiphysics.md -anselm-cluster-documentation/software/intel-suite.md -anselm-cluster-documentation/software/ansys/ansys-mechanical-apdl.md 
-anselm-cluster-documentation/software/ansys/ansys-fluent.md -anselm-cluster-documentation/software/ansys/ansys-cfx.md -anselm-cluster-documentation/software/ansys/ls-dyna.md -anselm-cluster-documentation/software/ansys/ansys-ls-dyna.md -anselm-cluster-documentation/software/numerical-libraries/magma-for-intel-xeon-phi.md -anselm-cluster-documentation/software/numerical-libraries/gsl.md -anselm-cluster-documentation/software/numerical-libraries/trilinos.md -anselm-cluster-documentation/software/numerical-libraries/hdf5.md -anselm-cluster-documentation/software/numerical-libraries/intel-numerical-libraries.md -anselm-cluster-documentation/software/numerical-libraries/fftw.md -anselm-cluster-documentation/software/numerical-libraries/petsc.md -anselm-cluster-documentation/network.md -anselm-cluster-documentation/remote-visualization.md -anselm-cluster-documentation/compute-nodes.md -get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md -index.md diff --git a/scripts/preklopeni_dokumentace/source/list_rm b/scripts/preklopeni_dokumentace/source/list_rm deleted file mode 100644 index 8c1e208065c9b1dd5d32010852ffeaeaf3485f5b..0000000000000000000000000000000000000000 --- a/scripts/preklopeni_dokumentace/source/list_rm +++ /dev/null @@ -1,101 +0,0 @@ -./docs.it4i.cz/anselm-cluster-documentation/accessing-the-cluster.html -./docs.it4i.cz/anselm-cluster-documentation/accessing-the-cluster/storage-1.html -./docs.it4i.cz/anselm-cluster-documentation/accessing-the-cluster/x-window-and-vnc.html -./docs.it4i.cz/anselm-cluster-documentation.html -./docs.it4i.cz/anselm-cluster-documentation/icon.jpg -./docs.it4i.cz/anselm-cluster-documentation/resource-allocation-and-job-execution.html -./docs.it4i.cz/anselm-cluster-documentation/software.1.html -./docs.it4i.cz/anselm-cluster-documentation/software/anselm-cluster-documentation/software/mpi-1/running-mpich2.html -./docs.it4i.cz/anselm-cluster-documentation/software/ansys/ansys-cfx-pbs-file/view.html -./docs.it4i.cz/anselm-cluster-documentation/software/ansys/ansys-fluent-pbs-file/view.html -./docs.it4i.cz/anselm-cluster-documentation/software/ansys/ansys-ls-dyna-pbs-file/view.html -./docs.it4i.cz/anselm-cluster-documentation/software/ansys/ansys-mapdl-pbs-file/view.html -./docs.it4i.cz/anselm-cluster-documentation/software/ansys/ansys-products-mechanical-fluent-cfx-mapdl.html -./docs.it4i.cz/anselm-cluster-documentation/software/ansys/licensing.html -./docs.it4i.cz/anselm-cluster-documentation/software/ansys/ls-dyna-pbs-file/view.html -./docs.it4i.cz/anselm-cluster-documentation/software/chemistry.html -./docs.it4i.cz/anselm-cluster-documentation/software/comsol.html -./docs.it4i.cz/anselm-cluster-documentation/software/debuggers/mympiprog_32p_2014-10-15_16-56.html -./docs.it4i.cz/anselm-cluster-documentation/software/mpi-1.html -./docs.it4i.cz/anselm-cluster-documentation/software/numerical-languages.1.html -./docs.it4i.cz/anselm-cluster-documentation/software/numerical-languages.html -./docs.it4i.cz/anselm-cluster-documentation/software/numerical-libraries.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig1.png/image_view_fullscreen.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig1.png/view.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig2.png/image_view_fullscreen.html 
-./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig2.png/view.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig3.png/image_view_fullscreen.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig3.png/view.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig4.png/image_view_fullscreen.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig4.png/view.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig5.png/image_view_fullscreen.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig5.png/view.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig6.png/image_view_fullscreen.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig6.png/view.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig7.png/image_view_fullscreen.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig7.png/view.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig7x.png/image_view_fullscreen.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig7x.png/view.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig8.png/image_view_fullscreen.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig8.png/view.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig9.png/image_view_fullscreen.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/fig9.png/view.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/table1.png/image_view_fullscreen.html -./docs.it4i.cz/anselm-cluster-documentation/software/omics-master-1/images/table1.png/view.html -./docs.it4i.cz/anselm-cluster-documentation/software/virtualization.html -./docs.it4i.cz/anselm-cluster-documentation/storage-1.html -./docs.it4i.cz/anselm-cluster-documentation/storage.html -./docs.it4i.cz/anselm.html -./docs.it4i.cz/changelog.html -./docs.it4i.cz/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface.html -./docs.it4i.cz/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/cygwin-and-x11-forwarding.html -./docs.it4i.cz/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system/x-window-and-vnc.html -./docs.it4i.cz/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/putty.1.html -./docs.it4i.cz/get-started-with-it4innovations/changelog.html -./docs.it4i.cz/get-started-with-it4innovations/obtaining-login-credentials.html -./docs.it4i.cz/links.html -./docs.it4i.cz/pbspro-documentation.html -./docs.it4i.cz/robots.txt -./docs.it4i.cz/salomon/accessing-the-cluster/graphical-user-interface.html -./docs.it4i.cz/salomon/accessing-the-cluster/graphical-user-interface/vnc.html -./docs.it4i.cz/salomon/accessing-the-cluster/shell-and-data-access/shell-and-data-access.html -./docs.it4i.cz/salomon/compute-nodes.html -./docs.it4i.cz/salomon/hardware-overview-1.1.html -./docs.it4i.cz/salomon.html -./docs.it4i.cz/salomon/list_of_modules.html -./docs.it4i.cz/salomon/network-1.html -./docs.it4i.cz/salomon/network-1/IB single-plane topology - 
Accelerated nodes.pdf/view.html -./docs.it4i.cz/salomon/network-1/ib-single-plane-topology/IB single-plane topology - ICEX Mcell.pdf/view.html -./docs.it4i.cz/salomon/network-1/ib-single-plane-topology/schematic-representation-of-the-salomon-cluster-ib-single-plain-topology-hypercube-dimension-0.html -./docs.it4i.cz/salomon/resource-allocation-and-job-execution.html -./docs.it4i.cz/salomon/software/ansys/ansys-cfx-pbs-file/view.html -./docs.it4i.cz/salomon/software/ansys/ansys-fluent-pbs-file/view.html -./docs.it4i.cz/salomon/software/ansys/ansys-ls-dyna-pbs-file/view.html -./docs.it4i.cz/salomon/software/ansys/ansys-mapdl-pbs-file/view.html -./docs.it4i.cz/salomon/software/ansys/ls-dyna-pbs-file/view.html -./docs.it4i.cz/salomon/software/chemistry.html -./docs.it4i.cz/salomon/software/chemistry/phono3py-input/gofree-cond1.sh/view.html -./docs.it4i.cz/salomon/software/chemistry/phono3py-input.html -./docs.it4i.cz/salomon/software/chemistry/phono3py-input/INCAR/view.html -./docs.it4i.cz/salomon/software/chemistry/phono3py-input/KPOINTS/view.html -./docs.it4i.cz/salomon/software/chemistry/phono3py-input/poscar-si/view.html -./docs.it4i.cz/salomon/software/chemistry/phono3py-input/POTCAR/view.html -./docs.it4i.cz/salomon/software/chemistry/phono3py-input/prepare.sh/view.html -./docs.it4i.cz/salomon/software/chemistry/phono3py-input/run.sh/view.html -./docs.it4i.cz/salomon/software/chemistry/phono3py-input/submit.sh/view.html -./docs.it4i.cz/salomon/software/comsol.html -./docs.it4i.cz/salomon/software/debuggers/mympiprog_32p_2014-10-15_16-56.html -./docs.it4i.cz/salomon/software/debuggers/score-p.html -./docs.it4i.cz/salomon/software.html -./docs.it4i.cz/salomon/software/intel-suite.html -./docs.it4i.cz/salomon/software/isv_licenses.html -./docs.it4i.cz/salomon/software/mpi-1.html -./docs.it4i.cz/salomon/software/numerical-languages.1.html -./docs.it4i.cz/salomon/software/numerical-languages.html -./docs.it4i.cz/salomon/storage.html -./docs.it4i.cz/sitemap.html -./docs.it4i.cz/whats-new.html -./docs.it4i.cz/salomon/index.html -./docs.it4i.cz/get-started-with-it4innovations/introduction.html diff --git a/scripts/preklopeni_dokumentace/source/repairIMG b/scripts/preklopeni_dokumentace/source/repairIMG deleted file mode 100644 index 5a8d1763fd87d5d3e2e40aa52b0ab08aa6e43f75..0000000000000000000000000000000000000000 --- a/scripts/preklopeni_dokumentace/source/repairIMG +++ /dev/null @@ -1,123 +0,0 @@ -& -& - -2](../executionaccess2.jpg/@@images/bed3998c-4b82-4b40-83bd-c3528dde2425.jpeg "Execution access 2")& -& -& -& -& -& -& -& -& -& -& -& -**&& -& -& - &increases.](fig5.png.1 "fig5.png") -out.](fig1.png "Fig 1")&out.](fig1.png "Fig 1") -& -operation.](images/fig3.png "fig3.png")&operation.](fig3.png "fig3.png")& -where the position is ambiguous.](images/fig4.png)&where the position is ambiguous.](fig4.png) -genomic coordinates.](images/fig6.png.1 "fig6.png")&genomic coordinates.](fig6.png.1 "fig6.png") - [](cygwin-and-x11-forwarding.html)& -[](putty-tunnel.png)& -& -[****](TightVNC_login.png)& -[](https://docs.it4i.cz/get-started-with-it4innovations/gnome_screen.jpg)& -[](gdmdisablescreensaver.png)& -[](../../../../salomon/gnome_screen.jpg.1)& -[](gnome-terminal.png)& -[](gnome-compute-nodes-over-vnc.png)& - [](PageantV.png)& - [](PuTTY_host_Salomon.png)& - [](PuTTY_keyV.png)& - [](PuTTY_save_Salomon.png)& - [](PuTTY_open_Salomon.png)& - [](PuttyKeygeneratorV.png)& - [](PuttyKeygenerator_001V.png)& - [](PuttyKeygenerator_002V.png)& - [](20150312_143443.png)& - [](PuttyKeygenerator_004V.png)& - 
[](PuttyKeygenerator_005V.png)& - [](PuttyKeygenerator_006V.png)& -[](../vpn_web_login.png)& -Install](https://docs.it4i.cz/salomon/vpn_web_login_2.png/@@images/be923364-0175-4099-a363-79229b88e252.png "VPN Install")](../vpn_web_login_2.png)& -Install](https://docs.it4i.cz/salomon/vpn_web_install_2.png/@@images/c2baba93-824b-418d-b548-a73af8030320.png "VPN Install")](../vpn_web_install_2.png)[ -Install](https://docs.it4i.cz/salomon/copy_of_vpn_web_install_3.png/@@images/9c34e8ad-64b1-4e1d-af3a-13c7a18fbca4.png "VPN Install")](../copy_of_vpn_web_install_3.png)& -Install](https://docs.it4i.cz/salomon/vpn_web_install_4.png/@@images/4cc26b3b-399d-413b-9a6c-82ec47899585.png "VPN Install")](../vpn_web_install_4.png)& -Install](https://docs.it4i.cz/salomon/vpn_web_download.png/@@images/06a88cce-5f51-42d3-8f0a-f615a245beef.png "VPN Install")](../vpn_web_download.png)& -Install](https://docs.it4i.cz/salomon/vpn_web_download_2.png/@@images/3358d2ce-fe4d-447b-9e6c-b82285f9796e.png "VPN Install")](../vpn_web_download_2.png)& -& -[](../vpn_contacting_https_cluster.png)& -Cluster](https://docs.it4i.cz/salomon/vpn_contacting_https.png/@@images/ff365499-d07c-4baf-abb8-ce3e15559210.png "VPN Contacting Cluster")](../vpn_contacting_https.png)& -[](../../anselm-cluster-documentation/anyconnecticon.jpg)& -[](../../anselm-cluster-documentation/anyconnectcontextmenu.jpg)& -[](../vpn_contacting.png)& -login](https://docs.it4i.cz/salomon/vpn_login.png/@@images/5102f29d-93cf-4cfd-8f55-c99c18f196ea.png "VPN login")](../vpn_login.png)& -Connection](https://docs.it4i.cz/salomon/vpn_successfull_connection.png/@@images/45537053-a47f-48b2-aacd-3b519d6770e6.png "VPN Succesfull Connection")](../vpn_successfull_connection.png)& -[](../salomon-2)&& -& -[](salomon)& -& -& -& -[](7D_Enhanced_hypercube.png)& -[](https://docs.it4i.cz/salomon/network-1/ib-single-plane-topology/IB%20single-plane%20topology%20-%20ICEX%20Mcell.pdf)& -[](https://docs.it4i.cz/salomon/network-1/ib-single-plane-topology/IB%20single-plane%20topology%20-%20Accelerated%20nodes.pdf)& -[](Fluent_Licence_1.jpg)& -[](Fluent_Licence_2.jpg)& -[](Fluent_Licence_3.jpg)& -[](Fluent_Licence_4.jpg)& -& -& -[{.image-inline width="451"& -height="513"}](ddt1.png)& -& -[](vtune-amplifier)& -[](totalview1.png)& -[](totalview2.png)& -& -& -& -screensaver](https://docs.it4i.cz/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/vnc/gdmdisablescreensaver.png/@@images/8a4758d9-3027-4ce4-9a90-2d5e88197451.png "Disable lock screen and screensaver")](gdmdisablescreensaver.png)& -[](gnome-terminal.png)& -[](gnome-compute-nodes-over-vnc.png)& -& -genomic coordinates.](fig6.png.1 "fig6.png")&genomic coordinates.](fig6.png) -out.](images/fig1.png "Fig 1")&out.](fig1.png) -operation.](fig3.png "fig3.png")&operation.](fig3.png) -starts.](images/fig7.png "fig7.png")&starts.](fig7.png) -H).](images/fig7x.png "fig7x.png")&H).](fig7x.png) -](images/fig8.png "fig8.png")*&](fig8.png)* -tumor.](images/fig9.png "fig9.png")**&tumor.](fig9.png) -increases.](fig5.png.1 "fig5.png")&increases.](fig5.png) diff --git a/scripts/preklopeni_dokumentace/source/replace b/scripts/preklopeni_dokumentace/source/replace deleted file mode 100644 index a95df81e42d694997a5e586e71a9b7630ecc0196..0000000000000000000000000000000000000000 --- a/scripts/preklopeni_dokumentace/source/replace +++ /dev/null @@ -1,128 +0,0 @@ -style="text-align: left; float: none; ">& -class="anchor-link">& -class="Apple-converted-space">& -class="discreet visualHighlight">& -class="emphasis">& 
-class="glossaryItem">& -class="highlightedSearchTerm">& -class="highlightedSearchTerm">SSH</span><span>&highlightedSearchTerm -class="hps">& -class="hps">& -class="hps">More</span> <span class="hps">& -class="hps trans-target-highlight">& -class="internal-link">& -class="internal-link"><span id="result_box" class="short_text"><span& -class="monospace">& -class="monospace">LAPACKE</span> module, which includes Intel's LAPACKE&LAPACKE modelu, which includes Intel's LAPACKE -class="n">& -class="n">& -class="pre">& -class="pun">node_group_key& -class="short_text"><span& -class="smarterwiki-popup-bubble-body"><span& -class="smarterwiki-popup-bubble-links-container"><span& -class="smarterwiki-popup-bubble-links-row">[{.smarterwiki-popup-bubble-link-favicon}](http://maps.google.com/maps?q=HDF5%20icc%20serial%09pthread%09hdf5%2F1.8.13%09%24HDF5_INC%20%24HDF5_SHLIB%09%24HDF5_INC%20%24HDF5_CPP_LIB%09%24HDF5_INC%20%24HDF5_F90_LIB%0A%0AHDF5%20icc%20parallel%20MPI%0A%09pthread%2C%20IntelMPI%09hdf5-parallel%2F1.8.13%09%24HDF5_INC%20%24HDF5_SHLIB%09Not%20supported%09%24HDF5_INC%20%24HDF5_F90_LIB "Search Google Maps"){.smarterwiki-popup-bubble-link}[{.smarterwiki-popup-bubble-link-favicon}](http://www.google.com/search?q=HDF5%20icc%20serial%09pthread%09hdf5%2F1.8.13%09%24HDF5_INC%20%24HDF5_SHLIB%09%24HDF5_INC%20%24HDF5_CPP_LIB%09%24HDF5_INC%20%24HDF5_F90_LIB%0A%0AHDF5%20icc%20parallel%20MPI%0A%09pthread%2C%20IntelMPI%09hdf5-parallel%2F1.8.13%09%24HDF5_INC%20%24HDF5_SHLIB%09Not%20supported%09%24HDF5_INC%20%24HDF5_F90_LIB "Search Google"){.smarterwiki-popup-bubble-link}[](http://www.google.com/search?hl=com&btnI=I'm+Feeling+Lucky&q=HDF5%20icc%20serial%09pthread%09hdf5%2F1.8.13%09%24HDF5_INC%20%24HDF5_SHLIB%09%24HDF5_INC%20%24HDF5_CPP_LIB%09%24HDF5_INC%20%24HDF5_F90_LIB%0A%0AHDF5%20icc%20parallel%20MPI%0A%09pthread%2C%20IntelMPI%09hdf5-parallel%2F1.8.13%09%24HDF5_INC%20%24HDF5_SHLIB%09Not%20supported%09%24HDF5_INC%20%24HDF5_F90_LIB+wikipedia "Search Wikipedia"){.smarterwiki-popup-bubble-link}</span></span></span></span></span>& -class="smarterwiki-popup-bubble-links"><span& -class="smarterwiki-popup-bubble smarterwiki-popup-bubble-active smarterwiki-popup-bubble-flipped"><span&& -class="smarterwiki-popup-bubble-tip"></span><span -</div>& -<div>& -<div class="itemizedlist">& -<div id="d4841e18">& -<div id="d4841e21">& -<div id="d4841e24">& -<div id="d4841e27">& -<div id="d4841e30">& -<div id="d4841e34">& -<div id="d4841e37">& -{.external& -</span>& -<span& -[<span class="anchor-link">& -<span class="discreet">& -<span class="discreet"></span>& -<span class="glossaryItem">& -<span class="hps">& -<span class="hps alt-edited">& -<span class="listitem">& -<span class="n">& -<span class="s1">& -<span class="WYSIWYG_LINK">& -<span dir="auto">& -<span id="__caret">& -<span id="__caret"><span id="__caret"></span></span>& -(<span id="result_box">& -<span id="result_box" class="short_text"><span class="hps">& -<span id="result_box"><span class="hps">& -</span></span>& -</span> <span& -<span><span>& -</span> <span class="hps">& -</span> <span class="hps">who have a valid</span> <span& -<span><span class="monospace">& -<span><span>Introduction&###Introduction -</span></span><span><span>& -</span></span></span></span><span><span>& -</span></span><span><span><span><span>& -.<span style="text-align: left; "> </span>& -<span style="text-align: start; ">& -{.state-missing-value - style="text-align: left; float: none; ">& - style="text-align: left; float: none; ">& - style="text-align: left; float: none; ">change it&change it 
to - style="text-align: left; float: none; ">Check Putty settings:& - style="text-align: left; float: none; ">Enable X11&Enable X11 - style="text-align: left; float: none; "> & -style="text-align: start; ">& -style="text-align: start; float: none; ">& -.text}.& -.text}& -ssh-connection style="text-alignstart; "}& -<div& -</div>& -{.anchor-link}& -{.code-basic style="text-align: start; "}& -{.code .highlight .white .shell}& -{.docutils .literal}& -{.email-link}& -{.external-link}& -{.fragment}& -{.image-inline}& -{.image-inline width="451" height="513"}& -{.internal-link}& -{.literal-block}& -{.mw-redirect}& -{#parent-fieldname-title}& -{#parent-fieldname-title .documentFirstHeading}& -{.prettyprint}& -{.prettyprint .lang-cpp}& -{.prettyprint .lang-sh}& -{.prettyprint .lang-sh .prettyprinted}& -{#putty---before-we-start-ssh-connection style="text-align: start; "}& -{#resources-allocation-policy}& -{#schematic-overview}& -{.screen}& -{#setup-and-start-your-own-turbovnc-server}& -{style="text-align: left; "}& -ssh-connection style="text-alignstart; "}& -{#putty---before-we-start-& -<span class="pln">& -class="pln">& -id="parent-fieldname-text-5739e5d4b93b40a6b3d987bd4047d4e0">& -id="content-core">& -id="viewlet-above-content-body">& -id="viewlet-below-content-title">& -^[>[1<span>]]& --link}& -<span& - id="Key_management" class="mw-headline">& -class="external-link">& - class="link-external">& -class="WYSIWYG_LINK">& - class="wide-view-wrapper">& - class="listitem">& - class="emphasis">& -class="visualHighlight">& -{.spip_in& -.external& - </span>& diff --git a/scripts/preklopeni_dokumentace/source/tab b/scripts/preklopeni_dokumentace/source/tab deleted file mode 100644 index bec6eca8f5bc1ba6a48cd28c2a1c38babbcf13ef..0000000000000000000000000000000000000000 --- a/scripts/preklopeni_dokumentace/source/tab +++ /dev/null @@ -1,28 +0,0 @@ -<table>&\\ -</th>&\\ -</td>&\\ -<td align="left">& | -</tr>& | -<tr class="odd">&\\ -<tr class="even">&\\ -</tbody>&\\ -</table>&\\ -<p> class="s1">& -</p>&\\ -<colgroup>&\\ -<col width="50%" />&\\ -</colgroup>&\\ -<thead>&\\ -<tr class="header">&\\ -<tbody>&\\ -<th align="left">& | -</thead>& | --- | --- | -<th align="left">& | -<br />&\\ -</p>&\\ -)\&) -.\&. 
-<p>& -<table style="width:100%;">&\\ -<col width="16%" />&\\ -<th align="left">& | diff --git a/scripts/preklopeni_dokumentace/source/tabREPLACE b/scripts/preklopeni_dokumentace/source/tabREPLACE deleted file mode 100644 index 7ed3455feb6bc8c41119e9ab92ce8a67644abb74..0000000000000000000000000000000000000000 --- a/scripts/preklopeni_dokumentace/source/tabREPLACE +++ /dev/null @@ -1,147 +0,0 @@ - compute nodes number of workers start-up time[s]&|compute nodes|number of workers|start-up time[s]| - --------------- ------------------- --------------------&|---|---|---| - 16 384 831&|16|384|831| - 8 192 807&|8|192|807| - 4 96 483&|4|96|483| - 2 48 16&|2|48|16| - Node type Count Range Memory Cores [Access](resource-allocation-and-job-execution/resources-allocation-policy.html)&|Node type|Count|Range|Memory|Cores|[Access](resource-allocation-and-job-execution/resources-allocation-policy.html)| - ---------------------------- ------- --------------- -------- ------------- --------------------------------------------------------------------------------------------------&|---|---|---|---|---|---| - Nodes without accelerator 180 cn[1-180] 64GB 16 @ 2.4Ghz qexp, qprod, qlong, qfree&|Nodes without accelerator|180|cn[1-180]|64GB|16 @ 2.4Ghz|qexp, qprod, qlong, qfree| - Nodes with GPU accelerator 23 cn[181-203] 96GB 16 @ 2.3Ghz qgpu, qprod&|Nodes with GPU accelerator|23|cn[181-203]|96GB|16 @ 2.3Ghz|qgpu, qprod| - Nodes with MIC accelerator 4 cn[204-207] 96GB 16 @ 2.3GHz qmic, qprod&|Nodes with MIC accelerator|4|cn[204-207]|96GB|16 @ 2.3GHz|qmic, qprod| - Fat compute nodes 2 cn[208-209] 512GB 16 @ 2.4GHz qfat, qprod&|Fat compute nodes|2|cn[208-209]|512GB|16 @ 2.4GHz|qfat, qprod| - Node Processor Memory Accelerator&|Node|Processor|Memory|Accelerator| - ------------------ --------------------------------------- -------- ----------------------&|---|---|---|---| - w/o accelerator 2x Intel Sandy Bridge E5-2665, 2.4GHz 64GB -&|w/o accelerator|2x Intel Sandy Bridge E5-2665, 2.4GHz|64GB|-| - GPU accelerated 2x Intel Sandy Bridge E5-2470, 2.3GHz 96GB NVIDIA Kepler K20&|GPU accelerated|2x Intel Sandy Bridge E5-2470, 2.3GHz|96GB|NVIDIA Kepler K20| - MIC accelerated 2x Intel Sandy Bridge E5-2470, 2.3GHz 96GB Intel Xeon Phi P5110&|MIC accelerated|2x Intel Sandy Bridge E5-2470, 2.3GHz|96GB|Intel Xeon Phi P5110| - Fat compute node 2x Intel Sandy Bridge E5-2665, 2.4GHz 512GB -&|Fat compute node|2x Intel Sandy Bridge E5-2665, 2.4GHz|512GB|-| - Login address Port Protocol Login node&|Login address|Port|Protocol|Login node| - ------------------------ ------ ---------- -----------------------------------------&|---|---|---|---| - salomon.it4i.cz 22 ssh round-robin DNS record for login[1-4]&|salomon.it4i.cz|22|ssh|round-robin DNS record for login[1-4]| - login1.salomon.it4i.cz 22 ssh login1&|login1.salomon.it4i.cz|22|ssh|login1| - login2.salomon.it4i.cz 22 ssh login2&|login1.salomon.it4i.cz|22|ssh|login1| - login3.salomon.it4i.cz 22 ssh login3&|login1.salomon.it4i.cz|22|ssh|login1| - login4.salomon.it4i.cz 22 ssh login4&|login1.salomon.it4i.cz|22|ssh|login1| - Toolchain Module(s)&|Toolchain|Module(s)| - -------------------- ------------------------------------------------&|---|----| - GCC GCC&|GCC|GCC| - ictce icc, ifort, imkl, impi&|ictce|icc, ifort, imkl, impi| - intel GCC, icc, ifort, imkl, impi&|intel|GCC, icc, ifort, imkl, impi| - gompi GCC, OpenMPI&|gompi|GCC, OpenMPI| - goolf BLACS, FFTW, GCC, OpenBLAS, OpenMPI, ScaLAPACK&|goolf|BLACS, FFTW, GCC, OpenBLAS, OpenMPI, ScaLAPACK| - >iompi OpenMPI, icc, 
- iccifort icc, ifort&|iccifort|icc, ifort|
- Login address Port Protocol Login node& |Login address|Port|Protocol|Login node|
- ------------------------------ ------ ---------- ----------------------------------& |---|---|
- salomon-prace.it4i.cz 2222 gsissh login1, login2, login3 or login4& |salomon-prace.it4i.cz|2222|gsissh|login1, login2, login3 or login4|
- login1-prace.salomon.it4i.cz 2222 gsissh login1& |login1-prace.salomon.it4i.cz|2222|gsissh|login1|
- login2-prace.salomon.it4i.cz 2222 gsissh login2& |login2-prace.salomon.it4i.cz|2222|gsissh|login2|
- login3-prace.salomon.it4i.cz 2222 gsissh login3& |login3-prace.salomon.it4i.cz|2222|gsissh|login3|
- login4-prace.salomon.it4i.cz 2222 gsissh login4& |login4-prace.salomon.it4i.cz|2222|gsissh|login4|
- |Login address|Port|Protocol|Login node|& |Login address|Port|Protocol|Login node|
- ------------------------ ------ ---------- ----------------------------------& |---|---|
- salomon.it4i.cz 2222 gsissh login1, login2, login3 or login4& |salomon.it4i.cz|2222|gsissh|login1, login2, login3 or login4|
- login1.salomon.it4i.cz 2222 gsissh login1& |login1.salomon.it4i.cz|2222|gsissh|login1|
- login2.salomon.it4i.cz 2222 gsissh login2& |login2-prace.salomon.it4i.cz|2222|gsissh|login2|
- login3.salomon.it4i.cz 2222 gsissh login3& |login3-prace.salomon.it4i.cz|2222|gsissh|login3|
- login4.salomon.it4i.cz 2222 gsissh login4& |login4-prace.salomon.it4i.cz|2222|gsissh|login4|
- Login address Port Node role& |Login address|Port|Node role|
- ------------------------------- ------ -----------------------------& |---|---|
- gridftp-prace.salomon.it4i.cz 2812 Front end /control server& |gridftp-prace.salomon.it4i.cz|2812|Front end /control server|
- lgw1-prace.salomon.it4i.cz 2813 Backend / data mover server& |lgw1-prace.salomon.it4i.cz|2813|Backend / data mover server|
- lgw2-prace.salomon.it4i.cz 2813 Backend / data mover server& |lgw2-prace.salomon.it4i.cz|2813|Backend / data mover server|
- lgw3-prace.salomon.it4i.cz 2813 Backend / data mover server& |lgw3-prace.salomon.it4i.cz|2813|Backend / data mover server|
- Login address Port Node role& |Login address|Port|Node role|
- ------------------------- ------ -----------------------------& |---|---|
- gridftp.salomon.it4i.cz 2812 Front end /control server& |gridftp.salomon.it4i.cz|2812|Front end /control server|
- lgw1.salomon.it4i.cz 2813 Backend / data mover server& |lgw1.salomon.it4i.cz|2813|Backend / data mover server|
- lgw2.salomon.it4i.cz 2813 Backend / data mover server& |lgw2.salomon.it4i.cz|2813|Backend / data mover server|
- lgw3.salomon.it4i.cz 2813 Backend / data mover server& |lgw3.salomon.it4i.cz|2813|Backend / data mover server|
- File system mount point Filesystem Comment& |File system mount point|Filesystem|Comment|
- ------------------------- ------------ ----------------------------------------------------------------& |---|---|
- /home Lustre Default HOME directories of users in format /home/prace/login/& |/home|Lustre|Default HOME directories of users in format /home/prace/login/|
- /scratch Lustre Shared SCRATCH mounted on the whole cluster& |/scratch|Lustre|Shared SCRATCH mounted on the whole cluster|
- Data type Default path& |Data type|Default path|
- ------------------------------ ---------------------------------& |---|---|
- large project files /scratch/work/user/prace/login/& |large project files|/scratch/work/user/prace/login/|
- large scratch/temporary data /scratch/temp/& |large scratch/temporary data|/scratch/temp/|
- ---------------------------------------------------------------------------------------------------------------------------------------------&
- queue Active project Project resources Nodes priority authorization walltime&|queue|Active project|Project resources|Nodes|priority|authorization|walltime default/max|
- default/max&
- --------------------- -|---|---|---|--------------------- ---------- --------------- -------------&|---|---|
- **qexp** no none required 32 nodes, max 8 per user 150 no 1 / 1h& |**qexp** Express queue|no|none required|32 nodes, max 8 per user|150|no|1 / 1h|
- Express queue&\\
- **qprace** yes > 0 >1006 nodes, max 86 per job 0 no 24 / 48h&|**qprace** Production queue|yes|> 0|1006 nodes, max 86 per job|0|no|24 / 48h|
- Production queue&\\
- **qfree** yes none required 752 nodes, max 86 per job -1024 no 12 / 12h&|**qfree** Free resource queue|yes|none required|752 nodes, max 86 per job|-1024|no|12 / 12h|
- Free resource queue&\\
- ---------------------------------------------------------------------------------------------------------------------------------------------&
- Port Protocol& |Port|Protocol|
- ------ ----------& |---|---|
- 22 ssh& |22|ssh|
- 80 http& |80|http|
- 443 https& |443|https|
- 9418 git& |9418|git|
- Node Count Processor Cores Memory Accelerator& |Node|Count|Processor|Cores|Memory|Accelerator|
- ----------------- ------- ---------------------------------- ------- -------- --------------------------------------------& |---|---|
- w/o accelerator 576 2x Intel Xeon E5-2680v3, 2.5GHz 24 128GB -& |w/o accelerator|576|2x Intel Xeon E5-2680v3, 2.5GHz|24|128GB|-|
- MIC accelerated 432 2x Intel Xeon E5-2680v3, 2.5GHz 24 128GB 2x Intel Xeon Phi 7120P, 61cores, 16GB RAM& |MIC accelerated|432|2x Intel Xeon E5-2680v3, 2.5GHz|24|128GB|2x Intel Xeon Phi 7120P, 61cores, 16GB RAM|
- Node Count Processor Cores Memory GPU Accelerator& |Node|Count|Processor|Cores|Memory|GPU Accelerator|
- --------------- ------- --------------------------------- ------- -------- ------------------------------& |---|---|
- visualization 2 2x Intel Xeon E5-2695v3, 2.3GHz 28 512GB NVIDIA QUADRO K5000, 4GB RAM& |visualization|2|2x Intel Xeon E5-2695v3, 2.3GHz|28|512GB|NVIDIA QUADRO K5000, 4GB RAM|
- Hypercube dimension& |Hypercube|dimension|
- --------------------- -------------------------------------------& |---|---|
- 1D ehc_1d& |1D|ehc_1d|
- 2D ehc_2d& |2D|ehc_2d|
- 3D ehc_3d& |3D|ehc_3d|
- 4D ehc_4d& |4D|ehc_4d|
- 5D ehc_5d& |5D|ehc_5d|
- 6D ehc_6d& |6D|ehc_6d|
- 7D ehc_7d& |7D|ehc_7d|
- Node type Count Short name Long name Rack& |Node type|Count|Short name|Long name|Rack|
- -------------------------------------- ------- ------------------ -------------------------- -------& |---|---|
- M-Cell compute nodes w/o accelerator 576 cns1 -cns576 r1i0n0 - r4i7n17 1-4& |M-Cell compute nodes w/o accelerator|576|cns1 -cns576|r1i0n0 - r4i7n17|1-4|
- compute nodes MIC accelerated 432 cns577 - cns1008 r21u01n577 - r37u31n1008 21-38& |compute nodes MIC accelerated|432|cns577 - cns1008|r21u01n577 - r37u31n1008|21-38|
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------&
- Mountpoint Usage Protocol Net Capacity Throughput Limitations Access Services& |Mountpoint|Usage|Protocol|Net|Capacity|Throughput|Limitations|Access|
- ---------------------------------------------- -------------------------------- ------------- -------------- ------------ ------------- ------------------------- -----------------------------& |---|---|
- /home home directory NFS, 2-Tier 0.5 PB 6 GB/s Quota 250GB Compute and login nodes backed up&| /home|home directory|NFS, 2-Tier|0.5 PB|6 GB/s|Quota 250GB|Compute and login nodes|backed up|
- /scratch/work large project files Lustre 1.69 PB 30 GB/s Quota Compute and login nodes none& |/scratch/work|large project files|Lustre|1.69 PB|30 GB/s|Quota|Compute and login nodes|none|
- 1TB&
- /scratch/temp job temporary data Lustre 1.69 PB 30 GB/s Quota 100TB Compute and login nodes files older 90 days removed& |/scratch/temp|job temporary data|Lustre|1.69 PB|30 GB/s|Quota 100TB|Compute and login nodes|files older 90 days removed|
- /ramdisk job temporary data, node local local 120GB 90 GB/s none Compute nodes purged after job ends& |/ramdisk|job temporary data, node local|local|120GB|90 GB/s|none|Compute nodes|purged after job ends|
- -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------&
- Application Version module& |Application|Version|module|
- ------------- -------------- ---------------------& |---|---|
- **R** R 3.1.1 R/3.1.1-intel-2015b& |**R**|R 3.1.1|R/3.1.1-intel-2015b|
- **Rstudio** Rstudio 0.97 Rstudio& |**Rstudio**|Rstudio 0.97|Rstudio|
- 24 MPI**&**24 MPI**
- --------------- -----------------------& |**Version**|**Module**|
-**Version** **Module**& |---|---|
- 2016 Update 2 Advisor/2016_update2& |2016 Update 2|Advisor/2016_update2|
- 2016 Update 1 Inspector/2016_update1& |2016 Update 1|Inspector/2016_update1|
- Interface Integer type& |Interface|Integer type|
- ----------- -----------------------------------------------& |---|---|
- LP64 32-bit, int, integer(kind=4), MPI_INT& |LP64|32-bit, int, integer(kind=4), MPI_INT|
- ILP64 64-bit, long int, integer(kind=8), MPI_INT64 & |ILP64|64-bit, long int, integer(kind=8), MPI_INT64|
- Parameter Value& |Parameter|Value|
- ------------------------------------------------- -----------------------------& |---|---|
- >max number of atoms 200& |max number of atoms|200|
- >max number of valence orbitals 300& |max number of valence orbitals|300|
- >max number of basis functions 4095& |max number of basis functions|4095|
- >max number of states per symmmetry 20& |max number of states per symmmetry|20|
- >max number of state symmetries 16& |max number of state symmetries|16|
- >max number of records 200& |max number of records|200|
- >max number of primitives >maxbfn x [2]& |max number of primitives|maxbfn x [2]|
- Address Port Protocol& |Address|Port|Protocol|
- -------------------------------------------------- ---------------------------------- -----------------------------------------& |---|---|
- anselm.it4i.cz 22 scp, sftp& |anselm.it4i.cz|22|scp, sftp|
- login1.anselm.it4i.cz 22 scp, sftp& |login1.anselm.it4i.cz|22|scp, sftp|
- login2.anselm.it4i.cz 22 scp, sftp& |login2.anselm.it4i.cz|22|scp, sftp|
- class="discreet">dm1.anselm.it4i.cz 22 class="discreet">scp, sftp</span>& |dm1.anselm.it4i.cz|22|scp, sftp|
- Login address Port Protocol Login node& |Login address|Port|Protocol|Login node|
- ----------------------- ------ ---------- ----------------------------------------------& |---|----|
- anselm.it4i.cz 22 ssh round-robin DNS record for login1 and login2& |anselm.it4i.cz|22|ssh|round-robin DNS record for login1 and login2|
- login1.anselm.it4i.cz 22 ssh login1& |login1.anselm.it4i.cz|22|ssh|login1|
- login2.anselm.it4i.cz 22 ssh login2& |login2.anselm.it4i.cz|22|ssh|login2|
diff --git a/scripts/titlemd.py b/scripts/titlemd.py
new file mode 100755
index 0000000000000000000000000000000000000000..2fc5cb89c9cd25b4076cd7800b91fbf8b8e899c7
--- /dev/null
+++ b/scripts/titlemd.py
@@ -0,0 +1,45 @@
+#!/usr/bin/python
+
+import sys
+
+try:
+    from titlecase import titlecase
+except ImportError:
+    print("Please install titlecase")
+    sys.exit(1)
+
+def main(location):
+    # Spelling exceptions: words listed in .spelling keep their casing
+    with open('.spelling') as f:
+        spelling = f.readlines()
+
+    def abbreviations(word, **kwargs):
+        if word+"\n" in spelling:
+            return word
+
+    # Open the file and read the lines as a list
+    with open(location) as f:
+        lines = f.readlines()
+
+    with open(location, 'w') as f:
+        # Titlecase ATX ('#') and setext ('---'/'===') headings,
+        # skipping fenced code blocks; everything from a ']' onwards
+        # (link targets and attributes) is left untouched.
+        prev_line = lines.pop(0)
+        disabled = False
+        for line in lines:
+            if line.startswith("``"):
+                # entering or leaving a fenced code block
+                disabled = not disabled
+            if line.startswith('#') and not disabled:
+                line = titlecase(line[:line.find("]")], callback=abbreviations)+line[line.find("]"):]
+            if (line.startswith('---') or line.startswith('===')) and not disabled:
+                prev_line = titlecase(prev_line[:prev_line.find("]")], callback=abbreviations)+prev_line[prev_line.find("]"):]
+            f.write(prev_line)
+            prev_line = line
+        f.write(prev_line)
+
+if __name__ == "__main__":
+    if len(sys.argv) < 2:
+        sys.exit("usage: titlemd.py <file.md>")
+    main(sys.argv[1])
diff --git a/scripts/titlemd_test.py b/scripts/titlemd_test.py
new file mode 100755
index 0000000000000000000000000000000000000000..38ad036adb514ac8b857b831155a62d5ef5f7b2e
--- /dev/null
+++ b/scripts/titlemd_test.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+from __future__ import print_function
+
+import sys
+
+try:
+    from titlecase import titlecase
+except ImportError:
+    print("Please install titlecase")
+    sys.exit(1)
+
+def main(location):
+    # Spelling exceptions: words listed in .spelling keep their casing
+    with open('.spelling') as f:
+        spelling = f.readlines()
+
+    def abbreviations(word, **kwargs):
+        if word+"\n" in spelling:
+            return word
+
+    def expected(heading):
+        # The titlecased form a heading should have; everything from
+        # a ']' onwards (link targets and attributes) is left untouched.
+        return titlecase(heading[:heading.find("]")], callback=abbreviations)+heading[heading.find("]"):]
+
+    # Open the file and read the lines as a list
+    with open(location) as f:
+        lines = f.readlines()
+
+    # Check ATX ('#') and setext ('---'/'===') headings, skipping
+    # fenced code blocks; report each offender as a -/+ pair and
+    # exit non-zero so the CI job fails.
+    return_value = 0
+    prev_line = lines.pop(0)
+    disabled = False
+    echo_filename = False
+    for line in lines:
+        if line.startswith("``"):
+            # entering or leaving a fenced code block
+            disabled = not disabled
+        if line.startswith('#') and not disabled:
+            if line != expected(line):
+                if not echo_filename:
+                    print(location)
+                    echo_filename = True
+                print("-"+line, end="")
+                print("+"+expected(line), end="")
+                print()
+                return_value = 1
+        if (line.startswith('---') or line.startswith('===')) and not disabled:
+            if prev_line != expected(prev_line):
+                if not echo_filename:
+                    print(location)
+                    echo_filename = True
+                print("-"+prev_line, end="")
+                print("+"+expected(prev_line), end="")
+                print()
+                return_value = 1
+        prev_line = line
+    sys.exit(return_value)
+
+if __name__ == "__main__":
+    if len(sys.argv) < 2:
+        sys.exit("usage: titlemd_test.py <file.md>")
+    main(sys.argv[1])