From 8a1eb760b9517eb5589da989a3693467041453db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Hrb=C3=A1=C4=8D?= <david@hrbac.cz> Date: Sun, 22 Jan 2017 22:38:20 +0100 Subject: [PATCH] Spell check --- .spelling | 234 ++++++++++++++++++ .../compute-nodes.md | 24 +- .../environment-and-modules.md | 2 +- .../hardware-overview.md | 20 +- .../introduction.md | 4 +- .../job-priority.md | 4 +- .../anselm-cluster-documentation/network.md | 6 +- .../anselm-cluster-documentation/prace.md | 4 +- .../resource-allocation-and-job-execution.md | 2 +- .../software/ansys/ansys-cfx.md | 4 +- .../software/ansys/ansys-fluent.md | 2 +- .../software/ansys/ansys-ls-dyna.md | 4 +- .../software/ansys/ansys-mechanical-apdl.md | 4 +- .../software/ansys/ansys.md | 2 +- .../software/ansys/ls-dyna.md | 6 +- .../software/chemistry/molpro.md | 2 +- .../software/chemistry/nwchem.md | 2 +- .../software/compilers.md | 6 +- .../software/comsol-multiphysics.md | 2 +- .../debuggers/allinea-performance-reports.md | 2 +- .../software/debuggers/cube.md | 2 +- .../intel-performance-counter-monitor.md | 2 +- .../debuggers/intel-vtune-amplifier.md | 2 +- .../software/debuggers/papi.md | 2 +- .../software/debuggers/scalasca.md | 2 +- .../software/debuggers/valgrind.md | 2 +- .../software/debuggers/vampir.md | 2 +- .../software/index.md | 14 +- .../software/intel-suite/intel-compilers.md | 2 +- .../software/isv_licenses.md | 8 +- .../software/java.md | 2 +- .../software/kvirtualization.md | 18 +- .../software/mpi/mpi.md | 4 +- .../numerical-languages/matlab_1314.md | 4 +- .../software/numerical-languages/octave.md | 2 +- .../software/numerical-languages/r.md | 2 +- .../software/numerical-libraries/fftw.md | 4 +- .../software/numerical-libraries/hdf5.md | 4 +- .../software/numerical-libraries/petsc.md | 2 +- .../software/numerical-libraries/trilinos.md | 6 +- .../software/nvidia-cuda.md | 8 +- .../omics-master/diagnostic-component-team.md | 2 +- .../priorization-component-bierapp.md | 8 +- .../software/operating-system.md | 2 +- .../software/paraview.md | 2 +- .../anselm-cluster-documentation/storage.md | 24 +- .../certificates-faq.md | 2 +- docs.it4i/salomon/compute-nodes.md | 22 +- docs.it4i/salomon/environment-and-modules.md | 14 +- docs.it4i/salomon/hardware-overview.md | 2 +- docs.it4i/salomon/ib-single-plane-topology.md | 14 +- docs.it4i/salomon/introduction.md | 2 +- docs.it4i/salomon/job-priority.md | 4 +- docs.it4i/salomon/network.md | 6 +- docs.it4i/salomon/prace.md | 6 +- .../resource-allocation-and-job-execution.md | 2 +- docs.it4i/salomon/software/compilers.md | 10 +- docs.it4i/salomon/software/index.md | 8 +- docs.it4i/salomon/software/java.md | 2 +- .../salomon/software/operating-system.md | 2 +- docs.it4i/salomon/storage.md | 14 +- 61 files changed, 406 insertions(+), 172 deletions(-) diff --git a/.spelling b/.spelling index cbbdd7d03..5087e18b3 100644 --- a/.spelling +++ b/.spelling @@ -9,3 +9,237 @@ IT4Innovations PBS Salomon TurboVNC +DDR3 +DIMM +InfiniBand +CUDA +COMSOL +LiveLink +MATLAB +Allinea +LLNL +Vampir +Doxygen +VTune +TotalView +Valgrind +ParaView +OpenFOAM +MPI4Py +MPICH2 +PETSc +Trilinos +FFTW +HDF5 +BiERapp +AVX +AVX2 +JRE +JDK +QEMU +VMware +VirtualBox +NUMA +SMP +BLAS +LAPACK +FFTW3 +Dongarra +OpenCL +cuBLAS +CESNET +Jihlava +NVIDIA +Xeon +ANSYS +CentOS +RHEL +DDR4 +DIMMs +GDDR5 +EasyBuild +e.g. 
+MPICH +MVAPICH2 +OpenBLAS +ScaLAPACK +SGI +UV2000 +400GB +Mellanox +RedHat +ssh.du1.cesnet.cz +ssh.du2.cesnet.cz +ssh.du3.cesnet.cz +DECI +supercomputing +AnyConnect +X11 + - docs.it4i/anselm-cluster-documentation/environment-and-modules.md +MODULEPATH +bashrc +PrgEnv-gnu +bullx +MPI +PrgEnv-intel +EasyBuild + - docs.it4i/anselm-cluster-documentation/capacity-computing.md +capacity.zip +README + - docs.it4i/anselm-cluster-documentation/compute-nodes.md +DIMMs + - docs.it4i/anselm-cluster-documentation/hardware-overview.md +cn +K20 +Xeon +x86-64 +Virtualization +virtualization +NVIDIA +5110P +SSD +lscratch +login1 +login2 +dm1 +Rpeak +LINPACK +Rmax +E5-2665 +E5-2470 +P5110 + - docs.it4i/anselm-cluster-documentation/introduction.md +RedHat + - docs.it4i/anselm-cluster-documentation/job-priority.md +walltime +qexp + - docs.it4i/anselm-cluster-documentation/job-submission-and-execution.md +15209.srv11 +qsub +15210.srv11 +pwd +cn17.bullx +cn108.bullx +cn109.bullx +cn110.bullx +pdsh +hostname +SCRDIR +mkdir +mpiexec +qprod +Jobscript +jobscript +cn108 +cn109 +cn110 + - docs.it4i/anselm-cluster-documentation/network.md +ib0 + - docs.it4i/anselm-cluster-documentation/prace.md +PRACE +qfree +it4ifree +it4i.portal.clients + - docs.it4i/anselm-cluster-documentation/shell-and-data-access.md +VPN + - docs.it4i/anselm-cluster-documentation/software/ansys/ansys-cfx.md +ANSYS +CFX +cfx.pbs + - docs.it4i/anselm-cluster-documentation/software/ansys/ansys-mechanical-apdl.md +mapdl.pbs + - docs.it4i/anselm-cluster-documentation/software/ansys/ls-dyna.md +HPC +lsdyna.pbs + - docs.it4i/anselm-cluster-documentation/software/chemistry/molpro.md +OpenMP + - docs.it4i/anselm-cluster-documentation/software/compilers.md +Fortran + - docs.it4i/anselm-cluster-documentation/software/debuggers/intel-performance-counter-monitor.md +E5-2600 + - docs.it4i/anselm-cluster-documentation/software/debuggers/score-p.md +Makefile + - docs.it4i/anselm-cluster-documentation/software/gpi2.md +gcc +cn79 + - docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-compilers.md +Haswell +CPUs + - docs.it4i/anselm-cluster-documentation/software/kvirtualization.md +rc.local +runlevel +RDP +DHCP +DNS +SMB +VDE +smb.conf +TMPDIR +run.bat. 
+ - docs.it4i/anselm-cluster-documentation/software/mpi/mpi4py-mpi-for-python.md +NumPy + - docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab_1314.md +mpiLibConf.m +matlabcode.m +output.out +matlabcodefile +sched + - docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab.md +UV2000 + - docs.it4i/anselm-cluster-documentation/software/numerical-languages/octave.md +_THREADS + - docs.it4i/anselm-cluster-documentation/software/numerical-libraries/trilinos.md +CMake-aware +Makefile.export + - docs.it4i/anselm-cluster-documentation/software/ansys/ansys-ls-dyna.md +ansysdyna.pbs + - docs.it4i/anselm-cluster-documentation/software/ansys/ansys.md +svsfem.cz + - docs.it4i/anselm-cluster-documentation/software/debuggers/valgrind.md +libmpiwrap-amd64-linux + - docs.it4i/anselm-cluster-documentation/software/numerical-libraries/magma-for-intel-xeon-phi.md +cn204 + - docs.it4i/anselm-cluster-documentation/software/paraview.md +cn77 +localhost + - docs.it4i/anselm-cluster-documentation/storage.md +ssh.du1.cesnet.cz +Plzen +ssh.du2.cesnet.cz +ssh.du3.cesnet.cz + - docs.it4i/salomon/environment-and-modules.md +icc + - docs.it4i/salomon/hardware-overview.md +HW + - docs.it4i/salomon/job-submission-and-execution.md +15209.isrv5 +r21u01n577 +r21u02n578 +r21u03n579 +r21u04n580 +qsub +15210.isrv5 +pwd +r2i5n6.ib0.smc.salomon.it4i.cz +r4i6n13.ib0.smc.salomon.it4i.cz +r4i7n2.ib0.smc.salomon.it4i.cz +pdsh +r2i5n6 +r4i6n13 +r4i7n +r4i7n2 +r4i7n0 +SCRDIR +myjob +mkdir +mympiprog.x +mpiexec +myprog.x + - docs.it4i/salomon/7d-enhanced-hypercube.md +cns1 +cns576 +r1i0n0 +r4i7n17 +cns577 +cns1008 +r37u31n1008 diff --git a/docs.it4i/anselm-cluster-documentation/compute-nodes.md b/docs.it4i/anselm-cluster-documentation/compute-nodes.md index d6ddb121a..279802a20 100644 --- a/docs.it4i/anselm-cluster-documentation/compute-nodes.md +++ b/docs.it4i/anselm-cluster-documentation/compute-nodes.md @@ -108,28 +108,28 @@ Memory Architecture - 2 sockets - Memory Controllers are integrated into processors. - - 8 DDR3 DIMMS per node - - 4 DDR3 DIMMS per CPU - - 1 DDR3 DIMMS per channel + - 8 DDR3 DIMMs per node + - 4 DDR3 DIMMs per CPU + - 1 DDR3 DIMMs per channel - Data rate support: up to 1600MT/s -- Populated memory: 8x 8GB DDR3 DIMM 1600Mhz +- Populated memory: 8 x * GB DDR3 DIMM 1600 MHz ### Compute Node With GPU or MIC Accelerator - 2 sockets - Memory Controllers are integrated into processors. - - 6 DDR3 DIMMS per node - - 3 DDR3 DIMMS per CPU - - 1 DDR3 DIMMS per channel + - 6 DDR3 DIMMs per node + - 3 DDR3 DIMMs per CPU + - 1 DDR3 DIMMs per channel - Data rate support: up to 1600MT/s -- Populated memory: 6x 16GB DDR3 DIMM 1600Mhz +- Populated memory: 6 x 16 GB DDR3 DIMM 1600 MHz ### Fat Compute Node - 2 sockets - Memory Controllers are integrated into processors. - - 16 DDR3 DIMMS per node - - 8 DDR3 DIMMS per CPU - - 2 DDR3 DIMMS per channel + - 16 DDR3 DIMMs per node + - 8 DDR3 DIMMs per CPU + - 2 DDR3 DIMMs per channel - Data rate support: up to 1600MT/s -- Populated memory: 16x 32GB DDR3 DIMM 1600Mhz +- Populated memory: 16 x 32 GB DDR3 DIMM 1600 MHz diff --git a/docs.it4i/anselm-cluster-documentation/environment-and-modules.md b/docs.it4i/anselm-cluster-documentation/environment-and-modules.md index f2b19853f..916f14a9d 100644 --- a/docs.it4i/anselm-cluster-documentation/environment-and-modules.md +++ b/docs.it4i/anselm-cluster-documentation/environment-and-modules.md @@ -25,7 +25,7 @@ fi ``` !!! 
Note "Note" - Do not run commands outputing to standard output (echo, module list, etc) in .bashrc for non-interactive SSH sessions. It breaks fundamental functionality (scp, PBS) of your account! Take care for SSH session interactivity for such commands as stated in the previous example. + Do not run commands outputting to standard output (echo, module list, etc) in .bashrc for non-interactive SSH sessions. It breaks fundamental functionality (scp, PBS) of your account! Take care for SSH session interactivity for such commands as stated in the previous example. ### Application Modules diff --git a/docs.it4i/anselm-cluster-documentation/hardware-overview.md b/docs.it4i/anselm-cluster-documentation/hardware-overview.md index 5aac8f98e..720f9bf29 100644 --- a/docs.it4i/anselm-cluster-documentation/hardware-overview.md +++ b/docs.it4i/anselm-cluster-documentation/hardware-overview.md @@ -1,9 +1,9 @@ Hardware Overview ================= -The Anselm cluster consists of 209 computational nodes named cn[1-209] of which 180 are regular compute nodes, 23 GPU Kepler K20 accelerated nodes, 4 MIC Xeon Phi 5110 accelerated nodes and 2 fat nodes. Each node is a powerful x86-64 computer, equipped with 16 cores (two eight-core Intel Sandy Bridge processors), at least 64GB RAM, and local hard drive. The user access to the Anselm cluster is provided by two login nodes login[1,2]. The nodes are interlinked by high speed InfiniBand and Ethernet networks. All nodes share 320TB /home disk storage to store the user files. The 146TB shared /scratch storage is available for the scratch data. +The Anselm cluster consists of 209 computational nodes named cn[1-209] of which 180 are regular compute nodes, 23 GPU Kepler K20 accelerated nodes, 4 MIC Xeon Phi 5110 accelerated nodes and 2 fat nodes. Each node is a powerful x86-64 computer, equipped with 16 cores (two eight-core Intel Sandy Bridge processors), at least 64 GB RAM, and local hard drive. The user access to the Anselm cluster is provided by two login nodes login[1,2]. The nodes are interlinked by high speed InfiniBand and Ethernet networks. All nodes share 320 TB /home disk storage to store the user files. The 146 TB shared /scratch storage is available for the scratch data. -The Fat nodes are equipped with large amount (512GB) of memory. Virtualization infrastructure provides resources to run long term servers and services in virtual mode. Fat nodes and virtual servers may access 45 TB of dedicated block storage. Accelerated nodes, fat nodes, and virtualization infrastructure are available [upon request](https://support.it4i.cz/rt) made by a PI. +The Fat nodes are equipped with large amount (512 GB) of memory. Virtualization infrastructure provides resources to run long term servers and services in virtual mode. Fat nodes and virtual servers may access 45 TB of dedicated block storage. Accelerated nodes, fat nodes, and virtualization infrastructure are available [upon request](https://support.it4i.cz/rt) made by a PI. Schematic representation of the Anselm cluster. Each box represents a node (computer) or storage capacity: @@ -16,16 +16,16 @@ There are four types of compute nodes: - 180 compute nodes without the accelerator - 23 compute nodes with GPU accelerator - equipped with NVIDIA Tesla Kepler K20 - 4 compute nodes with MIC accelerator - equipped with Intel Xeon Phi 5110P -- 2 fat nodes - equipped with 512GB RAM and two 100GB SSD drives +- 2 fat nodes - equipped with 512 GB RAM and two 100 GB SSD drives [More about Compute nodes](compute-nodes/). 
GPU and accelerated nodes are available upon request, see the [Resources Allocation Policy](resources-allocation-policy/). All these nodes are interconnected by fast InfiniBand network and Ethernet network. [More about the Network](network/). -Every chassis provides Infiniband switch, marked **isw**, connecting all nodes in the chassis, as well as connecting the chassis to the upper level switches. +Every chassis provides InfiniBand switch, marked **isw**, connecting all nodes in the chassis, as well as connecting the chassis to the upper level switches. -All nodes share 360TB /home disk storage to store user files. The 146TB shared /scratch storage is available for the scratch data. These file systems are provided by Lustre parallel file system. There is also local disk storage available on all compute nodes /lscratch. [More about Storage](storage/). +All nodes share 360 TB /home disk storage to store user files. The 146 TB shared /scratch storage is available for the scratch data. These file systems are provided by Lustre parallel file system. There is also local disk storage available on all compute nodes /lscratch. [More about Storage](storage/). The user access to the Anselm cluster is provided by two login nodes login1, login2, and data mover node dm1. [More about accessing cluster.](shell-and-data-access/) @@ -38,7 +38,7 @@ The parameters are summarized in the following tables: |Operating system|Linux| |[**Compute nodes**](compute-nodes/)|| |Totally|209| -|Processor cores|16 (2x8 cores)| +|Processor cores|16 (2 x 8 cores)| |RAM|min. 64 GB, min. 4 GB per core| |Local disk drive|yes - usually 500 GB| |Compute network|InfiniBand QDR, fully non-blocking, fat-tree| @@ -53,9 +53,9 @@ The parameters are summarized in the following tables: |Node|Processor|Memory|Accelerator| |---|---|---|---| - |w/o accelerator|2x Intel Sandy Bridge E5-2665, 2.4GHz|64GB|-| - |GPU accelerated|2x Intel Sandy Bridge E5-2470, 2.3GHz|96GB|NVIDIA Kepler K20| - |MIC accelerated|2x Intel Sandy Bridge E5-2470, 2.3GHz|96GB|Intel Xeon Phi P5110| - |Fat compute node|2x Intel Sandy Bridge E5-2665, 2.4GHz|512GB|-| + |w/o accelerator|2 x Intel Sandy Bridge E5-2665, 2.4 GHz|64 GB|-| + |GPU accelerated|2 x Intel Sandy Bridge E5-2470, 2.3 GHz|96 GB|NVIDIA Kepler K20| + |MIC accelerated|2 x Intel Sandy Bridge E5-2470, 2.3 GHz|96 GB|Intel Xeon Phi P5110| + |Fat compute node|2 x Intel Sandy Bridge E5-2665, 2.4 GHz|512 GB|-| For more details please refer to the [Compute nodes](compute-nodes/), [Storage](storage/), and [Network](network/). diff --git a/docs.it4i/anselm-cluster-documentation/introduction.md b/docs.it4i/anselm-cluster-documentation/introduction.md index 181837f91..520b4d616 100644 --- a/docs.it4i/anselm-cluster-documentation/introduction.md +++ b/docs.it4i/anselm-cluster-documentation/introduction.md @@ -1,11 +1,11 @@ Introduction ============ -Welcome to Anselm supercomputer cluster. The Anselm cluster consists of 209 compute nodes, totaling 3344 compute cores with 15TB RAM and giving over 94 Tflop/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 16 cores, at least 64GB RAM, and 500GB harddrive. Nodes are interconnected by fully non-blocking fat-tree Infiniband network and equipped with Intel Sandy Bridge processors. A few nodes are also equipped with NVIDIA Kepler GPU or Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](hardware-overview/). +Welcome to Anselm supercomputer cluster. 
The Anselm cluster consists of 209 compute nodes, totaling 3344 compute cores with 15 TB RAM and giving over 94 Tflop/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 16 cores, at least 64 GB RAM, and 500 GB hard disk drive. Nodes are interconnected by fully non-blocking fat-tree InfiniBand network and equipped with Intel Sandy Bridge processors. A few nodes are also equipped with NVIDIA Kepler GPU or Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](hardware-overview/). The cluster runs bullx Linux ([bull](http://www.bull.com/bullx-logiciels/systeme-exploitation.html)) [operating system](software/operating-system/), which is compatible with the RedHat [ Linux family.](http://upload.wikimedia.org/wikipedia/commons/1/1b/Linux_Distribution_Timeline.svg) We have installed a wide range of software packages targeted at different scientific domains. These packages are accessible via the [modules environment](environment-and-modules/). -User data shared file-system (HOME, 320TB) and job data shared file-system (SCRATCH, 146TB) are available to users. +User data shared file-system (HOME, 320 TB) and job data shared file-system (SCRATCH, 146 TB) are available to users. The PBS Professional workload manager provides [computing resources allocations and job execution](resources-allocation-policy/). diff --git a/docs.it4i/anselm-cluster-documentation/job-priority.md b/docs.it4i/anselm-cluster-documentation/job-priority.md index d86e3a8f1..6e88bea50 100644 --- a/docs.it4i/anselm-cluster-documentation/job-priority.md +++ b/docs.it4i/anselm-cluster-documentation/job-priority.md @@ -32,7 +32,7 @@ Fairshare priority is calculated as where MAX_FAIRSHARE has value 1E6, usage~Project~ is cumulated usage by all members of selected project, usage~Total~ is total usage by all users, by all projects. -Usage counts allocated corehours (ncpus*walltime). Usage is decayed, or cut in half periodically, at the interval 168 hours (one week). Jobs queued in queue qexp are not calculated to project's usage. +Usage counts allocated core hours (ncpus*walltime). Usage is decayed, or cut in half periodically, at the interval 168 hours (one week). Jobs queued in queue qexp are not calculated to project's usage. >Calculated usage and fairshare priority can be seen at <https://extranet.it4i.cz/anselm/projects>. @@ -65,4 +65,4 @@ It means, that jobs with lower execution priority can be run before jobs with hi !!! Note "Note" It is **very beneficial to specify the walltime** when submitting jobs. -Specifying more accurate walltime enables better schedulling, better execution times and better resource usage. Jobs with suitable (small) walltime could be backfilled - and overtake job(s) with higher priority. +Specifying more accurate walltime enables better scheduling, better execution times and better resource usage. Jobs with suitable (small) walltime could be backfilled - and overtake job(s) with higher priority. diff --git a/docs.it4i/anselm-cluster-documentation/network.md b/docs.it4i/anselm-cluster-documentation/network.md index 7f8a989eb..308d7caa0 100644 --- a/docs.it4i/anselm-cluster-documentation/network.md +++ b/docs.it4i/anselm-cluster-documentation/network.md @@ -1,9 +1,9 @@ Network ======= -All compute and login nodes of Anselm are interconnected by [Infiniband](http://en.wikipedia.org/wiki/InfiniBand) QDR network and by Gigabit [Ethernet](http://en.wikipedia.org/wiki/Ethernet) network. Both networks may be used to transfer user data. 
+All compute and login nodes of Anselm are interconnected by [InfiniBand](http://en.wikipedia.org/wiki/InfiniBand) QDR network and by Gigabit [Ethernet](http://en.wikipedia.org/wiki/Ethernet) network. Both networks may be used to transfer user data. -Infiniband Network +InfiniBand Network ------------------ All compute and login nodes of Anselm are interconnected by a high-bandwidth, low-latency [Infiniband](http://en.wikipedia.org/wiki/InfiniBand) QDR network (IB 4x QDR, 40 Gbps). The network topology is a fully non-blocking fat-tree. @@ -34,4 +34,4 @@ $ ssh 10.2.1.110 $ ssh 10.1.1.108 ``` -In this example, we access the node cn110 by Infiniband network via the ib0 interface, then from cn110 to cn108 by Ethernet network. +In this example, we access the node cn110 by InfiniBand network via the ib0 interface, then from cn110 to cn108 by Ethernet network. diff --git a/docs.it4i/anselm-cluster-documentation/prace.md b/docs.it4i/anselm-cluster-documentation/prace.md index d8003b41c..b78343743 100644 --- a/docs.it4i/anselm-cluster-documentation/prace.md +++ b/docs.it4i/anselm-cluster-documentation/prace.md @@ -224,8 +224,8 @@ For PRACE users, the default production run queue is "qprace". PRACE users can a |queue|Active project|Project resources|Nodes|priority|authorization|walltime| |---|---|---|---|---|---|---| |**qexp** Express queue|no|none required|2 reserved, 8 total|high|no|1 / 1h| -|**qprace** Production queue|yes|> 0|178 w/o accelerator|medium|no|24 / 48h| -|**qfree** Free resource queue|yes|none required|178 w/o accelerator|very low|no| 12 / 12h| +|**qprace** Production queue|yes|> 0|178 w/o accelerator|medium|no|24 / 48 h| +|**qfree** Free resource queue|yes|none required|178 w/o accelerator|very low|no| 12 / 12 h| **qprace**, the PRACE: This queue is intended for normal production runs. It is required that active project with nonzero remaining resources is specified to enter the qprace. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qprace is 12 hours. If the job needs longer time, it must use checkpoint/restart functionality. diff --git a/docs.it4i/anselm-cluster-documentation/resource-allocation-and-job-execution.md b/docs.it4i/anselm-cluster-documentation/resource-allocation-and-job-execution.md index f7ef0e127..fa2f1d687 100644 --- a/docs.it4i/anselm-cluster-documentation/resource-allocation-and-job-execution.md +++ b/docs.it4i/anselm-cluster-documentation/resource-allocation-and-job-execution.md @@ -1,7 +1,7 @@ Resource Allocation and Job Execution ===================================== -To run a [job](../introduction/), [computational resources](../introduction/) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which efficiently distributes workloads across the supercomputer. Extensive informations about PBS Pro can be found in the [official documentation here](../pbspro-documentation/pbspro/), especially in the PBS Pro User's Guide. +To run a [job](../introduction/), [computational resources](../introduction/) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which efficiently distributes workloads across the supercomputer. Extensive information about PBS Pro can be found in the [official documentation here](../pbspro-documentation/pbspro/), especially in the PBS Pro User's Guide. 
Resources Allocation Policy --------------------------- diff --git a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-cfx.md b/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-cfx.md index 93dfb1d2b..3ea63bb83 100644 --- a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-cfx.md +++ b/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-cfx.md @@ -48,9 +48,9 @@ echo Machines: $hl /ansys_inc/v145/CFX/bin/cfx5solve -def input.def -size 4 -size-ni 4x -part-large -start-method "Platform MPI Distributed Parallel" -par-dist $hl -P aa_r ``` -Header of the pbs file (above) is common and description can be find on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution/). SVS FEM recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. +Header of the PBS file (above) is common and description can be find on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution/). SVS FEM recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. -Working directory has to be created before sending pbs job into the queue. Input file should be in working directory or full path to input file has to be specified. >Input file has to be defined by common CFX def file which is attached to the cfx solver via parameter -def +Working directory has to be created before sending PBS job into the queue. Input file should be in working directory or full path to input file has to be specified. >Input file has to be defined by common CFX def file which is attached to the cfx solver via parameter -def **License** should be selected by parameter -P (Big letter **P**). Licensed products are the following: aa_r (ANSYS **Academic** Research), ane3fl (ANSYS Multiphysics)-**Commercial**. [More about licensing here](licensing/) diff --git a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-fluent.md b/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-fluent.md index 569a60075..a7116910f 100644 --- a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-fluent.md +++ b/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-fluent.md @@ -4,7 +4,7 @@ ANSYS Fluent [ANSYS Fluent](http://www.ansys.com/Products/Simulation+Technology/Fluid+Dynamics/Fluid+Dynamics+Products/ANSYS+Fluent) software contains the broad physical modeling capabilities needed to model flow, turbulence, heat transfer, and reactions for industrial applications ranging from air flow over an aircraft wing to combustion in a furnace, from bubble columns to oil platforms, from blood flow to semiconductor manufacturing, and from clean room design to wastewater treatment plants. Special models that give the software the ability to model in-cylinder combustion, aeroacoustics, turbomachinery, and multiphase systems have served to broaden its reach. -1. Common way to run Fluent over pbs file +1. Common way to run Fluent over PBS file ----------------------------------------- To run ANSYS Fluent in batch mode you can utilize/modify the default fluent.pbs script and execute it via the qsub command. 
diff --git a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-ls-dyna.md b/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-ls-dyna.md index 24a16a848..f254380e9 100644 --- a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-ls-dyna.md +++ b/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-ls-dyna.md @@ -51,6 +51,6 @@ echo Machines: $hl /ansys_inc/v145/ansys/bin/ansys145 -dis -lsdynampp i=input.k -machines $hl ``` -Header of the pbs file (above) is common and description can be find on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. +Header of the PBS file (above) is common and description can be find on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. -Working directory has to be created before sending pbs job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common LS-DYNA .**k** file which is attached to the ansys solver via parameter i= +Working directory has to be created before sending PBS job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common LS-DYNA .**k** file which is attached to the ANSYS solver via parameter i= diff --git a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-mechanical-apdl.md b/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-mechanical-apdl.md index 22f8e5a26..e0e3f5be0 100644 --- a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-mechanical-apdl.md +++ b/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-mechanical-apdl.md @@ -50,9 +50,9 @@ echo Machines: $hl /ansys_inc/v145/ansys/bin/ansys145 -b -dis -p aa_r -i input.dat -o file.out -machines $hl -dir $WORK_DIR ``` -Header of the pbs file (above) is common and description can be find on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution.md). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. +Header of the PBS file (above) is common and description can be find on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution.md). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. -Working directory has to be created before sending pbs job into the queue. Input file should be in working directory or full path to input file has to be specified. 
Input file has to be defined by common APDL file which is attached to the ansys solver via parameter -i +Working directory has to be created before sending PBS job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common APDL file which is attached to the ANSYS solver via parameter -i **License** should be selected by parameter -p. Licensed products are the following: aa_r (ANSYS **Academic** Research), ane3fl (ANSYS Multiphysics)-**Commercial**, aa_r_dy (ANSYS **Academic** AUTODYN) [More about licensing here](licensing/) diff --git a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys.md b/docs.it4i/anselm-cluster-documentation/software/ansys/ansys.md index ad4aa8865..d0fedd929 100644 --- a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys.md +++ b/docs.it4i/anselm-cluster-documentation/software/ansys/ansys.md @@ -3,7 +3,7 @@ Overview of ANSYS Products **[SVS FEM](http://www.svsfem.cz/)** as **[ANSYS Channel partner](http://www.ansys.com/)** for Czech Republic provided all ANSYS licenses for ANSELM cluster and supports of all ANSYS Products (Multiphysics, Mechanical, MAPDL, CFX, Fluent, Maxwell, LS-DYNA...) to IT staff and ANSYS users. If you are challenging to problem of ANSYS functionality contact please [hotline@svsfem.cz](mailto:hotline@svsfem.cz?subject=Ostrava%20-%20ANSELM) -Anselm provides as commercial as academic variants. Academic variants are distinguished by "**Academic...**" word in the name of  license or by two letter preposition "**aa_**" in the license feature name. Change of license is realized on command line respectively directly in user's pbs file (see individual products). [ More about licensing here](ansys/licensing/) +Anselm provides as commercial as academic variants. Academic variants are distinguished by "**Academic...**" word in the name of  license or by two letter preposition "**aa_**" in the license feature name. Change of license is realized on command line respectively directly in user's PBS file (see individual products). [ More about licensing here](ansys/licensing/) To load the latest version of any ANSYS product (Mechanical, Fluent, CFX, MAPDL,...) load the module: diff --git a/docs.it4i/anselm-cluster-documentation/software/ansys/ls-dyna.md b/docs.it4i/anselm-cluster-documentation/software/ansys/ls-dyna.md index c3ae36559..2639a873d 100644 --- a/docs.it4i/anselm-cluster-documentation/software/ansys/ls-dyna.md +++ b/docs.it4i/anselm-cluster-documentation/software/ansys/ls-dyna.md @@ -1,7 +1,7 @@ LS-DYNA ======= -[LS-DYNA](http://www.lstc.com/) is a multi-purpose, explicit and implicit finite element program used to analyze the nonlinear dynamic response of structures. Its fully automated contact analysis capability, a wide range of constitutive models to simulate a whole range of engineering materials (steels, composites, foams, concrete, etc.), error-checking features and the high scalability have enabled users worldwide to solve successfully many complex problems. Additionally LS-DYNA is extensively used to simulate impacts on structures from drop tests, underwater shock, explosions or high-velocity impacts. Explosive forming, process engineering, accident reconstruction, vehicle dynamics, thermal brake disc analysis or nuclear safety are further areas in the broad range of possible applications. In leading-edge research LS-DYNA is used to investigate the behaviour of materials like composites, ceramics, concrete, or wood. 
Moreover, it is used in biomechanics, human modelling, molecular structures, casting, forging, or virtual testing. +[LS-DYNA](http://www.lstc.com/) is a multi-purpose, explicit and implicit finite element program used to analyze the nonlinear dynamic response of structures. Its fully automated contact analysis capability, a wide range of constitutive models to simulate a whole range of engineering materials (steels, composites, foams, concrete, etc.), error-checking features and the high scalability have enabled users worldwide to solve successfully many complex problems. Additionally LS-DYNA is extensively used to simulate impacts on structures from drop tests, underwater shock, explosions or high-velocity impacts. Explosive forming, process engineering, accident reconstruction, vehicle dynamics, thermal brake disc analysis or nuclear safety are further areas in the broad range of possible applications. In leading-edge research LS-DYNA is used to investigate the behavior of materials like composites, ceramics, concrete, or wood. Moreover, it is used in biomechanics, human modeling, molecular structures, casting, forging, or virtual testing. Anselm provides **1 commercial license of LS-DYNA without HPC** support now. @@ -31,6 +31,6 @@ module load lsdyna /apps/engineering/lsdyna/lsdyna700s i=input.k ``` -Header of the pbs file (above) is common and description can be find on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution.html). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. +Header of the PBS file (above) is common and description can be find on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution.html). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. -Working directory has to be created before sending pbs job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common LS-DYNA **.k** file which is attached to the LS-DYNA solver via parameter i= +Working directory has to be created before sending PBS job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common LS-DYNA **.k** file which is attached to the LS-DYNA solver via parameter i= diff --git a/docs.it4i/anselm-cluster-documentation/software/chemistry/molpro.md b/docs.it4i/anselm-cluster-documentation/software/chemistry/molpro.md index 526dbd662..859f6afa0 100644 --- a/docs.it4i/anselm-cluster-documentation/software/chemistry/molpro.md +++ b/docs.it4i/anselm-cluster-documentation/software/chemistry/molpro.md @@ -36,7 +36,7 @@ Molpro is compiled for parallel execution using MPI and OpenMP. By default, Molp !!! Note "Note" The OpenMP parallelization in Molpro is limited and has been observed to produce limited scaling. We therefore recommend to use MPI parallelization only. This can be achieved by passing option mpiprocs=16:ompthreads=1 to PBS. 
-You are advised to use the -d option to point to a directory in [SCRATCH filesystem](../../storage/storage/). Molpro can produce a large amount of temporary data during its run, and it is important that these are placed in the fast scratch filesystem.
+You are advised to use the -d option to point to a directory in [SCRATCH file system](../../storage/storage/). Molpro can produce a large amount of temporary data during its run, and it is important that these are placed in the fast scratch file system.
 
 ### Example jobscript
 
diff --git a/docs.it4i/anselm-cluster-documentation/software/chemistry/nwchem.md b/docs.it4i/anselm-cluster-documentation/software/chemistry/nwchem.md
index 2694f4354..7d9c02bd6 100644
--- a/docs.it4i/anselm-cluster-documentation/software/chemistry/nwchem.md
+++ b/docs.it4i/anselm-cluster-documentation/software/chemistry/nwchem.md
@@ -42,4 +42,4 @@ Options
 Please refer to [the documentation](http://www.nwchem-sw.org/index.php/Release62:Top-level) and in the input file set the following directives :
 
 - MEMORY : controls the amount of memory NWChem will use
-- SCRATCH_DIR : set this to a directory in [SCRATCH filesystem](../../storage/storage/#scratch) (or run the calculation completely in a scratch directory). For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, eg. "scf direct"
+- SCRATCH_DIR : set this to a directory in [SCRATCH file system](../../storage/storage/#scratch) (or run the calculation completely in a scratch directory). For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, e.g. "scf direct"
diff --git a/docs.it4i/anselm-cluster-documentation/software/compilers.md b/docs.it4i/anselm-cluster-documentation/software/compilers.md
index 171891d65..305389fd5 100644
--- a/docs.it4i/anselm-cluster-documentation/software/compilers.md
+++ b/docs.it4i/anselm-cluster-documentation/software/compilers.md
@@ -9,7 +9,7 @@ Currently there are several compilers for different programming languages availa
 
 - Fortran 77/90/95
 - Unified Parallel C
 - Java
-- nVidia CUDA
+- NVIDIA CUDA
 
 The C/C++ and Fortran compilers are divided into two main groups GNU and Intel.
 
@@ -150,6 +150,6 @@ Java
 ----
 For information how to use Java (runtime and/or compiler), please read the [Java page](java/).
 
-nVidia CUDA
+NVIDIA CUDA
 -----------
-For information how to work with nVidia CUDA, please read the [nVidia CUDA page](nvidia-cuda/).
\ No newline at end of file
+For information how to work with NVIDIA CUDA, please read the [NVIDIA CUDA page](nvidia-cuda/).
\ No newline at end of file
diff --git a/docs.it4i/anselm-cluster-documentation/software/comsol-multiphysics.md b/docs.it4i/anselm-cluster-documentation/software/comsol-multiphysics.md
index 28fa2fe85..8c3528b4f 100644
--- a/docs.it4i/anselm-cluster-documentation/software/comsol-multiphysics.md
+++ b/docs.it4i/anselm-cluster-documentation/software/comsol-multiphysics.md
@@ -118,4 +118,4 @@ cd /apps/engineering/comsol/comsol43b/mli
 matlab -nodesktop -nosplash -r "mphstart; addpath /scratch/$USER; test_job"
 ```
 
-This example shows how to run Livelink for MATLAB with following configuration: 3 nodes and 16 cores per node. Working directory has to be created before submitting (comsol_matlab.pbs) job script into the queue. Input file (test_job.m) has to be in working directory or full path to input file has to be specified. The Matlab command option (-r ”mphstart”) created a connection with a COMSOL server using the default port number.
+This example shows how to run LiveLink for MATLAB with following configuration: 3 nodes and 16 cores per node. Working directory has to be created before submitting (comsol_matlab.pbs) job script into the queue. Input file (test_job.m) has to be in working directory or full path to input file has to be specified. The MATLAB command option (-r ”mphstart”) created a connection with a COMSOL server using the default port number. diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-performance-reports.md b/docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-performance-reports.md index 9264e27d8..a563ec561 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-performance-reports.md +++ b/docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-performance-reports.md @@ -36,7 +36,7 @@ The mpi program will run as usual. The perf-report creates two additional files, Example ------- -In this example, we will be profiling the mympiprog.x MPI program, using Allinea performance reports. Assume that the code is compiled with intel compilers and linked against intel MPI library: +In this example, we will be profiling the mympiprog.x MPI program, using Allinea performance reports. Assume that the code is compiled with Intel compilers and linked against Intel MPI library: First, we allocate some nodes via the express queue: diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/cube.md b/docs.it4i/anselm-cluster-documentation/software/debuggers/cube.md index 95faa664a..10310bc89 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/cube.md +++ b/docs.it4i/anselm-cluster-documentation/software/debuggers/cube.md @@ -31,7 +31,7 @@ CUBE is a graphical application. Refer to Graphical User Interface documentation !!! Note "Note" Analyzing large data sets can consume large amount of CPU and RAM. Do not perform large analysis on login nodes. -After loading the apropriate module, simply launch cube command, or alternatively you can use scalasca -examine command to launch the GUI. Note that for Scalasca datasets, if you do not analyze the data with scalasca -examine before to opening them with CUBE, not all performance data will be available. +After loading the appropriate module, simply launch cube command, or alternatively you can use scalasca -examine command to launch the GUI. Note that for Scalasca datasets, if you do not analyze the data with scalasca -examine before to opening them with CUBE, not all performance data will be available. References 1. <http://www.scalasca.org/software/cube-4.x/download.html> diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/intel-performance-counter-monitor.md b/docs.it4i/anselm-cluster-documentation/software/debuggers/intel-performance-counter-monitor.md index ff6c5a426..965fe6c73 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/intel-performance-counter-monitor.md +++ b/docs.it4i/anselm-cluster-documentation/software/debuggers/intel-performance-counter-monitor.md @@ -191,7 +191,7 @@ Can be used as a sensor for ksysguard GUI, which is currently not installed on A API --- -In a similar fashion to PAPI, PCM provides a C++ API to access the performance counter from within your application. Refer to the [doxygen documentation](http://intel-pcm-api-documentation.github.io/classPCM.html) for details of the API. +In a similar fashion to PAPI, PCM provides a C++ API to access the performance counter from within your application. 
Refer to the [Doxygen documentation](http://intel-pcm-api-documentation.github.io/classPCM.html) for details of the API.
 
 !!! Note "Note"
     Due to security limitations, using PCM API to monitor your applications is currently not possible on Anselm. (The application must be run as root user)
diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/intel-vtune-amplifier.md b/docs.it4i/anselm-cluster-documentation/software/debuggers/intel-vtune-amplifier.md
index a3796c229..677a12150 100644
--- a/docs.it4i/anselm-cluster-documentation/software/debuggers/intel-vtune-amplifier.md
+++ b/docs.it4i/anselm-cluster-documentation/software/debuggers/intel-vtune-amplifier.md
@@ -32,7 +32,7 @@ and launch the GUI :
 
 The GUI will open in new window. Click on "*New Project...*" to create a new project. After clicking *OK*, a new window with project properties will appear.  At "*Application:*", select the bath to your binary you want to profile (the binary should be compiled with -g flag). Some additional options such as command line arguments can be selected. At "*Managed code profiling mode:*" select "*Native*" (unless you want to profile managed mode .NET/Mono applications). After clicking *OK*, your project is created.
 
-To run a new analysis, click "*New analysis...*". You will see a list of possible analysis. Some of them will not be possible on the current CPU (eg. Intel Atom analysis is not possible on Sandy Bridge CPU), the GUI will show an error box if you select the wrong analysis. For example, select "*Advanced Hotspots*". Clicking on *Start *will start profiling of the application.
+To run a new analysis, click "*New analysis...*". You will see a list of possible analysis. Some of them will not be possible on the current CPU (e.g. Intel Atom analysis is not possible on Sandy Bridge CPU), the GUI will show an error box if you select the wrong analysis. For example, select "*Advanced Hotspots*". Clicking on *Start *will start profiling of the application.
 
 Remote Analysis
 ---------------
diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/papi.md b/docs.it4i/anselm-cluster-documentation/software/debuggers/papi.md
index c3e16ad32..376b3ee19 100644
--- a/docs.it4i/anselm-cluster-documentation/software/debuggers/papi.md
+++ b/docs.it4i/anselm-cluster-documentation/software/debuggers/papi.md
@@ -19,7 +19,7 @@ To use PAPI, load [module](../../environment-and-modules/) papi:
 
 This will load the default version. Execute module avail papi for a list of installed versions.
 
-Utilites
+Utilities
 --------
 The bin directory of PAPI (which is automatically added to $PATH upon loading the module) contains various utilites.
 
diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/scalasca.md b/docs.it4i/anselm-cluster-documentation/software/debuggers/scalasca.md
index 34c49f79f..08ae57435 100644
--- a/docs.it4i/anselm-cluster-documentation/software/debuggers/scalasca.md
+++ b/docs.it4i/anselm-cluster-documentation/software/debuggers/scalasca.md
@@ -38,7 +38,7 @@ An example :
 $ scalasca -analyze mpirun -np 4 ./mympiprogram
 ```
 
-Some notable Scalsca options are:
+Some notable Scalasca options are:
 
 **-t Enable trace data collection. By default, only summary data are collected.**
 **-e <directory> Specify a directory to save the collected data to. 
By default, Scalasca saves the data to a directory with prefix scorep_, followed by name of the executable and launch configuration.**
diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/valgrind.md b/docs.it4i/anselm-cluster-documentation/software/debuggers/valgrind.md
index 3ac747aa5..4ea4ed33b 100644
--- a/docs.it4i/anselm-cluster-documentation/software/debuggers/valgrind.md
+++ b/docs.it4i/anselm-cluster-documentation/software/debuggers/valgrind.md
@@ -260,4 +260,4 @@ Prints this output : (note that there is output printed for every launched MPI p
 ==31319== ERROR SUMMARY: 1 errors from 1 contexts (suppressed: 4 from 4)
 ```
 
-We can see that Valgrind has reported use of unitialised memory on the master process (which reads the array to be broadcasted) and use of unaddresable memory on both processes.
+We can see that Valgrind has reported use of uninitialized memory on the master process (which reads the array to be broadcast) and use of unaddressable memory on both processes.
diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/vampir.md b/docs.it4i/anselm-cluster-documentation/software/debuggers/vampir.md
index f2a0558e5..1224d6823 100644
--- a/docs.it4i/anselm-cluster-documentation/software/debuggers/vampir.md
+++ b/docs.it4i/anselm-cluster-documentation/software/debuggers/vampir.md
@@ -1,7 +1,7 @@
 Vampir
 ======
 
-Vampir is a commercial trace analysis and visualisation tool. It can work with traces in OTF and OTF2 formats. It does not have the functionality to collect traces, you need to use a trace collection tool (such as [Score-P](../../../salomon/software/debuggers/score-p/)) first to collect the traces.
+Vampir is a commercial trace analysis and visualization tool. It can work with traces in OTF and OTF2 formats. It does not have the functionality to collect traces, you need to use a trace collection tool (such as [Score-P](../../../salomon/software/debuggers/score-p/)) first to collect the traces.
 
diff --git a/docs.it4i/anselm-cluster-documentation/software/index.md b/docs.it4i/anselm-cluster-documentation/software/index.md index 6ecb43b30..2cbcedb9c 100644 --- a/docs.it4i/anselm-cluster-documentation/software/index.md +++ b/docs.it4i/anselm-cluster-documentation/software/index.md @@ -9,14 +9,14 @@ Anselm Cluster Software * An open-source, multi-platform data analysis and visualization application ## [Compilers](compilers) * Available compilers, including GNU, INTEL and UPC compilers -## [nVidia CUDA](nvidia-cuda) -* A guide to nVidia CUDA programming and GPU usage +## [NVIDIA CUDA](nvidia-cuda) +* A guide to NVIDIA CUDA programming and GPU usage ## [GPI-2](gpi2) * A library that implements the GASPI specification ## [OpenFOAM](openfoam) * A free, open source CFD software package ## [ISV Licenses](isv_licenses) -* A guide to managing Independent Software Vendor licences +* A guide to managing Independent Software Vendor licenses ## [Intel Xeon Phi](intel-xeon-phi) * A guide to Intel Xeon Phi usage ## [Virtualization](kvirtualization) @@ -49,7 +49,7 @@ Anselm Cluster Software ### [HDF5](numerical-libraries/hdf5) ## Omics Master ### [Diagnostic component (TEAM)](omics-master/diagnostic-component-team) -### [Priorization component (BiERApp)](omics-master/priorization-component-bierapp) +### [Prioritization component (BiERapp)](omics-master/priorization-component-bierapp) ### [Overview](omics-master/overview) ## Debuggers * A collection of development tools @@ -69,14 +69,14 @@ Anselm Cluster Software * Interpreted languages for numerical computations ### [Introduction](numerical-languages/introduction) ### [R](numerical-languages/r) -### [Matlab 2013-2014](numerical-languages/matlab_1314) -### [Matlab](numerical-languages/matlab) +### [MATLAB 2013-2014](numerical-languages/matlab_1314) +### [MATLAB](numerical-languages/matlab) ### [Octave](numerical-languages/octave) ## Chemistry * Tools for computational chemistry ### [Molpro](chemistry/molpro) ### [NWChem](chemistry/nwchem) -## Ansys +## ANSYS * An engineering simulation software ### [Introduction](ansys/ansys) ### [ANSYS CFX](ansys/ansys-cfx) diff --git a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-compilers.md b/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-compilers.md index a209a0d17..34d9f3a4d 100644 --- a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-compilers.md +++ b/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-compilers.md @@ -18,7 +18,7 @@ For maximum performance on the Anselm cluster, compile your programs using the A $ ifort -ipo -O3 -vec -xAVX -vec-report1 myprog.f mysubroutines.f -o myprog.x ``` -In this example, we compile the program enabling interprocedural optimizations between source files (-ipo), aggresive loop optimizations (-O3) and vectorization (-vec -xAVX) +In this example, we compile the program enabling interprocedural optimizations between source files (-ipo), aggressive loop optimizations (-O3) and vectorization (-vec -xAVX) The compiler recognizes the omp, simd, vector and ivdep pragmas for OpenMP parallelization and AVX vectorization. Enable the OpenMP parallelization by the **-openmp** compiler switch. 
diff --git a/docs.it4i/anselm-cluster-documentation/software/isv_licenses.md b/docs.it4i/anselm-cluster-documentation/software/isv_licenses.md index bfadff58d..518d629e3 100644 --- a/docs.it4i/anselm-cluster-documentation/software/isv_licenses.md +++ b/docs.it4i/anselm-cluster-documentation/software/isv_licenses.md @@ -1,11 +1,11 @@ ISV Licenses ============ -##A guide to managing Independent Software Vendor licences +##A guide to managing Independent Software Vendor licenses On Anselm cluster there are also installed commercial software applications, also known as ISV (Independent Software Vendor), which are subjects to licensing. The licenses are limited and their usage may be restricted only to some users or user groups. -Currently Flex License Manager based licensing is supported on the cluster for products Ansys, Comsol and Matlab. More information about the applications can be found in the general software section. +Currently Flex License Manager based licensing is supported on the cluster for products ANSYS, Comsol and MATLAB. More information about the applications can be found in the general software section. If an ISV application was purchased for educational (research) purposes and also for commercial purposes, then there are always two separate versions maintained and suffix "edu" is used in the name of the non-commercial version. @@ -56,7 +56,7 @@ Example of the Commercial Matlab license state: License tracking in PBS Pro scheduler and users usage ----------------------------------------------------- -Each feature of each license is accounted and checked by the scheduler of PBS Pro. If you ask for certain licences, the scheduler won't start the job until the asked licenses are free (available). This prevents to crash batch jobs, just because of unavailability of the needed licenses. +Each feature of each license is accounted and checked by the scheduler of PBS Pro. If you ask for certain licenses, the scheduler won't start the job until the asked licenses are free (available). This prevents to crash batch jobs, just because of unavailability of the needed licenses. The general format of the name is: @@ -104,4 +104,4 @@ Run an interactive PBS job with 1 Matlab EDU license, 1 Distributed Computing To $ qsub -I -q qprod -A PROJECT_ID -l select=2:ncpus=16 -l feature__matlab-edu__MATLAB=1 -l feature__matlab-edu__Distrib_Computing_Toolbox=1 -l feature__matlab-edu__MATLAB_Distrib_Comp_Engine=32 ``` -The license is used and accounted only with the real usage of the product. So in this example, the general Matlab is used after Matlab is run vy the user and not at the time, when the shell of the interactive job is started. Also the Distributed Computing licenses are used at the time, when the user uses the distributed parallel computation in Matlab (e. g. issues pmode start, matlabpool, etc.). +The license is used and accounted only with the real usage of the product. So in this example, the general Matlab is used after Matlab is run by the user and not at the time, when the shell of the interactive job is started. Also the Distributed Computing licenses are used at the time, when the user uses the distributed parallel computation in Matlab (e. g. issues pmode start, matlabpool, etc.). 
diff --git a/docs.it4i/anselm-cluster-documentation/software/java.md b/docs.it4i/anselm-cluster-documentation/software/java.md index 4755ee2ba..4b708c33e 100644 --- a/docs.it4i/anselm-cluster-documentation/software/java.md +++ b/docs.it4i/anselm-cluster-documentation/software/java.md @@ -25,5 +25,5 @@ With the module loaded, not only the runtime environment (JRE), but also the dev $ which javac ``` -Java applications may use MPI for interprocess communication, in conjunction with OpenMPI. Read more on <http://www.open-mpi.org/faq/?category=java>. This functionality is currently not supported on Anselm cluster. In case you require the java interface to MPI, please contact [Anselm support](https://support.it4i.cz/rt/). +Java applications may use MPI for inter-process communication, in conjunction with OpenMPI. Read more on <http://www.open-mpi.org/faq/?category=java>. This functionality is currently not supported on Anselm cluster. In case you require the java interface to MPI, please contact [Anselm support](https://support.it4i.cz/rt/). diff --git a/docs.it4i/anselm-cluster-documentation/software/kvirtualization.md b/docs.it4i/anselm-cluster-documentation/software/kvirtualization.md index fe7b411f8..1c8eca589 100644 --- a/docs.it4i/anselm-cluster-documentation/software/kvirtualization.md +++ b/docs.it4i/anselm-cluster-documentation/software/kvirtualization.md @@ -52,7 +52,7 @@ We propose this job workflow:  -Our recommended solution is that job script creates distinct shared job directory, which makes a central point for data exchange between Anselm's environment, compute node (host) (e.g HOME, SCRATCH, local scratch and other local or cluster filesystems) and virtual machine (guest). Job script links or copies input data and instructions what to do (run script) for virtual machine to job directory and virtual machine process input data according instructions in job directory and store output back to job directory. We recommend, that virtual machine is running in so called [snapshot mode](virtualization/#snapshot-mode), image is immutable - image does not change, so one image can be used for many concurrent jobs. +Our recommended solution is that job script creates distinct shared job directory, which makes a central point for data exchange between Anselm's environment, compute node (host) (e.g. HOME, SCRATCH, local scratch and other local or cluster file systems) and virtual machine (guest). Job script links or copies input data and instructions what to do (run script) for virtual machine to job directory and virtual machine process input data according instructions in job directory and store output back to job directory. We recommend, that virtual machine is running in so called [snapshot mode](virtualization/#snapshot-mode), image is immutable - image does not change, so one image can be used for many concurrent jobs. ### Procedure @@ -232,7 +232,7 @@ Run virtual machine (simple) You can access virtual machine by VNC viewer (option -vnc) connecting to IP address of compute node. For VNC you must use VPN network. 
-Install virtual machine from iso file +Install virtual machine from ISO file ```bash $ qemu-system-x86_64 -hda linux.img -enable-kvm -cpu host -smp 16 -m 32768 -vga std -cdrom linux-install.iso -boot d -vnc :0 @@ -240,7 +240,7 @@ Install virtual machine from iso file $ qemu-system-x86_64 -hda win.img -enable-kvm -cpu host -smp 16 -m 32768 -vga std -localtime -usb -usbdevice tablet -cdrom win-install.iso -boot d -vnc :0 ``` -Run virtual machine using optimized devices, user network backend with sharing and port forwarding, in snapshot mode +Run virtual machine using optimized devices, user network back-end with sharing and port forwarding, in snapshot mode ```bash $ qemu-system-x86_64 -drive file=linux.img,media=disk,if=virtio -enable-kvm -cpu host -smp 16 -m 32768 -vga std -device virtio-net-pci,netdev=net0 -netdev user,id=net0,smb=/scratch/$USER/tmp,hostfwd=tcp::2222-:22 -vnc :0 -snapshot @@ -255,7 +255,7 @@ Thanks to port forwarding you can access virtual machine via SSH (Linux) or RDP ### Networking and data sharing -For networking virtual machine we suggest to use (default) user network backend (sometimes called slirp). This network backend NATs virtual machines and provides useful services for virtual machines as DHCP, DNS, SMB sharing, port forwarding. +For networking virtual machine we suggest to use (default) user network back-end (sometimes called slirp). This network back-end NATs virtual machines and provides useful services for virtual machines as DHCP, DNS, SMB sharing, port forwarding. In default configuration IP network 10.0.2.0/24 is used, host has IP address 10.0.2.2, DNS server 10.0.2.3, SMB server 10.0.2.4 and virtual machines obtain address from range 10.0.2.15-10.0.2.31. Virtual machines have access to Anselm's network via NAT on compute node (host). @@ -283,7 +283,7 @@ Optimized network setup with sharing and port forwarding **Internet access** -Sometime your virtual machine needs access to internet (install software, updates, software activation, etc). We suggest solution using Virtual Distributed Ethernet (VDE) enabled QEMU with SLIRP running on login node tunnelled to compute node. Be aware, this setup has very low performance, the worst performance of all described solutions. +Sometime your virtual machine needs access to internet (install software, updates, software activation, etc). We suggest solution using Virtual Distributed Ethernet (VDE) enabled QEMU with SLIRP running on login node tunneled to compute node. Be aware, this setup has very low performance, the worst performance of all described solutions. Load VDE enabled QEMU environment module (unload standard QEMU module first if necessary). @@ -303,7 +303,7 @@ Run SLIRP daemon over SSH tunnel on login node and connect it to virtual network $ dpipe vde_plug /tmp/sw0 = ssh login1 $VDE2_DIR/bin/slirpvde -s - --dhcp & ``` -Run qemu using vde network backend, connect to created virtual switch. +Run qemu using vde network back-end, connect to created virtual switch. Basic setup (obsolete syntax) @@ -325,11 +325,11 @@ Optimized setup **TAP interconnect** -Both user and vde network backend have low performance. For fast interconnect (10Gbps and more) of compute node (host) and virtual machine (guest) we suggest using Linux kernel TAP device. +Both user and vde network back-end have low performance. For fast interconnect (10 Gbit/s and more) of compute node (host) and virtual machine (guest) we suggest using Linux kernel TAP device. Cluster Anselm provides TAP device tap0 for your job. 
TAP interconnect does not provide any services (like NAT, DHCP, DNS, SMB, etc.) just raw networking, so you should provide your services if you need them. -Run qemu with TAP network backend: +Run qemu with TAP network back-end: ```bash $ qemu-system-x86_64 ... -device virtio-net-pci,netdev=net1 @@ -393,7 +393,7 @@ Run SMB services smbd -s /tmp/qemu-smb/smb.conf ``` -Virtual machine can of course have more than one network interface controller, virtual machine can use more than one network backend. So, you can combine for example use network backend and TAP interconnect. +Virtual machine can of course have more than one network interface controller, virtual machine can use more than one network back-end. So, you can combine for example use network back-end and TAP interconnect. ### Snapshot mode diff --git a/docs.it4i/anselm-cluster-documentation/software/mpi/mpi.md b/docs.it4i/anselm-cluster-documentation/software/mpi/mpi.md index c5c9baa5e..6892dafa2 100644 --- a/docs.it4i/anselm-cluster-documentation/software/mpi/mpi.md +++ b/docs.it4i/anselm-cluster-documentation/software/mpi/mpi.md @@ -114,7 +114,7 @@ Running MPI Programs It is strongly discouraged to mix mpi implementations. Linking an application with one MPI implementation and running mpirun/mpiexec form other implementation may result in unexpected errors. -The MPI program executable must be available within the same path on all nodes. This is automatically fulfilled on the /home and /scratch filesystem. You need to preload the executable, if running on the local scratch /lscratch filesystem. +The MPI program executable must be available within the same path on all nodes. This is automatically fulfilled on the /home and /scratch file system. You need to preload the executable, if running on the local scratch /lscratch file system. ### Ways to run MPI programs @@ -129,7 +129,7 @@ Optimal way to run an MPI program depends on its memory requirements, memory acc **One MPI** process per node, using 16 threads, is most useful for memory demanding applications, that make good use of processor cache memory and are not memory bound. This is also a preferred way for communication intensive applications as one process per node enjoys full bandwidth access to the network interface. -**Two MPI** processes per node, using 8 threads each, bound to processor socket is most useful for memory bandwidth bound applications such as BLAS1 or FFT, with scalable memory demand. However, note that the two processes will share access to the network interface. The 8 threads and socket binding should ensure maximum memory access bandwidth and minimize communication, migration and numa effect overheads. +**Two MPI** processes per node, using 8 threads each, bound to processor socket is most useful for memory bandwidth bound applications such as BLAS1 or FFT, with scalable memory demand. However, note that the two processes will share access to the network interface. The 8 threads and socket binding should ensure maximum memory access bandwidth and minimize communication, migration and NUMA effect overheads. !!! Note "Note" Important! Bind every OpenMP thread to a core! 
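To make the two-ranks-per-node recommendation above concrete, here is a minimal launch sketch. It assumes an Intel-compiler build (`KMP_AFFINITY` is the Intel OpenMP runtime's pinning variable) and a placeholder binary name; the exact socket-binding option differs between MPI implementations, so check the respective mpirun/mpiexec manual.

```bash
# Sketch: 2 MPI ranks on a 16-core node, 8 OpenMP threads per rank, threads pinned to cores.
$ export OMP_NUM_THREADS=8
$ export KMP_AFFINITY=granularity=fine,compact   # GNU builds would use GOMP_CPU_AFFINITY instead
$ mpirun -n 2 ./mympiprog.x                      # binary name is a placeholder; add the implementation-specific socket-binding flag here
```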
diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab_1314.md b/docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab_1314.md index f5b62facd..2398bbb3f 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab_1314.md +++ b/docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab_1314.md @@ -68,10 +68,10 @@ For the performance reasons Matlab should use system MPI. On Anselm the supporte extras = {}; ``` -System MPI library allows Matlab to communicate through 40Gbps Infiniband QDR interconnect instead of slower 1Gb ethernet network. +System MPI library allows Matlab to communicate through 40 Gbit/s InfiniBand QDR interconnect instead of slower 1 Gbit Ethernet network. !!! Note "Note" - Please note: The path to MPI library in "mpiLibConf.m" has to match with version of loaded Intel MPI module. In this example the version 4.1.1.036 of Iintel MPI is used by Matlab and therefore module impi/4.1.1.036 has to be loaded prior to starting Matlab. + Please note: The path to MPI library in "mpiLibConf.m" has to match with version of loaded Intel MPI module. In this example the version 4.1.1.036 of Intel MPI is used by Matlab and therefore module impi/4.1.1.036 has to be loaded prior to starting Matlab. ### Parallel Matlab interactive session diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/octave.md b/docs.it4i/anselm-cluster-documentation/software/numerical-languages/octave.md index 2bc6b01dc..0eb26db09 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/octave.md +++ b/docs.it4i/anselm-cluster-documentation/software/numerical-languages/octave.md @@ -99,7 +99,7 @@ Octave is linked with parallel Intel MKL, so it best suited for batch processing variable. !!! Note "Note" - Calculations that do not employ parallelism (either by using parallel MKL eg. via matrix operations, fork() function, [parallel package](http://octave.sourceforge.net/parallel/) or other mechanism) will actually run slower than on host CPU. + Calculations that do not employ parallelism (either by using parallel MKL e.g. via matrix operations, fork() function, [parallel package](http://octave.sourceforge.net/parallel/) or other mechanism) will actually run slower than on host CPU. To use Octave on a node with Xeon Phi: diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/r.md b/docs.it4i/anselm-cluster-documentation/software/numerical-languages/r.md index 627ce6c53..88fe95e72 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/r.md +++ b/docs.it4i/anselm-cluster-documentation/software/numerical-languages/r.md @@ -398,4 +398,4 @@ Example jobscript for [static Rmpi](r/#static-rmpi) parallel R execution, runnin exit ``` -For more information about jobscripts and MPI execution refer to the [Job submission](../../resource-allocation-and-job-execution/job-submission-and-execution/) and general [MPI](../mpi/mpi/) sections. +For more information about jobscript and MPI execution refer to the [Job submission](../../resource-allocation-and-job-execution/job-submission-and-execution/) and general [MPI](../mpi/mpi/) sections.
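The note above about matching the loaded Intel MPI module with `mpiLibConf.m` can be condensed into a short session sketch. Only the `impi/4.1.1.036` module name comes from the documentation; the MATLAB module name is an assumption and should be verified with `module avail`.

```bash
# Sketch: load the Intel MPI version referenced in mpiLibConf.m before starting MATLAB.
$ module load impi/4.1.1.036
$ module load matlab        # module name is an assumption; check `module avail matlab`
$ matlab -nodisplay
```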
diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/fftw.md b/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/fftw.md index 3964da323..44337602d 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/fftw.md +++ b/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/fftw.md @@ -3,7 +3,7 @@ FFTW The discrete Fourier transform in one or more dimensions, MPI parallel -FFTW is a C subroutine library for computing the discrete Fourier transform in one or more dimensions, of arbitrary input size, and of both real and complex data (as well as of even/odd data, i.e. the discrete cosine/sine transforms or DCT/DST). The FFTW library allows for MPI parallel, in-place discrete Fourier transform, with data distributed over number of nodes. +FFTW is a C subroutine library for computing the discrete Fourier transform in one or more dimensions, of arbitrary input size, and of both real and complex data (as well as of even/odd data, e.g. the discrete cosine/sine transforms or DCT/DST). The FFTW library allows for MPI parallel, in-place discrete Fourier transform, with data distributed over number of nodes. Two versions, **3.3.3** and **2.1.5** of FFTW are available on Anselm, each compiled for **Intel MPI** and **OpenMPI** using **intel** and **gnu** compilers. These are available via modules: @@ -22,7 +22,7 @@ Two versions, **3.3.3** and **2.1.5** of FFTW are available on Anselm, each comp $ module load fftw3 ``` -The module sets up environment variables, required for linking and running fftw enabled applications. Make sure that the choice of fftw module is consistent with your choice of MPI library. Mixing MPI of different implementations may have unpredictable results. +The module sets up environment variables, required for linking and running FFTW enabled applications. Make sure that the choice of FFTW module is consistent with your choice of MPI library. Mixing MPI of different implementations may have unpredictable results. Example ------- diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/hdf5.md b/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/hdf5.md index aceb866ff..ae758f5e0 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/hdf5.md +++ b/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/hdf5.md @@ -24,7 +24,7 @@ Versions **1.8.11** and **1.8.13** of HDF5 library are available on Anselm, comp The module sets up environment variables, required for linking and running HDF5 enabled applications. Make sure that the choice of HDF5 module is consistent with your choice of MPI library. Mixing MPI of different implementations may have unpredictable results. !!! Note "Note" - Be aware, that GCC version of **HDF5 1.8.11** has serious performance issues, since it's compiled with -O0 optimization flag. This version is provided only for testing of code compiled only by GCC and IS NOT recommended for production computations. For more informations, please see: <http://www.hdfgroup.org/ftp/HDF5/prev-releases/ReleaseFiles/release5-1811> + Be aware, that GCC version of **HDF5 1.8.11** has serious performance issues, since it's compiled with -O0 optimization flag. This version is provided only for testing of code compiled only by GCC and IS NOT recommended for production computations. 
For more information, please see: <http://www.hdfgroup.org/ftp/HDF5/prev-releases/ReleaseFiles/release5-1811> All GCC versions of **HDF5 1.8.13** are not affected by the bug, are compiled with -O3 optimizations and are recommended for production computations. @@ -88,4 +88,4 @@ Load modules and compile: Run the example as [Intel MPI program](../anselm-cluster-documentation/software/mpi/running-mpich2/). -For further informations, please see the website: <http://www.hdfgroup.org/HDF5/> +For further information, please see the website: <http://www.hdfgroup.org/HDF5/> diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/petsc.md b/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/petsc.md index e914c36fc..8cdcd2b8e 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/petsc.md +++ b/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/petsc.md @@ -1,7 +1,7 @@ PETSc ===== -PETSc is a suite of building blocks for the scalable solution of scientific and engineering applications modelled by partial differential equations. It supports MPI, shared memory, and GPUs through CUDA or OpenCL, as well as hybrid MPI-shared memory or MPI-GPU parallelism. +PETSc is a suite of building blocks for the scalable solution of scientific and engineering applications modeled by partial differential equations. It supports MPI, shared memory, and GPU through CUDA or OpenCL, as well as hybrid MPI-shared memory or MPI-GPU parallelism. Introduction ------------ diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/trilinos.md b/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/trilinos.md index beee8c20e..935fc4f73 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/trilinos.md +++ b/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/trilinos.md @@ -5,14 +5,14 @@ Packages for large scale scientific and engineering problems. Provides MPI and h ### Introduction -Trilinos is a collection of software packages for the numerical solution of large scale scientific and engineering problems. It is based on C++ and feautures modern object-oriented design. Both serial as well as parallel computations based on MPI and hybrid parallelization are supported within Trilinos packages. +Trilinos is a collection of software packages for the numerical solution of large scale scientific and engineering problems. It is based on C++ and features modern object-oriented design. Both serial as well as parallel computations based on MPI and hybrid parallelization are supported within Trilinos packages. ### Installed packages Current Trilinos installation on ANSELM contains (among others) the following main packages - **Epetra** - core linear algebra package containing classes for manipulation with serial and distributed vectors, matrices, and graphs. Dense linear solvers are supported via interface to BLAS and LAPACK (Intel MKL on ANSELM). Its extension **EpetraExt** contains e.g. methods for matrix-matrix multiplication. -- **Tpetra** - next-generation linear algebra package. Supports 64bit indexing and arbitrary data type using C++ templates. +- **Tpetra** - next-generation linear algebra package. Supports 64 bit indexing and arbitrary data type using C++ templates. - **Belos** - library of various iterative solvers (CG, block CG, GMRES, block GMRES etc.). - **Amesos** - interface to direct sparse solvers. - **Anasazi** - framework for large-scale eigenvalue algorithms. 
@@ -25,7 +25,7 @@ For the full list of Trilinos packages, descriptions of their capabilities, and Currently, Trilinos in version 11.2.3 compiled with Intel Compiler is installed on ANSELM. -### Compilling against Trilinos +### Compiling against Trilinos First, load the appropriate module: diff --git a/docs.it4i/anselm-cluster-documentation/software/nvidia-cuda.md b/docs.it4i/anselm-cluster-documentation/software/nvidia-cuda.md index fab15f433..015750845 100644 --- a/docs.it4i/anselm-cluster-documentation/software/nvidia-cuda.md +++ b/docs.it4i/anselm-cluster-documentation/software/nvidia-cuda.md @@ -1,7 +1,7 @@ -nVidia CUDA +NVIDIA CUDA =========== -##A guide to nVidia CUDA programming and GPU usage +##A guide to NVIDIA CUDA programming and GPU usage CUDA Programming on Anselm -------------------------- @@ -196,11 +196,11 @@ To run the code use interactive PBS session to get access to one of the GPU acce CUDA Libraries -------------- -### CuBLAS +### cuBLAS The NVIDIA CUDA Basic Linear Algebra Subroutines (cuBLAS) library is a GPU-accelerated version of the complete standard BLAS library with 152 standard BLAS routines. Basic description of the library together with basic performance comparison with MKL can be found [here](https://developer.nvidia.com/cublas "Nvidia cuBLAS"). -**CuBLAS example: SAXPY** +**cuBLAS example: SAXPY** SAXPY function multiplies the vector x by the scalar alpha and adds it to the vector y overwriting the latest vector with the result. The description of the cuBLAS function can be found in [NVIDIA CUDA documentation](http://docs.nvidia.com/cuda/cublas/index.html#cublas-lt-t-gt-axpy "Nvidia CUDA documentation "). Code can be pasted in the file and compiled without any modification. diff --git a/docs.it4i/anselm-cluster-documentation/software/omics-master/diagnostic-component-team.md b/docs.it4i/anselm-cluster-documentation/software/omics-master/diagnostic-component-team.md index 00de9c54a..1d323a7b8 100644 --- a/docs.it4i/anselm-cluster-documentation/software/omics-master/diagnostic-component-team.md +++ b/docs.it4i/anselm-cluster-documentation/software/omics-master/diagnostic-component-team.md @@ -10,7 +10,7 @@ TEAM is available at the following address: <http://omics.it4i.cz/team/> ### Diagnostic component (TEAM) -VCF files are scanned by this diagnostic tool for known diagnostic disease-associated variants. When no diagnostic mutation is found, the file can be sent to the disease-causing gene discovery tool to see wheter new disease associated variants can be found. +VCF files are scanned by this diagnostic tool for known diagnostic disease-associated variants. When no diagnostic mutation is found, the file can be sent to the disease-causing gene discovery tool to see whether new disease associated variants can be found. TEAM (27) is an intuitive and easy-to-use web tool that fills the gap between the predicted mutations and the final diagnostic in targeted enrichment sequencing analysis. The tool searches for known diagnostic mutations, corresponding to a disease panel, among the predicted patient’s variants. Diagnostic variants for the disease are taken from four databases of disease-related variants (HGMD-public, HUMSAVAR , ClinVar and COSMIC) If no primary diagnostic variant is found, then a list of secondary findings that can help to establish a diagnostic is produced. TEAM also provides with an interface for the definition of and customization of panels, by means of which, genes and mutations can be added or discarded to adjust panel definitions. 
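Because the cuBLAS SAXPY example above is meant to compile without modification, a hedged build-and-run sketch is added here; the CUDA module name and the source file name are assumptions.

```bash
# Sketch: build a cuBLAS-based example with nvcc and link against the cuBLAS library.
$ module load cuda                                # module name is an assumption
$ nvcc cublas_saxpy.cu -lcublas -o cublas_saxpy
$ ./cublas_saxpy                                  # run inside a PBS session on a GPU accelerated node
```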
diff --git a/docs.it4i/anselm-cluster-documentation/software/omics-master/priorization-component-bierapp.md b/docs.it4i/anselm-cluster-documentation/software/omics-master/priorization-component-bierapp.md index ac37e6d56..439feb9fb 100644 --- a/docs.it4i/anselm-cluster-documentation/software/omics-master/priorization-component-bierapp.md +++ b/docs.it4i/anselm-cluster-documentation/software/omics-master/priorization-component-bierapp.md @@ -1,14 +1,14 @@ -Priorization component (BiERApp) +Prioritization component (BiERapp) ================================ ### Access -BiERApp is available at the following address: <http://omics.it4i.cz/bierapp/> +BiERapp is available at the following address: <http://omics.it4i.cz/bierapp/> !!! Note "Note" The address is accessible onlyvia VPN. -###BiERApp +###BiERapp **This tool is aimed to discover new disease genes or variants by studying affected families or cases and controls. It carries out a filtering process to sequentially remove: (i) variants which are not no compatible with the disease because are not expected to have impact on the protein function; (ii) variants that exist at frequencies incompatible with the disease; (iii) variants that do not segregate with the disease. The result is a reduced set of disease gene candidates that should be further validated experimentally.** @@ -16,6 +16,6 @@ BiERapp (28) efficiently helps in the identification of causative variants in  -**Figure 6**. Web interface to the prioritization tool.This figure shows the interface of the web tool for candidate gene +**Figure 6**. Web interface to the prioritization tool. This figure shows the interface of the web tool for candidate gene prioritization with the filters available. The tool includes a genomic viewer (Genome Maps 30) that enables the representation of the variants in the corresponding genomic coordinates. diff --git a/docs.it4i/anselm-cluster-documentation/software/operating-system.md b/docs.it4i/anselm-cluster-documentation/software/operating-system.md index 9fbbe0e9c..03a9cf45b 100644 --- a/docs.it4i/anselm-cluster-documentation/software/operating-system.md +++ b/docs.it4i/anselm-cluster-documentation/software/operating-system.md @@ -1,7 +1,7 @@ Operating System =============== -The operating system on Anselm is Linux - **bullx Linux Server release 6.X** +The operating system on Anselm is Linux - **bullx Linux Server release 6.x** bullx Linux is based on Red Hat Enterprise Linux. bullx Linux is a Linux distribution provided by Bull and dedicated to HPC applications. diff --git a/docs.it4i/anselm-cluster-documentation/software/paraview.md b/docs.it4i/anselm-cluster-documentation/software/paraview.md index 6ed62410f..a29e8ec5f 100644 --- a/docs.it4i/anselm-cluster-documentation/software/paraview.md +++ b/docs.it4i/anselm-cluster-documentation/software/paraview.md @@ -49,7 +49,7 @@ Note the that the server is listening on compute node cn77 in this case, we shal ### Client connection -Because a direct connection is not allowed to compute nodes on Anselm, you must establish a SSH tunnel to connect to the server. Choose a port number on your PC to be forwarded to ParaView server, for example 12345. If your PC is running Linux, use this command to estabilish a SSH tunnel: +Because a direct connection is not allowed to compute nodes on Anselm, you must establish a SSH tunnel to connect to the server. Choose a port number on your PC to be forwarded to ParaView server, for example 12345. 
If your PC is running Linux, use this command to establish a SSH tunnel: ```bash ssh -TN -L 12345:cn77:11111 username@anselm.it4i.cz diff --git a/docs.it4i/anselm-cluster-documentation/storage.md b/docs.it4i/anselm-cluster-documentation/storage.md index 979e057a2..ce6acc114 100644 --- a/docs.it4i/anselm-cluster-documentation/storage.md +++ b/docs.it4i/anselm-cluster-documentation/storage.md @@ -1,7 +1,7 @@ Storage ======= -There are two main shared file systems on Anselm cluster, the [HOME](#home) and [SCRATCH](#scratch). All login and compute nodes may access same data on shared filesystems. Compute nodes are also equipped with local (non-shared) scratch, ramdisk and tmp filesystems. +There are two main shared file systems on Anselm cluster, the [HOME](#home) and [SCRATCH](#scratch). All login and compute nodes may access same data on shared file systems. Compute nodes are also equipped with local (non-shared) scratch, ramdisk and tmp file systems. Archiving --------- @@ -123,10 +123,10 @@ Default stripe size is 1MB, stripe count is 1. There are 22 OSTs dedicated for t |HOME filesystem|| |---|---| |Mountpoint|/home| -|Capacity|320TB| -|Throughput|2GB/s| -|User quota|250GB| -|Default stripe size|1MB| +|Capacity|320 TB| +|Throughput|2 GB/s| +|User quota|250 GB| +|Default stripe size|1 MB| |Default stripe count|1| |Number of OSTs|22| @@ -342,10 +342,10 @@ The procedure to obtain the CESNET access is quick and trouble-free. CESNET storage access ------------ -### Understanding Cesnet storage +### Understanding CESNET storage !!! Note "Note" - It is very important to understand the Cesnet storage before uploading data. Please read <https://du.cesnet.cz/en/navody/home-migrace-plzen/start> first. + It is very important to understand the CESNET storage before uploading data. Please read <https://du.cesnet.cz/en/navody/home-migrace-plzen/start> first. Once registered for CESNET Storage, you may [access the storage](https://du.cesnet.cz/en/navody/faq/start) in number of ways. We recommend the SSHFS and RSYNC methods. @@ -354,9 +354,9 @@ Once registered for CESNET Storage, you may [access the storage](https://du.cesn !!! Note "Note" SSHFS: The storage will be mounted like a local hard drive -The SSHFS provides a very convenient way to access the CESNET Storage. The storage will be mounted onto a local directory, exposing the vast CESNET Storage as if it was a local removable harddrive. Files can be than copied in and out in a usual fashion. +The SSHFS provides a very convenient way to access the CESNET Storage. The storage will be mounted onto a local directory, exposing the vast CESNET Storage as if it was a local removable hard disk drive. Files can be than copied in and out in a usual fashion. -First, create the mountpoint +First, create the mount point ```bash $ mkdir cesnet @@ -405,18 +405,18 @@ Rsync finds files that need to be transferred using a "quick check" algorithm (b More about Rsync at <https://du.cesnet.cz/en/navody/rsync/start#pro_bezne_uzivatele> -Transfer large files to/from Cesnet storage, assuming membership in the Storage VO +Transfer large files to/from CESNET storage, assuming membership in the Storage VO ```bash $ rsync --progress datafile username@ssh.du1.cesnet.cz:VO_storage-cache_tape/. $ rsync --progress username@ssh.du1.cesnet.cz:VO_storage-cache_tape/datafile . 
``` -Transfer large directories to/from Cesnet storage, assuming membership in the Storage VO +Transfer large directories to/from CESNET storage, assuming membership in the Storage VO ```bash $ rsync --progress -av datafolder username@ssh.du1.cesnet.cz:VO_storage-cache_tape/. $ rsync --progress -av username@ssh.du1.cesnet.cz:VO_storage-cache_tape/datafolder . ``` -Transfer rates of about 28MB/s can be expected. +Transfer rates of about 28 MB/s can be expected. diff --git a/docs.it4i/get-started-with-it4innovations/obtaining-login-credentials/certificates-faq.md b/docs.it4i/get-started-with-it4innovations/obtaining-login-credentials/certificates-faq.md index 347236d0d..e94a3676c 100644 --- a/docs.it4i/get-started-with-it4innovations/obtaining-login-credentials/certificates-faq.md +++ b/docs.it4i/get-started-with-it4innovations/obtaining-login-credentials/certificates-faq.md @@ -172,4 +172,4 @@ Q: What is the MyProxy service? Q: Someone may have copied or had access to the private key of my certificate either in a separate file or in the browser. What should I do? -------------------------------------------------------------------------------------------------------------------------------------------- -Please ask the CA that issued your certificate to revoke this certifcate and to supply you with a new one. In addition, please report this to IT4Innovations by contacting [the support team](https://support.it4i.cz/rt). +Please ask the CA that issued your certificate to revoke this certificate and to supply you with a new one. In addition, please report this to IT4Innovations by contacting [the support team](https://support.it4i.cz/rt). diff --git a/docs.it4i/salomon/compute-nodes.md b/docs.it4i/salomon/compute-nodes.md index c6e17ebea..d30f29688 100644 --- a/docs.it4i/salomon/compute-nodes.md +++ b/docs.it4i/salomon/compute-nodes.md @@ -84,26 +84,26 @@ Memory is equally distributed across all CPUs and cores for optimal performance. - 2 sockets - Memory Controllers are integrated into processors. - - 8 DDR4 DIMMS per node - - 4 DDR4 DIMMS per CPU - - 1 DDR4 DIMMS per channel -- Populated memory: 8x 16GB DDR4 DIMM >2133MHz + - 8 DDR4 DIMMs per node + - 4 DDR4 DIMMs per CPU + - 1 DDR4 DIMMs per channel +- Populated memory: 8 x 16 GB DDR4 DIMM >2133 MHz ### Compute Node With MIC Accelerator 2 sockets Memory Controllers are integrated into processors. -- 8 DDR4 DIMMS per node -- 4 DDR4 DIMMS per CPU -- 1 DDR4 DIMMS per channel +- 8 DDR4 DIMMs per node +- 4 DDR4 DIMMs per CPU +- 1 DDR4 DIMMs per channel -Populated memory: 8x 16GB DDR4 DIMM 2133MHz +Populated memory: 8 x 16 GB DDR4 DIMM 2133 MHz MIC Accelerator Intel Xeon Phi 7120P Processor - 2 sockets - Memory Controllers are are connected via an Interprocessor Network (IPN) ring. - - 16 GDDR5 DIMMS per node - - 8 GDDR5 DIMMS per CPU - - 2 GDDR5 DIMMS per channel + - 16 GDDR5 DIMMs per node + - 8 GDDR5 DIMMs per CPU + - 2 GDDR5 DIMMs per channel diff --git a/docs.it4i/salomon/environment-and-modules.md b/docs.it4i/salomon/environment-and-modules.md index 50ae7f5c6..8f6fb590f 100644 --- a/docs.it4i/salomon/environment-and-modules.md +++ b/docs.it4i/salomon/environment-and-modules.md @@ -25,7 +25,7 @@ fi ``` !!! Note "Note" - Do not run commands outputing to standard output (echo, module list, etc) in .bashrc for non-interactive SSH sessions. It breaks fundamental functionality (scp, PBS) of your account! Take care for SSH session interactivity for such commands as stated in the previous example. 
+ Do not run commands outputting to standard output (echo, module list, etc) in .bashrc for non-interactive SSH sessions. It breaks fundamental functionality (scp, PBS) of your account! Take care for SSH session interactivity for such commands as stated in the previous example. How to using modules in examples: <tty-player controls src=/src/salomon/modules_salomon.ttyrec></tty-player> @@ -71,13 +71,13 @@ To check available modules use $ module avail ``` -To load a module, for example the OpenMPI module use +To load a module, for example the Open MPI module use ```bash $ module load OpenMPI ``` -loading the OpenMPI module will set up paths and environment variables of your active shell such that you are ready to run the OpenMPI software +loading the Open MPI module will set up paths and environment variables of your active shell such that you are ready to run the Open MPI software To check loaded modules use @@ -85,7 +85,7 @@ To check loaded modules use $ module list ``` -To unload a module, for example the OpenMPI module use +To unload a module, for example the Open MPI module use ```bash $ module unload OpenMPI @@ -99,7 +99,7 @@ $ man module ### EasyBuild Toolchains -As we wrote earlier, we are using EasyBuild for automatised software installation and module creation. +As we wrote earlier, we are using EasyBuild for automatized software installation and module creation. EasyBuild employs so-called **compiler toolchains** or, simply toolchains for short, which are a major concept in handling the build and installation processes. @@ -112,8 +112,8 @@ The EasyBuild framework prepares the build environment for the different toolc Recent releases of EasyBuild include out-of-the-box toolchain support for: - various compilers, including GCC, Intel, Clang, CUDA -- common MPI libraries, such as Intel MPI, MPICH, MVAPICH2, OpenMPI -- various numerical libraries, including ATLAS, Intel MKL, OpenBLAS, ScalaPACK, FFTW +- common MPI libraries, such as Intel MPI, MPICH, MVAPICH2, Open MPI +- various numerical libraries, including ATLAS, Intel MKL, OpenBLAS, ScaLAPACK, FFTW On Salomon, we have currently following toolchains installed: diff --git a/docs.it4i/salomon/hardware-overview.md b/docs.it4i/salomon/hardware-overview.md index a16890dfe..90ee2595e 100644 --- a/docs.it4i/salomon/hardware-overview.md +++ b/docs.it4i/salomon/hardware-overview.md @@ -55,6 +55,6 @@ For large memory computations a special SMP/NUMA SGI UV 2000 server is available |Node |Count |Processor |Cores|Memory|Extra HW | | --- | --- | --- | --- | --- | --- | -|UV2000 |1 |14x Intel Xeon E5-4627v2, 3.3GHz, 8cores |112 |3328GB DDR3@1866MHz |2x 400GB local SSD1x NVIDIA GM200(GeForce GTX TITAN X),12GB RAM | +|UV2000 |1 |14 x Intel Xeon E5-4627v2, 3.3 GHz, 8 cores |112 |3328 GB DDR3@1866 MHz |2 x 400GB local SSD1x NVIDIA GM200(GeForce GTX TITAN X),12 GB RAM |  diff --git a/docs.it4i/salomon/ib-single-plane-topology.md b/docs.it4i/salomon/ib-single-plane-topology.md index b6c9f002b..34c43034e 100644 --- a/docs.it4i/salomon/ib-single-plane-topology.md +++ b/docs.it4i/salomon/ib-single-plane-topology.md @@ -1,7 +1,7 @@ IB single-plane topology ======================== -A complete M-Cell assembly consists of four compute racks. Each rack contains 4x physical IRUs - Independent rack units. Using one dual socket node per one blade slot leads to 8 logical IRUs. Each rack contains 4x2 SGI ICE X IB Premium Blades. +A complete M-Cell assembly consists of four compute racks. Each rack contains 4 x physical IRUs - Independent rack units. 
Using one dual socket node per one blade slot leads to 8 logical IRUs. Each rack contains 4 x 2 SGI ICE X IB Premium Blades. The SGI ICE X IB Premium Blade provides the first level of interconnection via dual 36-port Mellanox FDR InfiniBand ASIC switch with connections as follows: @@ -9,9 +9,9 @@ The SGI ICE X IB Premium Blade provides the first level of interconnection via d - 3 ports on each chip provide connectivity between the chips - 24 ports from each switch chip connect to the external bulkhead, for a total of 48 -###IB single-plane topology - ICEX Mcell +###IB single-plane topology - ICEX M-Cell -Each colour in each physical IRU represents one dual-switch ASIC switch. +Each color in each physical IRU represents one dual-switch ASIC switch. [IB single-plane topology - ICEX Mcell.pdf](../src/IB single-plane topology - ICEX Mcell.pdf) @@ -19,13 +19,13 @@ Each colour in each physical IRU represents one dual-switch ASIC switch. ### IB single-plane topology - Accelerated nodes -Each of the 3 inter-connected D racks are equivalent to one half of Mcell rack. 18x D rack with MIC accelerated nodes [r21-r38] are equivalent to 3 Mcell racks as shown in a diagram [7D Enhanced Hypercube](7d-enhanced-hypercube/). +Each of the 3 inter-connected D racks are equivalent to one half of M-Cell rack. 18 x D rack with MIC accelerated nodes [r21-r38] are equivalent to 3 M-Cell racks as shown in a diagram [7D Enhanced Hypercube](7d-enhanced-hypercube/). As shown in a diagram  -- Racks 21, 22, 23, 24, 25, 26 are equivalent to one Mcell rack. -- Racks 27, 28, 29, 30, 31, 32 are equivalent to one Mcell rack. -- Racks 33, 34, 35, 36, 37, 38 are equivalent to one Mcell rack. +- Racks 21, 22, 23, 24, 25, 26 are equivalent to one M-Cell rack. +- Racks 27, 28, 29, 30, 31, 32 are equivalent to one M-Cell rack. +- Racks 33, 34, 35, 36, 37, 38 are equivalent to one M-Cell rack. [IB single-plane topology - Accelerated nodes.pdf](../src/IB single-plane topology - Accelerated nodes.pdf) diff --git a/docs.it4i/salomon/introduction.md b/docs.it4i/salomon/introduction.md index bda0fd83c..cd71cd664 100644 --- a/docs.it4i/salomon/introduction.md +++ b/docs.it4i/salomon/introduction.md @@ -1,7 +1,7 @@ Introduction ============ -Welcome to Salomon supercomputer cluster. The Salomon cluster consists of 1008 compute nodes, totaling 24192 compute cores with 129TB RAM and giving over 2 Pflop/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 24 cores, at least 128GB RAM. Nodes are interconnected by 7D Enhanced hypercube Infiniband network and equipped with Intel Xeon E5-2680v3 processors. The Salomon cluster consists of 576 nodes without accelerators and 432 nodes equipped with Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](hardware-overview/). +Welcome to Salomon supercomputer cluster. The Salomon cluster consists of 1008 compute nodes, totaling 24192 compute cores with 129 TB RAM and giving over 2 Pflop/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 24 cores, at least 128 GB RAM. Nodes are interconnected by 7D Enhanced hypercube InfiniBand network and equipped with Intel Xeon E5-2680v3 processors. The Salomon cluster consists of 576 nodes without accelerators and 432 nodes equipped with Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](hardware-overview/). 
The cluster runs [CentOS Linux](http://www.bull.com/bullx-logiciels/systeme-exploitation.html) operating system, which is compatible with the RedHat [ Linux family.](http://upload.wikimedia.org/wikipedia/commons/1/1b/Linux_Distribution_Timeline.svg) diff --git a/docs.it4i/salomon/job-priority.md b/docs.it4i/salomon/job-priority.md index c3ef75950..5f479260d 100644 --- a/docs.it4i/salomon/job-priority.md +++ b/docs.it4i/salomon/job-priority.md @@ -31,7 +31,7 @@ Fairshare priority is calculated as where MAX_FAIRSHARE has value 1E6, usage~Project~ is cumulated usage by all members of selected project, usage~Total~ is total usage by all users, by all projects. -Usage counts allocated corehours (ncpus*walltime). Usage is decayed, or cut in half periodically, at the interval 168 hours (one week). Jobs queued in queue qexp are not calculated to project's usage. +Usage counts allocated core hours (ncpus*walltime). Usage is decayed, or cut in half periodically, at the interval 168 hours (one week). Jobs queued in queue qexp are not calculated to project's usage. !!! Note "Note" Calculated usage and fairshare priority can be seen at <https://extranet.it4i.cz/rsweb/salomon/projects>. @@ -65,7 +65,7 @@ It means, that jobs with lower execution priority can be run before jobs with hi !!! Note "Note" It is **very beneficial to specify the walltime** when submitting jobs. -Specifying more accurate walltime enables better schedulling, better execution times and better resource usage. Jobs with suitable (small) walltime could be backfilled - and overtake job(s) with higher priority. +Specifying more accurate walltime enables better scheduling, better execution times and better resource usage. Jobs with suitable (small) walltime could be backfilled - and overtake job(s) with higher priority. ### Job placement diff --git a/docs.it4i/salomon/network.md b/docs.it4i/salomon/network.md index beb5b7edc..d634a9e24 100644 --- a/docs.it4i/salomon/network.md +++ b/docs.it4i/salomon/network.md @@ -1,10 +1,10 @@ Network ======= -All compute and login nodes of Salomon are interconnected by 7D Enhanced hypercube [Infiniband](http://en.wikipedia.org/wiki/InfiniBand) network and by Gigabit [Ethernet](http://en.wikipedia.org/wiki/Ethernet) -network. Only [Infiniband](http://en.wikipedia.org/wiki/InfiniBand) network may be used to transfer user data. +All compute and login nodes of Salomon are interconnected by 7D Enhanced hypercube [InfiniBand](http://en.wikipedia.org/wiki/InfiniBand) network and by Gigabit [Ethernet](http://en.wikipedia.org/wiki/Ethernet) +network. Only [InfiniBand](http://en.wikipedia.org/wiki/InfiniBand) network may be used to transfer user data. -Infiniband Network +InfiniBand Network ------------------ All compute and login nodes of Salomon are interconnected by 7D Enhanced hypercube [Infiniband](http://en.wikipedia.org/wiki/InfiniBand) network (56 Gbps). The network topology is a [7D Enhanced hypercube](7d-enhanced-hypercube/). diff --git a/docs.it4i/salomon/prace.md b/docs.it4i/salomon/prace.md index 1f4e771b9..7ceb1b805 100644 --- a/docs.it4i/salomon/prace.md +++ b/docs.it4i/salomon/prace.md @@ -235,9 +235,9 @@ For PRACE users, the default production run queue is "qprace". 
PRACE users can a |queue|Active project|Project resources|Nodes|priority|authorization|walltime | |---|---|---|---|---|---|---| - |**qexp** Express queue|no|none required|32 nodes, max 8 per user|150|no|1 / 1h| - |**qprace** Production queue|yes|>0|1006 nodes, max 86 per job|0|no|24 / 48h| - |**qfree** Free resource queue|yes|none required|752 nodes, max 86 per job|-1024|no|12 / 12h| + |**qexp** Express queue|no|none required|32 nodes, max 8 per user|150|no|1 / 1 h| + |**qprace** Production queue|yes|>0|1006 nodes, max 86 per job|0|no|24 / 48 h| + |**qfree** Free resource queue|yes|none required|752 nodes, max 86 per job|-1024|no|12 / 12 h| **qprace**, the PRACE This queue is intended for normal production runs. It is required that active project with nonzero remaining resources is specified to enter the qprace. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qprace is 48 hours. If the job needs longer time, it must use checkpoint/restart functionality. diff --git a/docs.it4i/salomon/resource-allocation-and-job-execution.md b/docs.it4i/salomon/resource-allocation-and-job-execution.md index 562a07636..597f034b6 100644 --- a/docs.it4i/salomon/resource-allocation-and-job-execution.md +++ b/docs.it4i/salomon/resource-allocation-and-job-execution.md @@ -1,7 +1,7 @@ Resource Allocation and Job Execution ===================================== -To run a [job](job-submission-and-execution/), [computational resources](resources-allocation-policy/) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which efficiently distributes workloads across the supercomputer. Extensive informations about PBS Pro can be found in the [official documentation here](../pbspro-documentation/pbspro/), especially in the PBS Pro User's Guide. +To run a [job](job-submission-and-execution/), [computational resources](resources-allocation-policy/) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which efficiently distributes workloads across the supercomputer. Extensive information about PBS Pro can be found in the [official documentation here](../pbspro-documentation/pbspro/), especially in the PBS Pro User's Guide. Resources Allocation Policy --------------------------- diff --git a/docs.it4i/salomon/software/compilers.md b/docs.it4i/salomon/software/compilers.md index a5703fd4b..283761ac0 100644 --- a/docs.it4i/salomon/software/compilers.md +++ b/docs.it4i/salomon/software/compilers.md @@ -128,7 +128,7 @@ To run the example with 5 threads issue $ ./count.upc.x -fupc-threads-5 ``` -For more informations see the man pages. +For more information see the man pages. ### Berkley UPC Compiler @@ -141,7 +141,7 @@ To use the Berkley UPC compiler and runtime environment to run the binaries use As default UPC network the "smp" is used. This is very quick and easy way for testing/debugging, but limited to one node only. -For production runs, it is recommended to use the native Infiband implementation of UPC network "ibv". For testing/debugging using multiple nodes, the "mpi" UPC network is recommended. Please note, that the selection of the network is done at the compile time and not at runtime (as expected)! +For production runs, it is recommended to use the native InfiniBand implementation of UPC network "ibv". For testing/debugging using multiple nodes, the "mpi" UPC network is recommended. 
Please note, that the selection of the network is done at the compile time and not at runtime (as expected)! Example UPC code: @@ -182,12 +182,12 @@ To run the example on two compute nodes using all 48 cores, with 48 threads, iss $ upcrun -n 48 ./hello.upc.x ``` -For more informations see the man pages. +For more information see the man pages. ##Java For information how to use Java (runtime and/or compiler), please read the [Java page](java/). -##nVidia CUDA +##NVIDIA CUDA -For information how to work with nVidia CUDA, please read the [nVidia CUDA page](../../anselm-cluster-documentation/software/nvidia-cuda/). +For information how to work with NVIDIA CUDA, please read the [NVIDIA CUDA page](../../anselm-cluster-documentation/software/nvidia-cuda/). diff --git a/docs.it4i/salomon/software/index.md b/docs.it4i/salomon/software/index.md index 6fdb05a03..da0befc72 100644 --- a/docs.it4i/salomon/software/index.md +++ b/docs.it4i/salomon/software/index.md @@ -26,7 +26,7 @@ Salomon Cluster Software * Message Passing Interface libraries ### [Introduction](mpi/mpi) ### [MPI4Py (MPI for Python)](mpi/mpi4py-mpi-for-python) -### [Running OpenMPI](mpi/Running_OpenMPI) +### [Running Open MPI](mpi/Running_OpenMPI) ## Debuggers * A collection of development tools ### [Introduction](debuggers/Introduction) @@ -41,18 +41,18 @@ Salomon Cluster Software * Interpreted languages for numerical computations ### [Introduction](numerical-languages/introduction) ### [R](numerical-languages/r) -### [Matlab](numerical-languages/matlab) +### [MATLAB](numerical-languages/matlab) ### [Octave](numerical-languages/octave) ## Chemistry * Tools for computational chemistry ### [Molpro](chemistry/molpro) ### [Phono3py](chemistry/phono3py) ### [NWChem](chemistry/nwchem) -## Comsol +## COMSOL * A finite element analysis, solver and Simulation software ### [COMSOL](comsol/comsol-multiphysics) ### [Licensing and Available Versions](comsol/licensing-and-available-versions) -## Ansys +## ANSYS * An engineering simulation software ### [Introduction](ansys/ansys) ### [Workbench](ansys/workbench) diff --git a/docs.it4i/salomon/software/java.md b/docs.it4i/salomon/software/java.md index 5600b32c6..ca5b0bf39 100644 --- a/docs.it4i/salomon/software/java.md +++ b/docs.it4i/salomon/software/java.md @@ -25,4 +25,4 @@ With the module loaded, not only the runtime environment (JRE), but also the dev $ which javac ``` -Java applications may use MPI for interprocess communication, in conjunction with OpenMPI. Read more on <http://www.open-mpi.org/faq/?category=java>. This functionality is currently not supported on Anselm cluster. In case you require the java interface to MPI, please contact [cluster support](https://support.it4i.cz/rt/). +Java applications may use MPI for inter-process communication, in conjunction with Open MPI. Read more on <http://www.open-mpi.org/faq/?category=java>. This functionality is currently not supported on Anselm cluster. In case you require the java interface to MPI, please contact [cluster support](https://support.it4i.cz/rt/). 
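As a usage illustration for the Java module described above, a minimal compile-and-run sketch follows; the unversioned module name and the `Hello.java` source file are assumptions.

```bash
# Sketch: compile and run a Java program once the module is loaded.
$ module load Java
$ javac Hello.java   # Hello.java is a placeholder source file
$ java Hello
```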
diff --git a/docs.it4i/salomon/software/operating-system.md b/docs.it4i/salomon/software/operating-system.md index 3112b6359..cff491402 100644 --- a/docs.it4i/salomon/software/operating-system.md +++ b/docs.it4i/salomon/software/operating-system.md @@ -1,6 +1,6 @@ Operating System ================ -The operating system on Salomon is Linux - **CentOS 6.X** +The operating system on Salomon is Linux - **CentOS 6.x** The CentOS Linux distribution is a stable, predictable, manageable and reproducible platform derived from the sources of Red Hat Enterprise Linux (RHEL). \ No newline at end of file diff --git a/docs.it4i/salomon/storage.md b/docs.it4i/salomon/storage.md index 69b7e2729..5f8306aa2 100644 --- a/docs.it4i/salomon/storage.md +++ b/docs.it4i/salomon/storage.md @@ -344,10 +344,10 @@ The procedure to obtain the CESNET access is quick and trouble-free. CESNET storage access --------------------- -### Understanding Cesnet storage +### Understanding CESNET storage !!! Note "Note" - It is very important to understand the Cesnet storage before uploading data. Please read <https://du.cesnet.cz/en/navody/home-migrace-plzen/start> first. + It is very important to understand the CESNET storage before uploading data. Please read <https://du.cesnet.cz/en/navody/home-migrace-plzen/start> first. Once registered for CESNET Storage, you may [access the storage](https://du.cesnet.cz/en/navody/faq/start) in number of ways. We recommend the SSHFS and RSYNC methods. @@ -356,9 +356,9 @@ Once registered for CESNET Storage, you may [access the storage](https://du.cesn !!! Note "Note" SSHFS: The storage will be mounted like a local hard drive -The SSHFS provides a very convenient way to access the CESNET Storage. The storage will be mounted onto a local directory, exposing the vast CESNET Storage as if it was a local removable harddrive. Files can be than copied in and out in a usual fashion. +The SSHFS provides a very convenient way to access the CESNET Storage. The storage will be mounted onto a local directory, exposing the vast CESNET Storage as if it was a local removable hard disk drive. Files can be than copied in and out in a usual fashion. -First, create the mountpoint +First, create the mount point ```bash $ mkdir cesnet @@ -407,18 +407,18 @@ Rsync finds files that need to be transferred using a "quick check" algorithm (b More about Rsync at <https://du.cesnet.cz/en/navody/rsync/start#pro_bezne_uzivatele> -Transfer large files to/from Cesnet storage, assuming membership in the Storage VO +Transfer large files to/from CESNET storage, assuming membership in the Storage VO ```bash $ rsync --progress datafile username@ssh.du1.cesnet.cz:VO_storage-cache_tape/. $ rsync --progress username@ssh.du1.cesnet.cz:VO_storage-cache_tape/datafile . ``` -Transfer large directories to/from Cesnet storage, assuming membership in the Storage VO +Transfer large directories to/from CESNET storage, assuming membership in the Storage VO ```bash $ rsync --progress -av datafolder username@ssh.du1.cesnet.cz:VO_storage-cache_tape/. $ rsync --progress -av username@ssh.du1.cesnet.cz:VO_storage-cache_tape/datafolder . ``` -Transfer rates of about 28MB/s can be expected. +Transfer rates of about 28 MB/s can be expected. -- GitLab
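To complement the rsync examples above, a hedged SSHFS session sketch is included; the remote `VO_storage-cache_tape` path reuses the directory from the rsync examples and `username` is a placeholder.

```bash
# Sketch: mount the CESNET storage over SSHFS, use it like a local disk, then unmount.
$ mkdir cesnet
$ sshfs username@ssh.du1.cesnet.cz:VO_storage-cache_tape cesnet/
$ cp datafile cesnet/        # copy data in and out in the usual fashion
$ fusermount -u cesnet       # unmount when finished
```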