diff --git a/.spelling b/.spelling index 8bb1fc223af4955de6d57fa26186b23630386154..1f14925ff219e40702c0d98b7f05d95603a2aedd 100644 --- a/.spelling +++ b/.spelling @@ -25,6 +25,7 @@ TotalView Valgrind ParaView OpenFOAM +MAX_FAIRSHARE MPI4Py MPICH2 PETSc @@ -77,6 +78,173 @@ AnyConnect X11 backfilling backfilled +SCP +Lustre +QDR +TFLOP +ncpus +myjob +pernode +mpiprocs +ompthreads +qprace +runtime +SVS +ppn +Multiphysics +aeroacoustics +turbomachinery +CFD +LS-DYNA +APDL +MAPDL +multiphysics +AUTODYN +RSM +Molpro +initio +parallelization +NWChem +SCF +ISV +profiler +Pthreads +profilers +OTF +PAPI +PCM +uncore +pre-processing +prepend +CXX +prepended +POMP2 +Memcheck +unaddressable +OTF2 +GPI-2 +GASPI +GPI +MKL +IPP +TBB +GSL +Omics +VNC +Scalasca +IFORT +interprocedural +IDB +cloop +qcow +qcow2 +vmdk +vdi +virtio +paravirtualized +Gbit +tap0 +UDP +TCP +preload +qfat +Rmpi +DCT +datasets +dataset +preconditioners +partitioners +PARDISO +PaStiX +SuiteSparse +SuperLU +ExodusII +NetCDF +ParMETIS +multigrid +HYPRE +SPAI +Epetra +EpetraExt +Tpetra +64-bit +Belos +GMRES +Amesos +IFPACK +preconditioner +Teuchos +Makefiles +SAXPY +NVCC +VCF +HGMD +HUMSAVAR +ClinVar +indels +CIBERER +exomes +tmp +SSHFS +RSYNC +unmount +Cygwin +CygwinX +RFB +TightVNC +TigerVNC +GUIs +XLaunch +UTF-8 +numpad +PuTTYgen +OpenSSH +IE11 +x86 +r21u01n577 +7120P +interprocessor +IPN +toolchains +toolchain +APIs +easyblocks +GM200 +GeForce +GTX +IRUs +ASIC +backplane +ICEX +IRU +PFLOP +T950B +ifconfig +inet +addr +checkbox +appfile +programmatically +http +https +filesystem +phono3py +HDF +splitted +automize +llvm +PGI +GUPC +BUPC +IBV +Aislinn +nondeterminism +stdout +stderr +i.e. +pthreads +uninitialised +broadcasted - docs.it4i/anselm-cluster-documentation/environment-and-modules.md MODULEPATH bashrc @@ -110,6 +278,7 @@ Rmax E5-2665 E5-2470 P5110 +isw - docs.it4i/anselm-cluster-documentation/introduction.md RedHat - docs.it4i/anselm-cluster-documentation/job-priority.md @@ -117,6 +286,8 @@ walltime qexp _List.fairshare _time +_FAIRSHARE +1E6 - docs.it4i/anselm-cluster-documentation/job-submission-and-execution.md 15209.srv11 qsub @@ -137,6 +308,15 @@ jobscript cn108 cn109 cn110 +Name0 +cn17 +_NODEFILE +_O +_WORKDIR +mympiprog.x +_JOBID +myprog.x +openmpi - docs.it4i/anselm-cluster-documentation/network.md ib0 - docs.it4i/anselm-cluster-documentation/prace.md @@ -144,14 +324,19 @@ PRACE qfree it4ifree it4i.portal.clients +prace +1h - docs.it4i/anselm-cluster-documentation/shell-and-data-access.md VPN - docs.it4i/anselm-cluster-documentation/software/ansys/ansys-cfx.md ANSYS CFX cfx.pbs +_r +ane3fl - docs.it4i/anselm-cluster-documentation/software/ansys/ansys-mechanical-apdl.md mapdl.pbs +_dy - docs.it4i/anselm-cluster-documentation/software/ansys/ls-dyna.md HPC lsdyna.pbs @@ -166,9 +351,25 @@ Makefile - docs.it4i/anselm-cluster-documentation/software/gpi2.md gcc cn79 +helloworld +_gpi.c +ibverbs +gaspi +_logger - docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-compilers.md Haswell CPUs +ipo +O3 +vec +xAVX +omp +simd +ivdep +pragmas +openmp +xCORE-AVX2 +axCORE-AVX2 - docs.it4i/anselm-cluster-documentation/software/kvirtualization.md rc.local runlevel @@ -180,6 +381,8 @@ VDE smb.conf TMPDIR run.bat. 
+slirp +NATs - docs.it4i/anselm-cluster-documentation/software/mpi/mpi4py-mpi-for-python.md NumPy - docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab_1314.md @@ -188,33 +391,73 @@ matlabcode.m output.out matlabcodefile sched +_feature - docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab.md UV2000 +maxNumCompThreads +SalomonPBSPro - docs.it4i/anselm-cluster-documentation/software/numerical-languages/octave.md _THREADS +_NUM - docs.it4i/anselm-cluster-documentation/software/numerical-libraries/trilinos.md CMake-aware Makefile.export +_PACKAGE +_CXX +_COMPILER +_INCLUDE +_DIRS +_LIBRARY - docs.it4i/anselm-cluster-documentation/software/ansys/ansys-ls-dyna.md ansysdyna.pbs - docs.it4i/anselm-cluster-documentation/software/ansys/ansys.md svsfem.cz +_ - docs.it4i/anselm-cluster-documentation/software/debuggers/valgrind.md libmpiwrap-amd64-linux +O0 +valgrind +malloc +_PRELOAD - docs.it4i/anselm-cluster-documentation/software/numerical-libraries/magma-for-intel-xeon-phi.md cn204 +_LIBS +MAGMAROOT +_magma +_server +_anselm +_from +_mic.sh +_dgetrf +_mic +_03.pdf - docs.it4i/anselm-cluster-documentation/software/paraview.md cn77 localhost +v4.0.1 - docs.it4i/anselm-cluster-documentation/storage.md ssh.du1.cesnet.cz Plzen ssh.du2.cesnet.cz ssh.du3.cesnet.cz +tier1 +_home +_cache +_tape - docs.it4i/salomon/environment-and-modules.md icc +ictce +ifort +imkl +intel +gompi +goolf +BLACS +iompi +iccifort - docs.it4i/salomon/hardware-overview.md HW +E5-4627v2 - docs.it4i/salomon/job-submission-and-execution.md 15209.isrv5 r21u01n577 @@ -239,6 +482,7 @@ mkdir mympiprog.x mpiexec myprog.x +r4i7n0.ib0.smc.salomon.it4i.cz - docs.it4i/salomon/7d-enhanced-hypercube.md cns1 cns576 @@ -247,7 +491,165 @@ r4i7n17 cns577 cns1008 r37u31n1008 +7D - docs.it4i/anselm-cluster-documentation/resources-allocation-policy.md qsub it4ifree it4i.portal.clients + - docs.it4i/anselm-cluster-documentation/software/ansys/ansys-fluent.md +anslic +_admin + - docs.it4i/anselm-cluster-documentation/software/chemistry/nwchem.md +_DIR + - docs.it4i/anselm-cluster-documentation/software/comsol-multiphysics.md +EDU +comsol +_matlab.pbs +_job.m +mphstart + - docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-performance-reports.md +perf-report +perf +txt +html +mympiprog +_32p + - docs.it4i/anselm-cluster-documentation/software/debuggers/intel-vtune-amplifier.md +Hotspots + - docs.it4i/anselm-cluster-documentation/software/debuggers/scalasca.md +scorep + - docs.it4i/anselm-cluster-documentation/software/isv_licenses.md +edu +ansys +_features +_state.txt +f1 +matlab +acfd +_ansys +_acfd +_aa +_comsol +HEATTRANSFER +_HEATTRANSFER +COMSOLBATCH +_COMSOLBATCH +STRUCTURALMECHANICS +_STRUCTURALMECHANICS +_matlab +_Toolbox +_Image +_Distrib +_Comp +_Engine +_Acquisition +pmode +matlabpool + - docs.it4i/anselm-cluster-documentation/software/mpi/mpi.md +mpirun +BLAS1 +FFT +KMP +_AFFINITY +GOMP +_CPU +bullxmpi-1 +mpich2 + - docs.it4i/anselm-cluster-documentation/software/mpi/Running_OpenMPI.md +bysocket +bycore + - docs.it4i/anselm-cluster-documentation/software/numerical-libraries/fftw.md +gcc3.3.3 +pthread +fftw3 +lfftw3 +_threads-lfftw3 +_omp +icc3.3.3 +FFTW2 +gcc2.1.5 +fftw2 +lfftw +_threads +icc2.1.5 +fftw-mpi3 +_mpi +fftw3-mpi +fftw2-mpi +IntelMPI + - docs.it4i/anselm-cluster-documentation/software/numerical-libraries/gsl.md +dwt.c +mkl +lgsl + - docs.it4i/anselm-cluster-documentation/software/numerical-libraries/hdf5.md +icc +hdf5 +_INC +_SHLIB +_CPP +_LIB +_F90 +gcc49 + - 
docs.it4i/anselm-cluster-documentation/software/numerical-libraries/petsc.md +_Dist + - docs.it4i/anselm-cluster-documentation/software/nvidia-cuda.md +lcublas + - docs.it4i/anselm-cluster-documentation/software/operating-system.md +6.x + - docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/cygwin-and-x11-forwarding.md +startxwin +cygwin64binXWin.exe +tcp + - docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system.md +Xming +XWin.exe. + - docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/pageant.md +_rsa.ppk + - docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/puttygen.md +_keys +organization.example.com +_rsa + - docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md +vpnui.exe + - docs.it4i/salomon/ib-single-plane-topology.md +36-port +Mcell.pdf +r21-r38 +nodes.pdf + - docs.it4i/salomon/introduction.md +E5-2680v3 + - docs.it4i/salomon/network.md +r4i1n0 +r4i1n1 +r4i1n2 +r4i1n3 +ip + - docs.it4i/salomon/software/ansys/setting-license-preferences.md +ansys161 + - docs.it4i/salomon/software/ansys/workbench.md +mpifile.txt +solvehandlers.xml + - docs.it4i/salomon/software/chemistry/phono3py.md +vasprun.xml +disp-XXXXX +disp +_fc3.yaml +ir +_grid +_points.yaml +gofree-cond1 + - docs.it4i/salomon/software/compilers.md +HPF + - docs.it4i/salomon/software/comsol/licensing-and-available-versions.md +ver + - docs.it4i/salomon/software/debuggers/aislinn.md +test.cpp + - docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md +vtune +_update1 + - docs.it4i/salomon/software/debuggers/valgrind.md +EBROOTVALGRIND + - docs.it4i/salomon/software/intel-suite/intel-advisor.md +O2 + - docs.it4i/salomon/software/intel-suite/intel-compilers.md +UV1 diff --git a/docs.it4i/anselm-cluster-documentation/compute-nodes.md b/docs.it4i/anselm-cluster-documentation/compute-nodes.md index 2d4f1707c960531d25e64c206d5895a7a4e10338..34b1bd95b02b5b59d709e66a3a03ef44b8b7dc03 100644 --- a/docs.it4i/anselm-cluster-documentation/compute-nodes.md +++ b/docs.it4i/anselm-cluster-documentation/compute-nodes.md @@ -111,7 +111,7 @@ Memory Architecture - 8 DDR3 DIMMs per node - 4 DDR3 DIMMs per CPU - 1 DDR3 DIMMs per channel - - Data rate support: up to 1600MT/s + - Data rate support: up to 1600 MT/s - Populated memory: 8 x 8 GB DDR3 DIMM 1600 MHz ### Compute Node With GPU or MIC Accelerator @@ -121,7 +121,7 @@ Memory Architecture - 6 DDR3 DIMMs per node - 3 DDR3 DIMMs per CPU - 1 DDR3 DIMMs per channel - - Data rate support: up to 1600MT/s + - Data rate support: up to 1600 MT/s - Populated memory: 6 x 16 GB DDR3 DIMM 1600 MHz ### Fat Compute Node @@ -131,5 +131,5 @@ Memory Architecture - 16 DDR3 DIMMs per node - 8 DDR3 DIMMs per CPU - 2 DDR3 DIMMs per channel - - Data rate support: up to 1600MT/s + - Data rate support: up to 1600 MT/s - Populated memory: 16 x 32 GB DDR3 DIMM 1600 MHz diff --git a/docs.it4i/anselm-cluster-documentation/environment-and-modules.md b/docs.it4i/anselm-cluster-documentation/environment-and-modules.md index 0835ce008f277e982ecf3a297f40c43ecb60a46c..00110f89d8c36053619c4bde2d664133ec83b883 100644 --- a/docs.it4i/anselm-cluster-documentation/environment-and-modules.md +++ b/docs.it4i/anselm-cluster-documentation/environment-and-modules.md @@ -25,7 +25,7 @@ fi ``` !!! 
Note "Note" - Do not run commands outputting to standard output (echo, module list, etc) in .bashrc for non-interactive SSH sessions. It breaks fundamental functionality (scp, PBS) of your account! Conside utilization of SSH session interactivity for such commands as stated in the previous example. + Do not run commands outputting to standard output (echo, module list, etc) in .bashrc for non-interactive SSH sessions. It breaks fundamental functionality (SCP, PBS) of your account! Consider utilization of SSH session interactivity for such commands as stated in the previous example. ### Application Modules diff --git a/docs.it4i/anselm-cluster-documentation/job-submission-and-execution.md b/docs.it4i/anselm-cluster-documentation/job-submission-and-execution.md index b500a5a2be7cc0275961858008136d8de903b1c7..4a16a5608fcac9a180aadf4282ed6a43e0963a02 100644 --- a/docs.it4i/anselm-cluster-documentation/job-submission-and-execution.md +++ b/docs.it4i/anselm-cluster-documentation/job-submission-and-execution.md @@ -326,7 +326,7 @@ cd $SCRDIR || exit cp $PBS_O_WORKDIR/input . cp $PBS_O_WORKDIR/mympiprog.x . -# load the mpi module +# load the MPI module module load openmpi # execute the calculation @@ -362,7 +362,7 @@ Example jobscript for an MPI job with preloaded inputs and executables, options SCRDIR=/scratch/$USER/myjob cd $SCRDIR || exit -# load the mpi module +# load the MPI module module load openmpi # execute the calculation diff --git a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-cfx.md b/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-cfx.md index 1693bc723792cad5ae5cabbc979a5b2a2a525b4b..8137eee84437bc787bfba1cadfe2faced3186c8f 100644 --- a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-cfx.md +++ b/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-cfx.md @@ -50,7 +50,7 @@ echo Machines: $hl Header of the PBS file (above) is common and description can be find on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution/). SVS FEM recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. -Working directory has to be created before sending PBS job into the queue. Input file should be in working directory or full path to input file has to be specified. >Input file has to be defined by common CFX def file which is attached to the cfx solver via parameter -def +Working directory has to be created before sending PBS job into the queue. Input file should be in working directory or full path to input file has to be specified. >Input file has to be defined by common CFX def file which is attached to the CFX solver via parameter -def **License** should be selected by parameter -P (Big letter **P**). Licensed products are the following: aa_r (ANSYS **Academic** Research), ane3fl (ANSYS Multiphysics)-**Commercial**. 
[More about licensing here](licensing/) diff --git a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-ls-dyna.md index d712659f5794a75e75c96f64c0db3d107cfb2822..bf6a0844756c1523089cb2395c47dbe03df668aa 100644 --- a/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-ls-dyna.md +++ b/docs.it4i/anselm-cluster-documentation/software/ansys/ansys-ls-dyna.md @@ -1,7 +1,7 @@ ANSYS LS-DYNA ============= -**[ANSYSLS-DYNA](http://www.ansys.com/Products/Simulation+Technology/Structural+Mechanics/Explicit+Dynamics/ANSYS+LS-DYNA)** software provides convenient and easy-to-use access to the technology-rich, time-tested explicit solver without the need to contend with the complex input requirements of this sophisticated program. Introduced in 1996, ANSYS LS-DYNA capabilities have helped customers in numerous industries to resolve highly intricate design issues. ANSYS Mechanical users have been able take advantage of complex explicit solutions for a long time utilizing the traditional ANSYS Parametric Design Language (APDL) environment. These explicit capabilities are available to ANSYS Workbench users as well. The Workbench platform is a powerful, comprehensive, easy-to-use environment for engineering simulation. CAD import from all sources, geometry cleanup, automatic meshing, solution, parametric optimization, result visualization and comprehensive report generation are all available within a single fully interactive modern graphical user environment. +**[ANSYS LS-DYNA](http://www.ansys.com/Products/Simulation+Technology/Structural+Mechanics/Explicit+Dynamics/ANSYS+LS-DYNA)** software provides convenient and easy-to-use access to the technology-rich, time-tested explicit solver without the need to contend with the complex input requirements of this sophisticated program. Introduced in 1996, ANSYS LS-DYNA capabilities have helped customers in numerous industries to resolve highly intricate design issues. ANSYS Mechanical users have been able to take advantage of complex explicit solutions for a long time, utilizing the traditional ANSYS Parametric Design Language (APDL) environment. These explicit capabilities are available to ANSYS Workbench users as well. The Workbench platform is a powerful, comprehensive, easy-to-use environment for engineering simulation. CAD import from all sources, geometry cleanup, automatic meshing, solution, parametric optimization, result visualization and comprehensive report generation are all available within a single fully interactive modern graphical user environment. To run ANSYS LS-DYNA in batch mode you can utilize/modify the default ansysdyna.pbs script and execute it via the qsub command. diff --git a/docs.it4i/anselm-cluster-documentation/software/chemistry/nwchem.md index 0318c4a119730a4cabb6163287bb3dc5d2ef236d..58a67079fafddcaef3def9dc1b6066d81b07da3f 100644 --- a/docs.it4i/anselm-cluster-documentation/software/chemistry/nwchem.md +++ b/docs.it4i/anselm-cluster-documentation/software/chemistry/nwchem.md @@ -42,4 +42,4 @@ Options Please refer to [the documentation](http://www.nwchem-sw.org/index.php/Release62:Top-level) and in the input file set the following directives : - MEMORY : controls the amount of memory NWChem will use -- SCRATCH_DIR : set this to a directory in [SCRATCH file system](../../storage/storage/#scratch) (or run the calculation completely in a scratch directory). 
For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, e.g.. "scf direct" +- SCRATCH_DIR : set this to a directory in [SCRATCH file system](../../storage/storage/#scratch) (or run the calculation completely in a scratch directory). For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, e.g. "SCF direct" diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-ddt.md index e09cd64a3f7724af581315d1e3a936b84faa381f..9ae809c2160955ef496b791b07d9ff99baa15622 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-ddt.md +++ b/docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-ddt.md @@ -5,7 +5,7 @@ Allinea Forge consists of two tools - debugger DDT and profiler MAP. Allinea DDT is a commercial debugger primarily for debugging parallel MPI or OpenMP programs. It also has support for GPU (CUDA) and Intel Xeon Phi accelerators. DDT provides all the standard debugging features (stack trace, breakpoints, watches, view variables, threads etc.) for every thread running as part of your program, or for every process - even if these processes are distributed across a cluster using an MPI implementation. -Allinea MAP is a profiler for C/C++/Fortran HPC codes. It is designed for profiling parallel code, which uses pthreads, OpenMP or MPI. +Allinea MAP is a profiler for C/C++/Fortran HPC codes. It is designed for profiling parallel code, which uses Pthreads, OpenMP or MPI. License and Limitations for Anselm Users ---------------------------------------- diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-performance-reports.md index a563ec561798a03620183c58d3b647527a1316ab..2ba001dcdd617b64d66cd4c39b0e41928a605a00 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-performance-reports.md +++ b/docs.it4i/anselm-cluster-documentation/software/debuggers/allinea-performance-reports.md @@ -32,7 +32,7 @@ Instead of [running your MPI program the usual way](../mpi/), use the perf r $ perf-report mpirun ./mympiprog.x ``` -The mpi program will run as usual. The perf-report creates two additional files, in *.txt and *.html format, containing the performance report. Note that [demanding MPI codes should be run within the queue system](../../resource-allocation-and-job-execution/job-submission-and-execution/). +The MPI program will run as usual. The perf-report creates two additional files, in *.txt and *.html format, containing the performance report. Note that [demanding MPI codes should be run within the queue system](../../resource-allocation-and-job-execution/job-submission-and-execution/). Example ------- diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/cube.md index a416deab5cd4873a6e9a19877d6b0545c7ea5dfc..ae41c497d0184733f83887d7af85737bc0f7e9dd 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/cube.md +++ b/docs.it4i/anselm-cluster-documentation/software/debuggers/cube.md @@ -31,7 +31,7 @@ CUBE is a graphical application. Refer to Graphical User Interface documentation !!! Note "Note" Analyzing large data sets can consume large amount of CPU and RAM. Do not perform large analysis on login nodes. 
-After loading the appropriate module, simply launch cube command, or alternatively you can use scalasca -examine command to launch the GUI. Note that for Scalasca datasets, if you do not analyze the data with scalasca -examine before to opening them with CUBE, not all performance data will be available. +After loading the appropriate module, simply launch the cube command, or alternatively use the `scalasca -examine` command to launch the GUI. Note that for Scalasca data sets, if you do not analyze the data with `scalasca -examine` before opening them with CUBE, not all performance data will be available. References 1. <http://www.scalasca.org/software/cube-4.x/download.html> diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/papi.md index 3bc686243940268ae0f58a52ea06ac6904a156f4..ed126cfa2ea38e5499cf06a03791dc381859781d 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/papi.md +++ b/docs.it4i/anselm-cluster-documentation/software/debuggers/papi.md @@ -11,13 +11,13 @@ PAPI can be used with parallel as well as serial programs. Usage ----- -To use PAPI, load [module](../../environment-and-modules/) papi: +To use PAPI, load the PAPI [module](../../environment-and-modules/): ```bash $ module load papi ``` -This will load the default version. Execute module avail papi for a list of installed versions. +This will load the default version. Execute `module avail papi` for a list of installed versions. Utilities -------- diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/scalasca.md index 76e227f196f4e834457f4becc4648b917e6cf2a8..f74e9dfb33dad154fd64dc37f54a801b395e6dd4 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/scalasca.md +++ b/docs.it4i/anselm-cluster-documentation/software/debuggers/scalasca.md @@ -24,13 +24,13 @@ Profiling a parallel application with Scalasca consists of three steps: ### Instrumentation -Instrumentation via " scalasca -instrument" is discouraged. Use [Score-P instrumentation](score-p/). +Instrumentation via `scalasca -instrument` is discouraged. Use [Score-P instrumentation](score-p/). ### Runtime measurement -After the application is instrumented, runtime measurement can be performed with the " scalasca -analyze" command. The syntax is: +After the application is instrumented, runtime measurement can be performed with the `scalasca -analyze` command. The syntax is: -scalasca -analyze [scalasca options] [launcher] [launcher options] [program] [program options] +`scalasca -analyze [scalasca options] [launcher] [launcher options] [program] [program options]` An example : @@ -62,7 +62,7 @@ If you do not wish to launch the GUI tool, use the "-s" option : scalasca -examine -s <experiment_directory> ``` -Alternatively you can open CUBE and load the data directly from here. Keep in mind that in that case the preprocessing is not done and not all metrics will be shown in the viewer. +Alternatively you can open CUBE and load the data directly from here. Keep in mind that in that case the pre-processing is not done and not all metrics will be shown in the viewer. Refer to [CUBE documentation](cube/) on usage of the GUI viewer. 
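The scalasca.md hunks above describe the measure-then-examine workflow in fragments. A minimal end-to-end sketch is given below; the program name, rank count, and experiment directory name are illustrative assumptions, not commands taken from this patch.

```bash
# Hypothetical Scalasca session; names are placeholders.
module load scalasca

# 1. Instrument at build time via Score-P, as the docs recommend
scorep mpicc -O2 -o mympiprog.x mympiprog.c

# 2. Collect a runtime measurement; scalasca wraps the usual MPI launcher
scalasca -analyze mpirun -np 4 ./mympiprog.x

# 3. Post-process and inspect the resulting experiment directory
#    (-s skips the GUI; drop it to open the result in CUBE)
scalasca -examine -s scorep_mympiprog_4_sum
```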
diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/valgrind.md index 1b0919431ac1fcb01425837d0087374d6ac6685e..fa5603a06985b4dfafd7d2b85d037493eb5bdb95 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/valgrind.md +++ b/docs.it4i/anselm-cluster-documentation/software/debuggers/valgrind.md @@ -260,4 +260,4 @@ Prints this output : (note that there is output printed for every launched MPI p ==31319== ERROR SUMMARY: 1 errors from 1 contexts (suppressed: 4 from 4) ``` -We can see that Valgrind has reported use of unitialised memory on the master process (which reads the array to be broadcast) and use of unaddresable memory on both processes. +We can see that Valgrind has reported use of uninitialized memory on the master process (which reads the array to be broadcast) and use of unaddressable memory on both processes. diff --git a/docs.it4i/anselm-cluster-documentation/software/debuggers/vampir.md index 129eb41ddbf9443e606f8254bb654846fe570877..d7f7e7c71c09d2474b30c392005cdbc08b9e2d86 100644 --- a/docs.it4i/anselm-cluster-documentation/software/debuggers/vampir.md +++ b/docs.it4i/anselm-cluster-documentation/software/debuggers/vampir.md @@ -1,4 +1,4 @@ -hVampir +Vampir ====== Vampir is a commercial trace analysis and visualization tool. It can work with traces in OTF and OTF2 formats. It does not have the functionality to collect traces; you need to use a trace collection tool (such as [Score-P](../../../salomon/software/debuggers/score-p/)) first to collect the traces. diff --git a/docs.it4i/anselm-cluster-documentation/software/gpi2.md index d61fbed6f984945d0751ae4abf6e2e241ddffc1b..eb1c175b28427da217d03de6d4921b993327238d 100644 --- a/docs.it4i/anselm-cluster-documentation/software/gpi2.md +++ b/docs.it4i/anselm-cluster-documentation/software/gpi2.md @@ -160,7 +160,7 @@ Submit the job and run the GPI-2 application Hello from rank 0 of 2 ``` -At the same time, in another session, you may start the gaspi logger: +At the same time, in another session, you may start the GASPI logger: ```bash $ ssh cn79 diff --git a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-compilers.md index 75ea441489d47ed7d5ea7f4e575e54ccffeba6c6..e2607fd95a422db3cbf7fb9bdc982b2eea287271 100644 --- a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-compilers.md +++ b/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-compilers.md @@ -1,7 +1,7 @@ Intel Compilers =============== -The Intel compilers version 13.1.1 are available, via module intel. The compilers include the icc C and C++ compiler and the ifort fortran 77/90/95 compiler. +The Intel compilers version 13.1.1 are available via the module intel. The compilers include the ICC C and C++ compiler and the IFORT Fortran 77/90/95 compiler. ```bash $ module load intel @@ -9,7 +9,7 @@ The Intel compilers version 13.1.1 are available, via module intel. 
The compiler $ ifort -v ``` -The intel compilers provide for vectorization of the code, via the AVX instructions and support threading parallelization via OpenMP +The Intel compilers provide vectorization of the code via the AVX instructions, and support threading parallelization via OpenMP. For maximum performance on the Anselm cluster, compile your programs using the AVX instructions, with reporting where the vectorization was used. We recommend the following compilation options for high performance diff --git a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-debugger.md index 92e19f9c03e985fc8660d19a4a5df5a09942f4be..b6ddfa12e1b67634e4786d61e1f60a1fc53c4105 100644 --- a/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-debugger.md +++ b/docs.it4i/anselm-cluster-documentation/software/intel-suite/intel-debugger.md @@ -71,5 +71,5 @@ Run the idb debugger in GUI mode. The menu Parallel contains a number of tools for Further information ------------------- -Exhaustive manual on idb features and usage is published at [Intel website](http://software.intel.com/sites/products/documentation/doclib/stdxe/2013/composerxe/debugger/user_guide/index.htm) +An exhaustive manual on IDB features and usage is published at the [Intel website](http://software.intel.com/sites/products/documentation/doclib/stdxe/2013/composerxe/debugger/user_guide/index.htm) diff --git a/docs.it4i/anselm-cluster-documentation/software/isv_licenses.md index 2303a969bfbf19d0ae5c1ca4b05ef955a3bd86b2..7c19519292f4c7cbf231584f62bf821b6ac3e732 100644 --- a/docs.it4i/anselm-cluster-documentation/software/isv_licenses.md +++ b/docs.it4i/anselm-cluster-documentation/software/isv_licenses.md @@ -64,11 +64,11 @@ The general format of the name is: Names of applications (APP): - ansys - comsol - comsol-edu - matlab - matlab-edu To get the FEATUREs of a license take a look into the corresponding state file ([see above](isv_licenses/#Licence)), or use: diff --git a/docs.it4i/anselm-cluster-documentation/software/kvirtualization.md index 508fe8bf91326cf468ce398d2e2ae275a02c8ff4..a1b5ed7338a6c65a6723c844034c77f50973365c 100644 --- a/docs.it4i/anselm-cluster-documentation/software/kvirtualization.md +++ b/docs.it4i/anselm-cluster-documentation/software/kvirtualization.md @@ -76,7 +76,7 @@ QEMU currently supports these image types or formats: - vmdk - VMware 3 & 4, or 6 image format, for exchanging images with that product - vdi - VirtualBox 1.1 compatible image format, for exchanging images with VirtualBox. -You can convert your existing image using qemu-img convert command. Supported formats of this command are: blkdebug blkverify bochs cloop cow dmg file ftp ftps host_cdrom host_device host_floppy http https nbd parallels qcow qcow2 qed raw sheepdog tftp vdi vhdx vmdk vpc vvfat. +You can convert your existing image using the `qemu-img convert` command. Supported formats of this command are: `blkdebug blkverify bochs cloop cow dmg file ftp ftps host_cdrom host_device host_floppy http https nbd parallels qcow qcow2 qed raw sheepdog tftp vdi vhdx vmdk vpc vvfat`. We recommend using the advanced QEMU native image format qcow2. @@ -92,7 +92,7 @@ Remove all unnecessary software and files. Remove all paging space, swap files, partitions, etc. -Shrink your image. 
(It is recommended to zero all free space and reconvert image using qemu-img.) +Shrink your image. (It is recommended to zero all free space and reconvert the image using `qemu-img`.) ### Modify your image for running jobs @@ -230,7 +230,7 @@ Run virtual machine (simple) $ qemu-system-x86_64 -hda win.img -enable-kvm -cpu host -smp 16 -m 32768 -vga std -localtime -usb -usbdevice tablet -vnc :0 ``` -You can access virtual machine by VNC viewer (option -vnc) connecting to IP address of compute node. For VNC you must use VPN network. +You can access the virtual machine by a VNC viewer (option `-vnc`) connecting to the IP address of the compute node. For VNC you must use the VPN network. Install virtual machine from ISO file @@ -303,7 +303,7 @@ Run SLIRP daemon over SSH tunnel on login node and connect it to virtual network $ dpipe vde_plug /tmp/sw0 = ssh login1 $VDE2_DIR/bin/slirpvde -s - --dhcp & ``` -Run qemu using vde network back-end, connect to created virtual switch. +Run QEMU using the VDE network back-end and connect it to the created virtual switch. Basic setup (obsolete syntax) @@ -325,11 +325,11 @@ Optimized setup **TAP interconnect** -Both user and vde network back-end have low performance. For fast interconnect (10 Gbit/s and more) of compute node (host) and virtual machine (guest) we suggest using Linux kernel TAP device. +Both the user and VDE network back-ends have low performance. For fast interconnect (10 Gbit/s and more) of the compute node (host) and virtual machine (guest) we suggest using the Linux kernel TAP device. Cluster Anselm provides TAP device tap0 for your job. TAP interconnect does not provide any services (like NAT, DHCP, DNS, SMB, etc.), just raw networking, so you should provide your services if you need them. -Run qemu with TAP network back-end: +Run QEMU with the TAP network back-end: ```bash $ qemu-system-x86_64 ... -device virtio-net-pci,netdev=net1 @@ -340,9 +340,9 @@ Interface tap0 has IP address 192.168.1.1 and network mask 255.255.255.0 (/24). Redirected ports: -- DNS udp/53->udp/3053, tcp/53->tcp3053 -- DHCP udp/67->udp3067 -- SMB tcp/139->tcp3139, tcp/445->tcp3445). +- DNS UDP/53->UDP/3053, TCP/53->TCP/3053 +- DHCP UDP/67->UDP/3067 +- SMB TCP/139->TCP/3139, TCP/445->TCP/3445. You can configure IP address of virtual machine statically or dynamically. For dynamic addressing provide your DHCP server on port 3067 of tap0 interface, you can also provide your DNS server on port 3053 of tap0 interface for example: diff --git a/docs.it4i/anselm-cluster-documentation/software/mpi/mpi.md index 5f81e8ee4eb3855c549bf3f558431afd3073e3f7..676c19ae150a2c53ae9b887799b39fa6f88bff9e 100644 --- a/docs.it4i/anselm-cluster-documentation/software/mpi/mpi.md +++ b/docs.it4i/anselm-cluster-documentation/software/mpi/mpi.md @@ -112,7 +112,7 @@ Running MPI Programs The MPI program executable must be compatible with the loaded MPI module. Always compile and execute using the very same MPI module. -It is strongly discouraged to mix mpi implementations. Linking an application with one MPI implementation and running mpirun/mpiexec form other implementation may result in unexpected errors. +It is strongly discouraged to mix MPI implementations. Linking an application with one MPI implementation and running mpirun/mpiexec from another implementation may result in unexpected errors. The MPI program executable must be available within the same path on all nodes. This is automatically fulfilled on the /home and /scratch file system. 
You need to preload the executable, if running on the local scratch /lscratch file system. diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab.md index dbe107990dfa538f177344017b77a58a80117c5a..3a09a39f0638c6ebd697dc673aaca8ee8ae34536 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab.md +++ b/docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab.md @@ -273,8 +273,8 @@ You can use MATLAB on UV2000 in two parallel modes: ### Threaded mode -Since this is a SMP machine, you can completely avoid using Parallel Toolbox and use only MATLAB's threading. MATLAB will automatically detect the number of cores you have allocated and will set maxNumCompThreads accordingly and certain operations, such as fft, , eig, svd, etc. will be automatically run in threads. The advantage of this mode is that you don't need to modify your existing sequential codes. +Since this is an SMP machine, you can completely avoid using the Parallel Toolbox and use only MATLAB's threading. MATLAB will automatically detect the number of cores you have allocated and set maxNumCompThreads accordingly; certain operations, such as `fft`, `eig`, and `svd`, will automatically run in threads. The advantage of this mode is that you don't need to modify your existing sequential codes. ### Local cluster mode -You can also use Parallel Toolbox on UV2000. Use l[ocal cluster mode](matlab/#parallel-matlab-batch-job-in-local-mode), "SalomonPBSPro" profile will not work. +You can also use the Parallel Toolbox on UV2000. Use [local cluster mode](matlab/#parallel-matlab-batch-job-in-local-mode); the "SalomonPBSPro" profile will not work. diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab_1314.md index 84b2897ea299701773c9d9736d2af4ed52e2ed3a..3893758a5d450be0c4b5fbb53f8ce97cb7b1d406 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab_1314.md +++ b/docs.it4i/anselm-cluster-documentation/software/numerical-languages/matlab_1314.md @@ -83,7 +83,7 @@ Once this file is in place, user can request resources from PBS. Following examp -l feature__matlab__MATLAB=1 ``` -This qsub command example shows how to run Matlab with 32 workers in following configuration: 2 nodes (use all 16 cores per node) and 16 workers = mpirocs per node (-l select=2:ncpus=16:mpiprocs=16). If user requires to run smaller number of workers per node then the "mpiprocs" parameter has to be changed. +This qsub command example shows how to run Matlab with 32 workers in the following configuration: 2 nodes (use all 16 cores per node) and 16 workers = mpiprocs per node (-l select=2:ncpus=16:mpiprocs=16). If the user needs to run a smaller number of workers per node, the "mpiprocs" parameter has to be changed. The second part of the command shows how to request all necessary licenses. In this case 1 Matlab-EDU license and 32 Distributed Computing Engines licenses. 
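The matlab_1314.md hunk above notes that only mpiprocs needs to change when fewer workers per node are wanted. A hedged sketch of such an adjusted qsub call follows; the account string, queue, and jobscript name are assumptions, while the license resource mirrors the feature__matlab__MATLAB request shown above.

```bash
# Sketch only: 16 workers total as 2 nodes x 8 workers, on 16-core nodes.
# Only mpiprocs drops from 16 to 8; ncpus still claims whole nodes.
qsub -A OPEN-0-0 -q qprod \
     -l select=2:ncpus=16:mpiprocs=8 \
     -l feature__matlab__MATLAB=1 \
     ./matlabjob.pbs
```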
diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/fftw.md index 8920a402fdc3f1d614a71cebc15bbda6313f115e..e91dbb564edcd13dfe42a6837d57574f91976ee1 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/fftw.md +++ b/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/fftw.md @@ -5,7 +5,7 @@ The discrete Fourier transform in one or more dimensions, MPI parallel FFTW is a C subroutine library for computing the discrete Fourier transform in one or more dimensions, of arbitrary input size, and of both real and complex data (as well as of even/odd data, e.g. the discrete cosine/sine transforms or DCT/DST). The FFTW library allows for MPI parallel, in-place discrete Fourier transform, with data distributed over number of nodes. -Two versions, **3.3.3** and **2.1.5** of FFTW are available on Anselm, each compiled for **Intel MPI** and **OpenMPI** using **intel** and **gnu** compilers. These are available via modules: +Two versions, **3.3.3** and **2.1.5**, of FFTW are available on Anselm, each compiled for **Intel MPI** and **OpenMPI** using **Intel** and **GNU** compilers. These are available via modules: |Version |Parallelization |module |linker options | | --- | --- | diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/hdf5.md index ae758f5e09d24495f2e64d340230d59448eb4b37..b4bed2af8def92a462d1f6081aa7b81118c4bd98 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/hdf5.md +++ b/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/hdf5.md @@ -5,7 +5,7 @@ Hierarchical Data Format library. Serial and MPI parallel version. [HDF5 (Hierarchical Data Format)](http://www.hdfgroup.org/HDF5/) is a general purpose library and file format for storing scientific data. HDF5 can store two primary objects: datasets and groups. A dataset is essentially a multidimensional array of data elements, and a group is a structure for organizing objects in an HDF5 file. Using these two basic objects, one can create and store almost any kind of scientific data structure, such as images, arrays of vectors, and structured and unstructured grids. You can also mix and match them in HDF5 files according to your needs. -Versions **1.8.11** and **1.8.13** of HDF5 library are available on Anselm, compiled for **Intel MPI** and **OpenMPI** using **intel** and **gnu** compilers. These are available via modules: +Versions **1.8.11** and **1.8.13** of the HDF5 library are available on Anselm, compiled for **Intel MPI** and **OpenMPI** using **Intel** and **GNU** compilers. These are available via modules: |Version |Parallelization |module |C linker options|C++ linker options|Fortran linker options | | --- | --- | diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/petsc.md index 8cdcd2b8e57550ccc2a5baeedc71835950121aee..0d4d94f7befae18a1231c6f358d5be1717b43d1b 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/petsc.md +++ b/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/petsc.md @@ -34,14 +34,14 @@ PETSc needs at least MPI, BLAS and LAPACK. 
These dependencies are currently sati PETSc can be linked with a plethora of [external numerical libraries](http://www.mcs.anl.gov/petsc/miscellaneous/external.html), extending PETSc functionality, e.g. direct linear system solvers, preconditioners or partitioners. See below a list of libraries currently included in Anselm `petsc` modules. All these libraries can be used also alone, without PETSc. Their static or shared program libraries are available in -`$PETSC_DIR/$PETSC_ARCH/lib` and header files in `$PETSC_DIR/$PETSC_ARCH/include`. `PETSC_DIR` and `PETSC_ARCH` are environment variables pointing to a specific PETSc instance based on the petsc module loaded. +`$PETSC_DIR/$PETSC_ARCH/lib` and header files in `$PETSC_DIR/$PETSC_ARCH/include`. `PETSC_DIR` and `PETSC_ARCH` are environment variables pointing to a specific PETSc instance based on the loaded `petsc` module. ### Libraries linked to PETSc on Anselm (as of 11 April 2015) - dense linear algebra - [Elemental](http://libelemental.org/) - sparse linear system solvers - - [Intel MKL Pardiso](https://software.intel.com/en-us/node/470282) + - [Intel MKL PARDISO](https://software.intel.com/en-us/node/470282) - [MUMPS](http://mumps.enseeiht.fr/) - [PaStiX](http://pastix.gforge.inria.fr/) - [SuiteSparse](http://faculty.cse.tamu.edu/davis/suitesparse.html) @@ -57,6 +57,6 @@ All these libraries can be used also alone, without PETSc. Their static or share - [ParMETIS](http://glaros.dtc.umn.edu/gkhome/metis/parmetis/overview) - [PT-Scotch](http://www.labri.fr/perso/pelegrin/scotch/) - preconditioners & multigrid - - [Hypre](http://acts.nersc.gov/hypre/) + - [HYPRE](http://acts.nersc.gov/hypre/) - [Trilinos ML](http://trilinos.sandia.gov/packages/ml/) - [SPAI - Sparse Approximate Inverse](https://bitbucket.org/petsc/pkg-spai) diff --git a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/trilinos.md index e4ca472867371b26240e5d5ade9f0c9f441d265e..065496928ab0c32160cb4f6924006794e2722a81 100644 --- a/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/trilinos.md +++ b/docs.it4i/anselm-cluster-documentation/software/numerical-libraries/trilinos.md @@ -35,7 +35,7 @@ First, load the appropriate module: For the compilation of CMake-aware project, Trilinos provides the FIND_PACKAGE( Trilinos ) capability, which makes it easy to build against Trilinos, including linking against the correct list of libraries. For details, see <http://trilinos.sandia.gov/Finding_Trilinos.txt> -For compiling using simple makefiles, Trilinos provides Makefile.export system, which allows users to include important Trilinos variables directly into their makefiles. This can be done simply by inserting the following line into the makefile: +For compiling using simple Makefiles, Trilinos provides the Makefile.export system, which allows users to include important Trilinos variables directly into their Makefiles. This can be done simply by inserting the following line into the Makefile: ```bash include Makefile.export.Trilinos ``` or include Makefile.export.<package> ``` -if you are interested only in a specific Trilinos package. This will give you access to the variables such as Trilinos_CXX_COMPILER, Trilinos_INCLUDE_DIRS, Trilinos_LIBRARY_DIRS etc. For the detailed description and example makefile see <http://trilinos.sandia.gov/Export_Makefile.txt>. +if you are interested only in a specific Trilinos package. 
This will give you access to the variables such as Trilinos_CXX_COMPILER, Trilinos_INCLUDE_DIRS, Trilinos_LIBRARY_DIRS etc. For the detailed description and example Makefile see <http://trilinos.sandia.gov/Export_Makefile.txt>. diff --git a/docs.it4i/anselm-cluster-documentation/software/omics-master/diagnostic-component-team.md index 0fbdc214f727d55f454f844dfde02b9c2eca226d..8a2bdc666c7012bf5a2c151612d60ef3022ef750 100644 --- a/docs.it4i/anselm-cluster-documentation/software/omics-master/diagnostic-component-team.md +++ b/docs.it4i/anselm-cluster-documentation/software/omics-master/diagnostic-component-team.md @@ -12,7 +12,7 @@ TEAM is available at the following address: <http://omics.it4i.cz/team/> VCF files are scanned by this diagnostic tool for known diagnostic disease-associated variants. When no diagnostic mutation is found, the file can be sent to the disease-causing gene discovery tool to see whether new disease associated variants can be found. -TEAM (27) is an intuitive and easy-to-use web tool that fills the gap between the predicted mutations and the final diagnostic in targeted enrichment sequencing analysis. The tool searches for known diagnostic mutations, corresponding to a disease panel, among the predicted patient’s variants. Diagnostic variants for the disease are taken from four databases of disease-related variants (HGMD-public, HUMSAVAR , ClinVar and COSMIC) If no primary diagnostic variant is found, then a list of secondary findings that can help to establish a diagnostic is produced. TEAM also provides with an interface for the definition of and customization of panels, by means of which, genes and mutations can be added or discarded to adjust panel definitions. +TEAM (27) is an intuitive and easy-to-use web tool that fills the gap between the predicted mutations and the final diagnostic in targeted enrichment sequencing analysis. The tool searches for known diagnostic mutations, corresponding to a disease panel, among the predicted patient’s variants. Diagnostic variants for the disease are taken from four databases of disease-related variants (HGMD, HUMSAVAR, ClinVar and COSMIC). If no primary diagnostic variant is found, then a list of secondary findings that can help to establish a diagnostic is produced. TEAM also provides an interface for the definition and customization of panels, by means of which genes and mutations can be added or discarded to adjust panel definitions.  diff --git a/docs.it4i/anselm-cluster-documentation/software/omics-master/priorization-component-bierapp.md index 8b5cb8cf6ae3cdf00649ea640ec417920d3ad76c..a2b0faf628961ea2daa9bed053f2a1df65bdf87e 100644 --- a/docs.it4i/anselm-cluster-documentation/software/omics-master/priorization-component-bierapp.md +++ b/docs.it4i/anselm-cluster-documentation/software/omics-master/priorization-component-bierapp.md @@ -6,7 +6,7 @@ Prioritization component (BiERapp) BiERapp is available at the following address: <http://omics.it4i.cz/bierapp/> !!! Note "Note" - The address is accessible onlyvia VPN. + The address is accessible only via VPN. 
###BiERapp diff --git a/docs.it4i/anselm-cluster-documentation/software/paraview.md index b9deba00e7e363e2089427f58dac808daba62585..5215eb4965ad8382556b975155e2f9c97a171f40 100644 --- a/docs.it4i/anselm-cluster-documentation/software/paraview.md +++ b/docs.it4i/anselm-cluster-documentation/software/paraview.md @@ -55,7 +55,7 @@ Because a direct connection is not allowed to compute nodes on Anselm, you must ssh -TN -L 12345:cn77:11111 username@anselm.it4i.cz ``` -replace username with your login and cn77 with the name of compute node your ParaView server is running on (see previous step). If you use PuTTY on Windows, load Anselm connection configuration, t>hen go to Connection-> SSH>->Tunnels to set up the port forwarding. Click Remote radio button. Insert 12345 to Source port textbox. Insert cn77:11111. Click Add button, then Open. +replace username with your login and cn77 with the name of the compute node your ParaView server is running on (see previous step). If you use PuTTY on Windows, load the Anselm connection configuration, then go to Connection -> SSH -> Tunnels to set up the port forwarding. Click the Remote radio button. Insert 12345 into the Source port text box. Insert cn77:11111. Click the Add button, then Open. Now launch ParaView client installed on your desktop PC. Select File->Connect..., click Add Server. Fill in the following : diff --git a/docs.it4i/anselm-cluster-documentation/storage.md index d67279a8ae66cfc36af743d7db9850758961643f..b0298793a720b73613cb6e679c94d1cf4ff0640b 100644 --- a/docs.it4i/anselm-cluster-documentation/storage.md +++ b/docs.it4i/anselm-cluster-documentation/storage.md @@ -1,7 +1,7 @@ Storage ======= -There are two main shared file systems on Anselm cluster, the [HOME](#home) and [SCRATCH](#scratch). All login and compute nodes may access same data on shared file systems. Compute nodes are also equipped with local (non-shared) scratch, ramdisk and tmp file systems. +There are two main shared file systems on the Anselm cluster, the [HOME](#home) and [SCRATCH](#scratch). All login and compute nodes may access the same data on the shared file systems. Compute nodes are also equipped with local (non-shared) scratch, RAM disk and tmp file systems. Archiving --------- @@ -362,7 +362,7 @@ First, create the mount point $ mkdir cesnet ``` -Mount the storage. Note that you can choose among the ssh.du1.cesnet.cz (Plzen), ssh.du2.cesnet.cz (Jihlava), ssh.du3.cesnet.cz (Brno) Mount tier1_home **(only 5120M !)**: +Mount the storage. Note that you can choose among ssh.du1.cesnet.cz (Plzen), ssh.du2.cesnet.cz (Jihlava) and ssh.du3.cesnet.cz (Brno). Mount tier1_home **(only 5120 MB!)**: ```bash $ sshfs username@ssh.du1.cesnet.cz:. cesnet/ @@ -394,16 +394,16 @@ Once done, please remember to unmount the storage $ fusermount -u cesnet ``` -### Rsync access +### RSYNC access !!! Note "Note" - Rsync provides delta transfer for best performance, can resume interrupted transfers + RSYNC provides delta transfer for best performance, and can resume interrupted transfers -Rsync is a fast and extraordinarily versatile file copying tool. It is famous for its delta-transfer algorithm, which reduces the amount of data sent over the network by sending only the differences between the source files and the existing files in the destination. Rsync is widely used for backups and mirroring and as an improved copy command for everyday use. +RSYNC is a fast and extraordinarily versatile file copying tool. 
It is famous for its delta-transfer algorithm, which reduces the amount of data sent over the network by sending only the differences between the source files and the existing files in the destination. RSYNC is widely used for backups and mirroring and as an improved copy command for everyday use. -Rsync finds files that need to be transferred using a "quick check" algorithm (by default) that looks for files that have changed in size or in last-modified time. Any changes in the other preserved attributes (as requested by options) are made on the destination file directly when the quick check indicates that the file's data does not need to be updated. +RSYNC finds files that need to be transferred using a "quick check" algorithm (by default) that looks for files that have changed in size or in last-modified time. Any changes in the other preserved attributes (as requested by options) are made on the destination file directly when the quick check indicates that the file's data does not need to be updated. -More about Rsync at <https://du.cesnet.cz/en/navody/rsync/start#pro_bezne_uzivatele> +More about RSYNC at <https://du.cesnet.cz/en/navody/rsync/start#pro_bezne_uzivatele> Transfer large files to/from CESNET storage, assuming membership in the Storage VO diff --git a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system.md index 9952f60e130976f0e16cedc61accfd84958fdd01..e3c397a174c81af6e8f22f07abf49519b6b7bc67 100644 --- a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system.md +++ b/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/graphical-user-interface/x-window-system.md @@ -129,7 +129,7 @@ However this method does not seem to work with recent Linux distributions and yo Gnome on Windows ---------------- -Use Xlaunch to start the Xming server or run the XWin.exe. Select the "One window" mode. +Use XLaunch to start the Xming server or run XWin.exe. Select the "One window" mode. Log in to the cluster, using PuTTY. On the cluster, run the gnome-session command. diff --git a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md index ba5c29fdbe097438b4cc4eb158422c8fb8ec4137..b9a196f0f7c7a7d5bef1946b78eb33c036874c09 100644 --- a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md +++ b/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys.md @@ -114,4 +114,4 @@ In this example, we add an additional public key, stored in file additional_key. How to remove your own key -------------------------- -Removing your key from authorized_keys can be done simply by deleting the corresponding public key which can be identified by a comment at the end of line (eg. *username@organization.example.com*). +Removing your key from authorized_keys can be done simply by deleting the corresponding public key, which can be identified by a comment at the end of the line (e.g. *username@organization.example.com*). 
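The ssh-keys.md hunk above describes deleting a key identified by its trailing comment. One concrete way to do that with standard tools is sketched below, using the example comment from the docs.

```bash
# Keep every line except the one whose comment matches; write to a
# temporary file first so a slip cannot truncate authorized_keys itself.
grep -v 'username@organization.example.com' ~/.ssh/authorized_keys > ~/.ssh/authorized_keys.tmp
mv ~/.ssh/authorized_keys.tmp ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
```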
diff --git a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md index 03e8702677432f344a816a28378c2a9780007ece..ae2715a31e79b535123b6274959ef29fd808847a 100644 --- a/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md +++ b/docs.it4i/get-started-with-it4innovations/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md @@ -8,7 +8,7 @@ AnyConnect users on Windows 8.1 will receive a "Failed to initialize connection **Workaround:** - Close the Cisco AnyConnect Window and the taskbar mini-icon -- Right click vpnui.exe in the 'Cisco AnyConnect Secure Mobility Client' folder. (C:Program Files (x86)CiscoCisco AnyConnect Secure Mobility Client) +- Right click vpnui.exe in the 'Cisco AnyConnect Secure Mobility Client' folder. (C:\\Program Files (x86)\\Cisco\\Cisco AnyConnect Secure Mobility Client) - Click on the 'Run compatibility troubleshooter' button - Choose 'Try recommended settings' - The wizard suggests Windows 8 compatibility. diff --git a/docs.it4i/salomon/environment-and-modules.md index b6ae85042866e704d9e5d67e58ffbc9c1c3207c4..ff1a2ac2bf322a5ee08eb4a74f8fdcf0c99f8a77 100644 --- a/docs.it4i/salomon/environment-and-modules.md +++ b/docs.it4i/salomon/environment-and-modules.md @@ -25,7 +25,7 @@ fi ``` !!! Note "Note" - Do not run commands outputting to standard output (echo, module list, etc) in .bashrc for non-interactive SSH sessions. It breaks fundamental functionality (scp, PBS) of your account! Take care for SSH session interactivity for such commands as stated in the previous example. + Do not run commands outputting to standard output (echo, module list, etc) in .bashrc for non-interactive SSH sessions. It breaks fundamental functionality (SCP, PBS) of your account! Take care to use SSH session interactivity for such commands, as stated in the previous example. How to use modules in examples: <tty-player controls src=/src/salomon/modules_salomon.ttyrec></tty-player> diff --git a/docs.it4i/salomon/hardware-overview.md index a7465b809e8664fd968b187502dfcbe1ebb47da9..329faf809c1be2d6d9611de41e6447044d6e63bd 100644 --- a/docs.it4i/salomon/hardware-overview.md +++ b/docs.it4i/salomon/hardware-overview.md @@ -55,6 +55,6 @@ For large memory computations a special SMP/NUMA SGI UV 2000 server is available |Node |Count |Processor |Cores|Memory|Extra HW | | --- | --- | --- | --- | --- | --- | -|UV2000 |1 |14 x Intel Xeon E5-4627v2, 3.3 GHz, 8 cores |112 |3328 GB DDR3@1866 MHz |2 x 400GB local SSD</br>1x NVIDIA GM200 (GeForce GTX TITAN X), 12 GB RAM | +|UV2000 |1 |14 x Intel Xeon E5-4627v2, 3.3 GHz, 8 cores |112 |3328 GB DDR3@1866 MHz |2 x 400 GB local SSD</br>1 x NVIDIA GM200 (GeForce GTX TITAN X), 12 GB RAM |  diff --git a/docs.it4i/salomon/introduction.md index 87950f42243c3eb1829edd44122cea8d15c1e721..95921f82c512c8ca9daecceb0a44ff435584c22c 100644 --- a/docs.it4i/salomon/introduction.md +++ b/docs.it4i/salomon/introduction.md @@ -1,7 +1,7 @@ Introduction ============ -Welcome to Salomon supercomputer cluster. 
The Salomon cluster consists of 1008 compute nodes, totaling 24192 compute cores with 129 TB RAM and giving over 2 Pflop/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 24 cores, at least 128 GB RAM. Nodes are interconnected by 7D Enhanced hypercube InfiniBand network and equipped with Intel Xeon E5-2680v3 processors. The Salomon cluster consists of 576 nodes without accelerators and 432 nodes equipped with Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](hardware-overview/). +Welcome to the Salomon supercomputer cluster. The Salomon cluster consists of 1008 compute nodes, totaling 24192 compute cores with 129 TB RAM and giving over 2 PFLOP/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 24 cores and at least 128 GB RAM. Nodes are interconnected by the 7D Enhanced hypercube InfiniBand network and equipped with Intel Xeon E5-2680v3 processors. The Salomon cluster consists of 576 nodes without accelerators and 432 nodes equipped with Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](hardware-overview/). The cluster runs [CentOS Linux](http://www.bull.com/bullx-logiciels/systeme-exploitation.html) operating system, which is compatible with the RedHat [ Linux family.](http://upload.wikimedia.org/wikipedia/commons/1/1b/Linux_Distribution_Timeline.svg) diff --git a/docs.it4i/salomon/job-submission-and-execution.md b/docs.it4i/salomon/job-submission-and-execution.md index 23f97bb9bae22abde7943c8fb9bc42fa708a3748..aa07a44d7a746932c65f070b6fd6fd1d35ebeff7 100644 --- a/docs.it4i/salomon/job-submission-and-execution.md +++ b/docs.it4i/salomon/job-submission-and-execution.md @@ -426,7 +426,7 @@ cd $SCRDIR || exit cp $PBS_O_WORKDIR/input . cp $PBS_O_WORKDIR/mympiprog.x . -# load the mpi module +# load the MPI module module load OpenMPI # execute the calculation @@ -464,7 +464,7 @@ Example jobscript for an MPI job with preloaded inputs and executables, options SCRDIR=/scratch/work/user/$USER/myjob cd $SCRDIR || exit -# load the mpi module +# load the MPI module module load OpenMPI # execute the calculation diff --git a/docs.it4i/salomon/software/ansys/ansys-cfx.md b/docs.it4i/salomon/software/ansys/ansys-cfx.md index 9bd7ced93f1ec946a86e598d975cbbb35b7b552f..8137eee84437bc787bfba1cadfe2faced3186c8f 100644 --- a/docs.it4i/salomon/software/ansys/ansys-cfx.md +++ b/docs.it4i/salomon/software/ansys/ansys-cfx.md @@ -48,9 +48,9 @@ echo Machines: $hl /ansys_inc/v145/CFX/bin/cfx5solve -def input.def -size 4 -size-ni 4x -part-large -start-method "Platform MPI Distributed Parallel" -par-dist $hl -P aa_r ``` -Header of the pbs file (above) is common and description can be find on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution/). SVS FEM recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. +The header of the PBS file (above) is common; its description can be found on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution/). SVS FEM recommends requesting resources via the keywords nodes and ppn, which directly address the number of nodes (computers) and cores per node (ppn) to be utilized in the job. The rest of the script also assumes this structure of allocated resources. -Working directory has to be created before sending pbs job into the queue.
Input file should be in working directory or full path to input file has to be specified. >Input file has to be defined by common CFX def file which is attached to the cfx solver via parameter -def +The working directory has to be created before sending the PBS job into the queue. The input file should be in the working directory, or the full path to the input file has to be specified. >The input file has to be defined by a common CFX .def file, which is attached to the CFX solver via the parameter -def **License** should be selected by parameter -P (Big letter **P**). Licensed products are the following: aa_r (ANSYS **Academic** Research), ane3fl (ANSYS Multiphysics)-**Commercial**. [More about licensing here](licensing/) diff --git a/docs.it4i/salomon/software/ansys/ansys-fluent.md b/docs.it4i/salomon/software/ansys/ansys-fluent.md index 4d8aa00357dd32dd82262acb921683df5f25fbd2..9d9237de9b43c88617e6bbb19e908916d1705204 100644 --- a/docs.it4i/salomon/software/ansys/ansys-fluent.md +++ b/docs.it4i/salomon/software/ansys/ansys-fluent.md @@ -4,7 +4,7 @@ ANSYS Fluent [ANSYS Fluent](http://www.ansys.com/Products/Simulation+Technology/Fluid+Dynamics/Fluid+Dynamics+Products/ANSYS+Fluent) software contains the broad physical modeling capabilities needed to model flow, turbulence, heat transfer, and reactions for industrial applications ranging from air flow over an aircraft wing to combustion in a furnace, from bubble columns to oil platforms, from blood flow to semiconductor manufacturing, and from clean room design to wastewater treatment plants. Special models that give the software the ability to model in-cylinder combustion, aeroacoustics, turbomachinery, and multiphase systems have served to broaden its reach. -1. Common way to run Fluent over pbs file +1. Common way to run Fluent over a PBS file ------------------------------------------------------ To run ANSYS Fluent in batch mode you can utilize/modify the default fluent.pbs script and execute it via the qsub command. diff --git a/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md b/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md index c2ce777868c34566878c2c7b20d520ddb45bee14..067c502c909f4418d616b547f69ee51945af45f5 100644 --- a/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md +++ b/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md @@ -1,7 +1,7 @@ ANSYS LS-DYNA ============= -**[ANSYSLS-DYNA](http://www.ansys.com/Products/Simulation+Technology/Structural+Mechanics/Explicit+Dynamics/ANSYS+LS-DYNA)** software provides convenient and easy-to-use access to the technology-rich, time-tested explicit solver without the need to contend with the complex input requirements of this sophisticated program. Introduced in 1996, ANSYS LS-DYNA capabilities have helped customers in numerous industries to resolve highly intricate design issues. ANSYS Mechanical users have been able take advantage of complex explicit solutions for a long time utilizing the traditional ANSYS Parametric Design Language (APDL) environment. These explicit capabilities are available to ANSYS Workbench users as well. The Workbench platform is a powerful, comprehensive, easy-to-use environment for engineering simulation. CAD import from all sources, geometry cleanup, automatic meshing, solution, parametric optimization, result visualization and comprehensive report generation are all available within a single fully interactive modern graphical user environment.
+**[ANSYS LS-DYNA](http://www.ansys.com/Products/Simulation+Technology/Structural+Mechanics/Explicit+Dynamics/ANSYS+LS-DYNA)** software provides convenient and easy-to-use access to the technology-rich, time-tested explicit solver without the need to contend with the complex input requirements of this sophisticated program. Introduced in 1996, ANSYS LS-DYNA capabilities have helped customers in numerous industries to resolve highly intricate design issues. ANSYS Mechanical users have been able to take advantage of complex explicit solutions for a long time utilizing the traditional ANSYS Parametric Design Language (APDL) environment. These explicit capabilities are available to ANSYS Workbench users as well. The Workbench platform is a powerful, comprehensive, easy-to-use environment for engineering simulation. CAD import from all sources, geometry cleanup, automatic meshing, solution, parametric optimization, result visualization and comprehensive report generation are all available within a single fully interactive modern graphical user environment. To run ANSYS LS-DYNA in batch mode you can utilize/modify the default ansysdyna.pbs script and execute it via the qsub command. @@ -51,6 +51,6 @@ echo Machines: $hl /ansys_inc/v145/ansys/bin/ansys145 -dis -lsdynampp i=input.k -machines $hl ``` -Header of the pbs file (above) is common and description can be find on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. +The header of the PBS file (above) is common; its description can be found on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends requesting resources via the keywords nodes and ppn, which directly address the number of nodes (computers) and cores per node (ppn) to be utilized in the job. The rest of the script also assumes this structure of allocated resources. -Working directory has to be created before sending pbs job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common LS-DYNA .**k** file which is attached to the ansys solver via parameter i= +The working directory has to be created before sending the PBS job into the queue. The input file should be in the working directory, or the full path to the input file has to be specified. The input file has to be a common LS-DYNA .**k** file, which is attached to the ANSYS solver via the parameter i= diff --git a/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md b/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md index 939a599b92f29b07e8f4836d604753c20e57592a..d08d43c8882a489c5cf04c496eddf95d99143665 100644 --- a/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md +++ b/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md @@ -50,8 +50,8 @@ echo Machines: $hl /ansys_inc/v145/ansys/bin/ansys145 -b -dis -p aa_r -i input.dat -o file.out -machines $hl -dir $WORK_DIR ``` -Header of the pbs file (above) is common and description can be find on [this site](../../resources-allocation-policy/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn.
These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. +The header of the PBS file (above) is common; its description can be found on [this site](../../resources-allocation-policy/). [SVS FEM](http://www.svsfem.cz) recommends requesting resources via the keywords nodes and ppn, which directly address the number of nodes (computers) and cores per node (ppn) to be utilized in the job. The rest of the script also assumes this structure of allocated resources. -Working directory has to be created before sending pbs job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common APDL file which is attached to the ansys solver via parameter -i +The working directory has to be created before sending the PBS job into the queue. The input file should be in the working directory, or the full path to the input file has to be specified. The input file has to be a common APDL file, which is attached to the ANSYS solver via the parameter -i **License** should be selected by parameter -p. Licensed products are the following: aa_r (ANSYS **Academic** Research), ane3fl (ANSYS Multiphysics)-**Commercial**, aa_r_dy (ANSYS **Academic** AUTODYN) [More about licensing here](licensing/) diff --git a/docs.it4i/salomon/software/ansys/ansys.md b/docs.it4i/salomon/software/ansys/ansys.md index 093bcf9567db939e38673fc9dd5373b1f3d14c37..82d766cd8d861c110a0be1527d9046962005c713 100644 --- a/docs.it4i/salomon/software/ansys/ansys.md +++ b/docs.it4i/salomon/software/ansys/ansys.md @@ -3,7 +3,7 @@ Overview of ANSYS Products **[SVS FEM](http://www.svsfem.cz/)** as **[ANSYS Channel partner](http://www.ansys.com/)** for Czech Republic provided all ANSYS licenses for ANSELM cluster and supports of all ANSYS Products (Multiphysics, Mechanical, MAPDL, CFX, Fluent, Maxwell, LS-DYNA...) to IT staff and ANSYS users. If you are challenging to problem of ANSYS functionality contact please [hotline@svsfem.cz](mailto:hotline@svsfem.cz?subject=Ostrava%20-%20ANSELM) -Anselm provides as commercial as academic variants. Academic variants are distinguished by "**Academic...**" word in the name of license or by two letter preposition "**aa_**" in the license feature name. Change of license is realized on command line respectively directly in user's pbs file (see individual products). [ More about licensing here](licensing/) +Anselm provides both commercial and academic variants. Academic variants are distinguished by the word "**Academic...**" in the license name, or by the two-letter prefix "**aa_**" in the license feature name. The license can be changed on the command line or directly in the user's PBS file (see individual products). [ More about licensing here](licensing/) To load the latest version of any ANSYS product (Mechanical, Fluent, CFX, MAPDL,...)
load the module: diff --git a/docs.it4i/salomon/software/ansys/licensing.md b/docs.it4i/salomon/software/ansys/licensing.md index 63842ea29dc85a0480bb5d34d8812d633f45844d..d9c0e7a8359898229335afcc8f4b400fe5d45c46 100644 --- a/docs.it4i/salomon/software/ansys/licensing.md +++ b/docs.it4i/salomon/software/ansys/licensing.md @@ -1,7 +1,7 @@ Licensing and Available Versions ================================ -ANSYS licence can be used by: +ANSYS license can be used by: ----------------------------- - all persons in the carrying out of the CE IT4Innovations Project (In addition to the primary licensee, which is VSB - Technical University of Ostrava, users are CE IT4Innovations third parties - CE IT4Innovations project partners, particularly the University of Ostrava, the Brno University of Technology - Faculty of Informatics, the Silesian University in Opava, Institute of Geonics AS CR.) - all persons who have a valid license diff --git a/docs.it4i/salomon/software/ansys/setting-license-preferences.md b/docs.it4i/salomon/software/ansys/setting-license-preferences.md index 44e0b8bde968c4336d5d7e8cb7e1625ff348cda3..94594236da246c5460d320591bbcb3db62a1fd46 100644 --- a/docs.it4i/salomon/software/ansys/setting-license-preferences.md +++ b/docs.it4i/salomon/software/ansys/setting-license-preferences.md @@ -1,7 +1,7 @@ Setting license preferences =========================== -Some ANSYS tools allow you to explicitly specify usage of academic or commercial licenses in the command line (eg. ansys161 -p aa_r to select Academic Research license). However, we have observed that not all tools obey this option and choose commercial license. +Some ANSYS tools allow you to explicitly specify usage of academic or commercial licenses on the command line (e.g. ansys161 -p aa_r to select the Academic Research license). However, we have observed that not all tools obey this option and choose a commercial license instead. Thus you need to configure preferred license order with ANSLIC_ADMIN. Please follow these steps and move Academic Research license to the top or bottom of the list accordingly. diff --git a/docs.it4i/salomon/software/ansys/workbench.md b/docs.it4i/salomon/software/ansys/workbench.md index af5c9f9ff002efff830d1ce687e961e03c92dac7..3f838105a4f66d4fd35d5685f0cec7b9729adcdc 100644 --- a/docs.it4i/salomon/software/ansys/workbench.md +++ b/docs.it4i/salomon/software/ansys/workbench.md @@ -7,13 +7,13 @@ It is possible to run Workbench scripts in batch mode. You need to configure sol  -Enable Distribute Solution checkbox and enter number of cores (eg. 48 to run on two Salomon nodes). If you want the job to run on more then 1 node, you must also provide a so called MPI appfile. In the Additional Command Line Arguments input field, enter: +Enable the Distribute Solution checkbox and enter the number of cores (e.g. 48 to run on two Salomon nodes). If you want the job to run on more than 1 node, you must also provide a so-called MPI appfile. In the Additional Command Line Arguments input field, enter: ```bash -mpifile /path/to/my/job/mpifile.txt ``` -Where /path/to/my/job is the directory where your project is saved. We will create the file mpifile.txt programatically later in the batch script. For more information, refer to *ANSYS Mechanical APDL Parallel Processing* *Guide*. +Where /path/to/my/job is the directory where your project is saved. We will create the file mpifile.txt programmatically later in the batch script. For more information, refer to the *ANSYS Mechanical APDL Parallel Processing Guide*.
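For orientation, a minimal sketch of how such an appfile could be generated programmatically inside the batch script. The per-node core count (24) and the solver path are hypothetical placeholders, not values from the guide:

```bash
# build one appfile line per allocated node, using the PBS nodefile
rm -f /path/to/my/job/mpifile.txt
for host in $(sort -u $PBS_NODEFILE); do
    echo "-h $host -np 24 /ansys_inc/v161/ansys/bin/ansysdis161 -dis" >> /path/to/my/job/mpifile.txt
done
```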
Now, save the project and close Workbench. We will use this script to launch the job: diff --git a/docs.it4i/salomon/software/chemistry/nwchem.md b/docs.it4i/salomon/software/chemistry/nwchem.md index 3db648754f6ad50e0ae89758ae825c4cb20956d7..d39e8bcc643b2e24af15d5f8c4e5bac5f32fe356 100644 --- a/docs.it4i/salomon/software/chemistry/nwchem.md +++ b/docs.it4i/salomon/software/chemistry/nwchem.md @@ -44,4 +44,4 @@ Options Please refer to [the documentation](http://www.nwchem-sw.org/index.php/Release62:Top-level) and in the input file set the following directives : - MEMORY : controls the amount of memory NWChem will use -- SCRATCH_DIR : set this to a directory in [SCRATCH filesystem](../../storage/storage/) (or run the calculation completely in a scratch directory). For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, eg. "scf direct" +- SCRATCH_DIR : set this to a directory in the [SCRATCH filesystem](../../storage/storage/) (or run the calculation completely in a scratch directory). For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, e.g. `scf direct` diff --git a/docs.it4i/salomon/software/chemistry/phono3py.md b/docs.it4i/salomon/software/chemistry/phono3py.md index 5d5487f97eea389aacba3e57566e0ebf865fba13..e134bd99f21fff955e0a53dec96d1a3b9a4221c6 100644 --- a/docs.it4i/salomon/software/chemistry/phono3py.md +++ b/docs.it4i/salomon/software/chemistry/phono3py.md @@ -1,4 +1,4 @@ -Phono3py +phono3py ======== Introduction @@ -89,7 +89,7 @@ Once all jobs are finished and vasprun.xml is created in each disp-XXXXX directo $ phono3py --cf3 disp-{00001..00111}/vasprun.xml ``` -and `disp_fc2.yaml, FORCES_FC2`, `FORCES_FC3` and disp_fc3.yaml should appear and put into the hdf format by +and `disp_fc2.yaml`, `FORCES_FC2`, `FORCES_FC3` and `disp_fc3.yaml` should appear and can be put into the HDF format by ```bash $ phono3py --dim="2 2 2" -c POSCAR diff --git a/docs.it4i/salomon/software/compilers.md b/docs.it4i/salomon/software/compilers.md index b14287af39e8f1f05176f7938bd11c6097e74c1a..e3d04cfeafef7eee190e57dd4df6ecd28a8641ee 100644 --- a/docs.it4i/salomon/software/compilers.md +++ b/docs.it4i/salomon/software/compilers.md @@ -12,7 +12,7 @@ There are several compilers for different programming languages available on the The C/C++ and Fortran compilers are provided by: -Opensource: +Open source: - GNU GCC - Clang/LLVM @@ -85,11 +85,11 @@ Unified Parallel C UPC is supported by two compiler/runtime implementations: - GNU - SMP/multi-threading support only -- Berkley - multi-node support as well as SMP/multi-threading support +- Berkeley - multi-node support as well as SMP/multi-threading support ### GNU UPC Compiler -To use the GNU UPC compiler and run the compiled binaries use the module gupc +To use the GNU UPC compiler and run the compiled binaries, use the module GUPC ```bash $ module add gupc ``` @@ -130,18 +130,18 @@ To run the example with 5 threads issue For more information see the man pages. -### Berkley UPC Compiler +### Berkeley UPC Compiler -To use the Berkley UPC compiler and runtime environment to run the binaries use the module bupc +To use the Berkeley UPC compiler and runtime environment to run the binaries, use the module BUPC ```bash $ module add BerkeleyUPC/2.16.2-gompi-2015b $ upcc -version ``` -As default UPC network the "smp" is used. This is very quick and easy way for testing/debugging, but limited to one node only. +By default, the "SMP" UPC network is used.
This is a very quick and easy way for testing/debugging, but it is limited to one node only. -For production runs, it is recommended to use the native InfiniBand implementation of UPC network "ibv". For testing/debugging using multiple nodes, the "mpi" UPC network is recommended. Please note, that the selection of the network is done at the compile time and not at runtime (as expected)! +For production runs, it is recommended to use the native InfiniBand implementation of the UPC network, "IBV". For testing/debugging using multiple nodes, the "MPI" UPC network is recommended. Please note that the selection of the network is done at compile time, not at runtime (as one might expect)! Example UPC code: @@ -162,7 +162,7 @@ Example UPC code: } ``` -To compile the example with the "ibv" UPC network use +To compile the example with the "IBV" UPC network use ```bash $ upcc -network=ibv -o hello.upc.x hello.upc diff --git a/docs.it4i/salomon/software/comsol/comsol-multiphysics.md b/docs.it4i/salomon/software/comsol/comsol-multiphysics.md index a9f06a44239e9da93ec0df0e72ab79a38cc1cbe0..92a479182a6bd08c9f496420c86484fb79283997 100644 --- a/docs.it4i/salomon/software/comsol/comsol-multiphysics.md +++ b/docs.it4i/salomon/software/comsol/comsol-multiphysics.md @@ -117,4 +117,4 @@ cd /apps/cae/COMSOL/51/mli matlab -nodesktop -nosplash -r "mphstart; addpath /scratch/work/user/$USER/work; test_job" ``` -This example shows how to run Livelink for MATLAB with following configuration: 3 nodes and 16 cores per node. Working directory has to be created before submitting (comsol_matlab.pbs) job script into the queue. Input file (test_job.m) has to be in working directory or full path to input file has to be specified. The Matlab command option (-r ”mphstart”) created a connection with a COMSOL server using the default port number. +This example shows how to run LiveLink for MATLAB with the following configuration: 3 nodes and 16 cores per node. The working directory has to be created before submitting the (comsol_matlab.pbs) job script into the queue. The input file (test_job.m) has to be in the working directory, or the full path to the input file has to be specified. The MATLAB command option (-r "mphstart") creates a connection with a COMSOL server using the default port number. diff --git a/docs.it4i/salomon/software/comsol/licensing-and-available-versions.md b/docs.it4i/salomon/software/comsol/licensing-and-available-versions.md index e3a4950a772e6942dbd4caeabc2e5d6d885b3a9e..6c325518cb85e4f43cf8d10336440c38d2ee1492 100644 --- a/docs.it4i/salomon/software/comsol/licensing-and-available-versions.md +++ b/docs.it4i/salomon/software/comsol/licensing-and-available-versions.md @@ -1,7 +1,7 @@ Licensing and Available Versions ================================ -Comsol licence can be used by: +Comsol license can be used by: ------------------------------ - all persons in the carrying out of the CE IT4Innovations Project (In addition to the primary licensee, which is VSB - Technical University of Ostrava, users are CE IT4Innovations third parties - CE IT4Innovations project partners, particularly the University of Ostrava, the Brno University of Technology - Faculty of Informatics, the Silesian University in Opava, Institute of Geonics AS CR.)
diff --git a/docs.it4i/salomon/software/debuggers/aislinn.md b/docs.it4i/salomon/software/debuggers/aislinn.md index c2a9982448b0bee936940655f406615075d60301..d79408f161fa0aa3afc35b3553fb40554cef18e3 100644 --- a/docs.it4i/salomon/software/debuggers/aislinn.md +++ b/docs.it4i/salomon/software/debuggers/aislinn.md @@ -92,11 +92,11 @@ It shows us: Since the verification is a non-trivial process there are some of limitations. - The verified process has to terminate in all runs, i.e. we cannot answer the halting problem. -- The verification is a computationally and memory demanding process. We put an effort to make it efficient and it is an important point for further research. However covering all runs will be always more demanding than techniques that examines only a single run. The good practise is to start with small instances and when it is feasible, make them bigger. The Aislinn is good to find bugs that are hard to find because they occur very rarely (only in a rare scheduling). Such bugs often do not need big instances. +- The verification is a computationally and memory demanding process. We put effort into making it efficient, and it is an important point for further research. However, covering all runs will always be more demanding than techniques that examine only a single run. The good practice is to start with small instances and, when feasible, make them bigger. Aislinn is good at finding bugs that are hard to find because they occur very rarely (only under a rare scheduling). Such bugs often do not need big instances. - Aislinn expects that your program is a "standard MPI" program, i.e. processes communicate only through MPI, the verified program does not interacts with the system in some unusual ways (e.g. opening sockets). There are also some limitations bounded to the current version and they will be removed in the future: - All files containing MPI calls have to be recompiled by MPI implementation provided by Aislinn. The files that does not contain MPI calls, they do not have to recompiled. Aislinn MPI implementation supports many commonly used calls from MPI-2 and MPI-3 related to point-to-point communication, collective communication, and communicator management. Unfortunately, MPI-IO and one-side communication is not implemented yet. - Each MPI can use only one thread (if you use OpenMP, set OMP_NUM_THREADS to 1). -- There are some limitations for using files, but if the program just reads inputs and writes results, it is ok. +- There are some limitations for using files, but if the program just reads inputs and writes results, it is OK. diff --git a/docs.it4i/salomon/software/debuggers/allinea-performance-reports.md b/docs.it4i/salomon/software/debuggers/allinea-performance-reports.md index 6ab49b2d779ee27eef400e8ecbf227d58d01aa68..a3b39f2e4de228e541b668890a4f1dd656e373d6 100644 --- a/docs.it4i/salomon/software/debuggers/allinea-performance-reports.md +++ b/docs.it4i/salomon/software/debuggers/allinea-performance-reports.md @@ -31,7 +31,7 @@ Instead of [running your MPI program the usual way](../mpi/mpi/), use the the pe $ perf-report mpirun ./mympiprog.x ``` -The mpi program will run as usual. The perf-report creates two additional files, in *.txt and *.html format, containing the performance report. Note that demanding MPI codes should be run within [ the queue system](../../resource-allocation-and-job-execution/job-submission-and-execution/). +The MPI program will run as usual.
The perf-report creates two additional files, in *.txt and *.html format, containing the performance report. Note that demanding MPI codes should be run within [ the queue system](../../resource-allocation-and-job-execution/job-submission-and-execution/). Example ------- diff --git a/docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md b/docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md index 332601743958bf114e417f1ab2ce98d21034fa62..6a5f567d85fbff890a5d260c952f974be4014078 100644 --- a/docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md +++ b/docs.it4i/salomon/software/debuggers/intel-vtune-amplifier.md @@ -20,7 +20,7 @@ To profile an application with VTune Amplifier, special kernel modules need to b $ qsub -q qexp -A OPEN-0-0 -I -l select=1,vtune=2016_update1 ``` -After that, you can verify the modules sep*, pax and vtsspp are present in the kernel : +After that, you can verify the modules `sep*`, `pax` and `vtsspp` are present in the kernel: ```bash $ lsmod | grep -e sep -e pax -e vtsspp @@ -42,7 +42,7 @@ and launch the GUI : The GUI will open in new window. Click on "New Project..." to create a new project. After clicking OK, a new window with project properties will appear. At "Application:", select the bath to your binary you want to profile (the binary should be compiled with -g flag). Some additional options such as command line arguments can be selected. At "Managed code profiling mode:" select "Native" (unless you want to profile managed mode .NET/Mono applications). After clicking OK, your project is created. -To run a new analysis, click "New analysis...". You will see a list of possible analysis. Some of them will not be possible on the current CPU (eg. Intel Atom analysis is not possible on Sandy bridge CPU), the GUI will show an error box if you select the wrong analysis. For example, select "Advanced Hotspots". Clicking on Start will start profiling of the application. +To run a new analysis, click "New analysis...". You will see a list of possible analyses. Some of them will not be possible on the current CPU (e.g. Intel Atom analysis is not possible on a Sandy Bridge CPU); the GUI will show an error box if you select the wrong analysis. For example, select "Advanced Hotspots". Clicking on Start will start profiling of the application. Remote Analysis --------------- diff --git a/docs.it4i/salomon/software/debuggers/valgrind.md b/docs.it4i/salomon/software/debuggers/valgrind.md index df3bda344fc9a0d41599a573e6405632ce2b6983..b2c0d14faa28eff2124adbf5f3e462f188640f7b 100644 --- a/docs.it4i/salomon/software/debuggers/valgrind.md +++ b/docs.it4i/salomon/software/debuggers/valgrind.md @@ -263,4 +263,4 @@ Prints this output : (note that there is output printed for every launched MPI p ==31319== ERROR SUMMARY: 1 errors from 1 contexts (suppressed: 4 from 4) ``` -We can see that Valgrind has reported use of unitialised memory on the master process (which reads the array to be broadcasted) and use of unaddresable memory on both processes. +We can see that Valgrind has reported use of uninitialised memory on the master process (which reads the array to be broadcasted) and use of unaddressable memory on both processes.
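For context, a run of the kind that produces a report like the one above might look as follows. This is a minimal sketch; the module names and the binary mybroadcast.x are assumptions, not taken from the page:

```bash
# run two MPI processes, each under Valgrind's Memcheck
$ module load OpenMPI Valgrind
$ mpirun -np 2 valgrind --leak-check=full ./mybroadcast.x
```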
diff --git a/docs.it4i/salomon/software/debuggers/vampir.md b/docs.it4i/salomon/software/debuggers/vampir.md index c19f105f006d40733b80443f42b11b119db6a626..c6188a68047bf0eb7b07e294d1b2e5feaf6ac2a0 100644 --- a/docs.it4i/salomon/software/debuggers/vampir.md +++ b/docs.it4i/salomon/software/debuggers/vampir.md @@ -1,7 +1,7 @@ Vampir ====== -Vampir is a commercial trace analysis and visualisation tool. It can work with traces in OTF and OTF2 formats. It does not have the functionality to collect traces, you need to use a trace collection tool (such as [Score-P](score-p/)) first to collect the traces. +Vampir is a commercial trace analysis and visualization tool. It can work with traces in OTF and OTF2 formats. It does not have the functionality to collect traces; you need to use a trace collection tool (such as [Score-P](score-p/)) first to collect the traces.  diff --git a/docs.it4i/salomon/software/intel-suite/intel-compilers.md b/docs.it4i/salomon/software/intel-suite/intel-compilers.md index 0b61d00afc3b7ecc7122d56994313bfa8dafdc6d..b222edb82995015d0206ee6b67a86721ce586c0f 100644 --- a/docs.it4i/salomon/software/intel-suite/intel-compilers.md +++ b/docs.it4i/salomon/software/intel-suite/intel-compilers.md @@ -1,7 +1,7 @@ Intel Compilers =============== -The Intel compilers in multiple versions are available, via module intel. The compilers include the icc C and C++ compiler and the ifort fortran 77/90/95 compiler. +The Intel compilers are available in multiple versions via the module intel. The compilers include the icc C and C++ compiler and the ifort Fortran 77/90/95 compiler. ```bash $ module load intel @@ -18,7 +18,7 @@ For maximum performance on the Salomon cluster compute nodes, compile your progr $ ifort -ipo -O3 -xCORE-AVX2 -qopt-report1 -qopt-report-phase=vec myprog.f mysubroutines.f -o myprog.x ``` -In this example, we compile the program enabling interprocedural optimizations between source files (-ipo), aggresive loop optimizations (-O3) and vectorization (-xCORE-AVX2) +In this example, we compile the program enabling interprocedural optimizations between source files (-ipo), aggressive loop optimizations (-O3) and vectorization (-xCORE-AVX2). The compiler recognizes the omp, simd, vector and ivdep pragmas for OpenMP parallelization and AVX2 vectorization. Enable the OpenMP parallelization by the **-openmp** compiler switch. diff --git a/docs.it4i/salomon/software/intel-suite/intel-debugger.md b/docs.it4i/salomon/software/intel-suite/intel-debugger.md index 7452cbb501860d8117a480bc0d6e524c73e49311..0bd48dce5bfcd61938cfb659f1839291756bdc38 100644 --- a/docs.it4i/salomon/software/intel-suite/intel-debugger.md +++ b/docs.it4i/salomon/software/intel-suite/intel-debugger.md @@ -74,4 +74,4 @@ Run the idb debugger in GUI mode. The menu Parallel contains number of tools for Further information ------------------- -Exhaustive manual on idb features and usage is published at Intel website, <https://software.intel.com/sites/products/documentation/doclib/iss/2013/compiler/cpp-lin/> +An exhaustive manual on IDB features and usage is published at the Intel website: <https://software.intel.com/sites/products/documentation/doclib/iss/2013/compiler/cpp-lin/>
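For orientation, launching the debugger in GUI mode typically looks like this. A minimal sketch; the qsub options and the program name are assumptions, not taken from the page:

```bash
# log in with X11 forwarding, start an interactive job, then run idb on the binary
$ ssh -X salomon.it4i.cz
$ qsub -q qexp -l select=1 -X -I
$ module load intel
$ idb ./myprog.x
```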